// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include <uapi/misc/habanalabs.h>
#include "habanalabs.h"
#include "include/hw_ip/mmu/mmu_general.h"

#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/genalloc.h>

#define HL_MMU_DEBUG	0
/*
 * The va ranges in context object contain a list with the available chunks of
 * device virtual memory.
 * There is one range for host allocations and one for DRAM allocations.
 *
 * On initialization each range contains one chunk of all of its available
 * virtual range, which is a half of the total device virtual range.
 *
 * On each mapping of physical pages, a suitable virtual range chunk (with a
 * minimum size) is selected from the list. If the chunk size equals the
 * requested size, the chunk is returned. Otherwise, the chunk is split into
 * two chunks - one to return as the result and a remainder to stay in the
 * list.
 *
 * On each unmapping of a virtual address, the relevant virtual chunk is
 * returned to the list. The chunk is added to the list and, if its edges match
 * the edges of the adjacent chunks (meaning a contiguous chunk can be
 * created), the chunks are merged.
 *
 * On finish, the list is checked to contain only one chunk of the entire
 * relevant virtual range (which is a half of the device total virtual range).
 * If not (meaning not all mappings were unmapped), a warning is printed.
 */
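/*
 * Illustrative sketch (not driver code): the split-on-allocate behaviour
 * described above, written against a simplified, hypothetical block type so
 * the bookkeeping is easy to follow. Locking, hint addresses, alignment and
 * freeing of emptied blocks are intentionally omitted; see get_va_block()
 * below for the real logic.
 */
#if 0
struct demo_va_block {
	struct list_head node;
	u64 start;
	u64 end;	/* inclusive in this simplified sketch */
};

static u64 demo_get_block(struct list_head *free_list, u64 size)
{
	struct demo_va_block *blk;

	list_for_each_entry(blk, free_list, node) {
		if (blk->end - blk->start + 1 < size)
			continue;

		/* split: hand out the head, keep the remainder listed */
		blk->start += size;
		if (blk->start > blk->end)
			list_del(&blk->node);	/* chunk fully consumed */

		return blk->start - size;
	}

	return 0;
}
#endif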
/*
 * alloc_device_memory - allocate device memory
 *
 * @ctx                 : current context
 * @args                : host parameters containing the requested size
 * @ret_handle          : result handle
 *
 * This function does the following:
 * - Allocate the requested size rounded up to 2MB pages
 * - Return unique handle
 */
static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
				u32 *ret_handle)
{
	struct hl_device *hdev = ctx->hdev;
	struct hl_vm *vm = &hdev->vm;
	struct hl_vm_phys_pg_pack *phys_pg_pack;
	u64 paddr = 0, total_size, num_pgs, i;
	u32 num_curr_pgs, page_size, page_shift;
	int handle, rc;
	bool contiguous;

	num_curr_pgs = 0;
	page_size = hdev->asic_prop.dram_page_size;
	page_shift = __ffs(page_size);
	num_pgs = (args->alloc.mem_size + (page_size - 1)) >> page_shift;
	total_size = num_pgs << page_shift;

	contiguous = args->flags & HL_MEM_CONTIGUOUS;

	if (contiguous) {
		paddr = (u64) gen_pool_alloc(vm->dram_pg_pool, total_size);
		if (!paddr) {
			dev_err(hdev->dev,
				"failed to allocate %llu huge contiguous pages\n",
				num_pgs);
			return -ENOMEM;
		}
	}

	phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL);
	if (!phys_pg_pack) {
		rc = -ENOMEM;
		goto pages_pack_err;
	}

	phys_pg_pack->vm_type = VM_TYPE_PHYS_PACK;
	phys_pg_pack->asid = ctx->asid;
	phys_pg_pack->npages = num_pgs;
	phys_pg_pack->page_size = page_size;
	phys_pg_pack->total_size = total_size;
	phys_pg_pack->flags = args->flags;
	phys_pg_pack->contiguous = contiguous;

	phys_pg_pack->pages = kvmalloc_array(num_pgs, sizeof(u64), GFP_KERNEL);
	if (!phys_pg_pack->pages) {
		rc = -ENOMEM;
		goto pages_arr_err;
	}

	if (phys_pg_pack->contiguous) {
		for (i = 0 ; i < num_pgs ; i++)
			phys_pg_pack->pages[i] = paddr + i * page_size;
	} else {
		for (i = 0 ; i < num_pgs ; i++) {
			phys_pg_pack->pages[i] = (u64) gen_pool_alloc(
							vm->dram_pg_pool,
							page_size);
			if (!phys_pg_pack->pages[i]) {
				dev_err(hdev->dev,
					"Failed to allocate device memory (out of memory)\n");
				rc = -ENOMEM;
				goto page_err;
			}

			num_curr_pgs++;
		}
	}

	spin_lock(&vm->idr_lock);
	handle = idr_alloc(&vm->phys_pg_pack_handles, phys_pg_pack, 1, 0,
				GFP_ATOMIC);
	spin_unlock(&vm->idr_lock);

	if (handle < 0) {
		dev_err(hdev->dev, "Failed to get handle for page\n");
		rc = -EFAULT;
		goto idr_err;
	}

	for (i = 0 ; i < num_pgs ; i++)
		kref_get(&vm->dram_pg_pool_refcount);

	phys_pg_pack->handle = handle;

	atomic64_add(phys_pg_pack->total_size, &ctx->dram_phys_mem);
	atomic64_add(phys_pg_pack->total_size, &hdev->dram_used_mem);

	*ret_handle = handle;

	return 0;

idr_err:
page_err:
	if (!phys_pg_pack->contiguous)
		for (i = 0 ; i < num_curr_pgs ; i++)
			gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[i],
					page_size);

	kvfree(phys_pg_pack->pages);
pages_arr_err:
	kfree(phys_pg_pack);
pages_pack_err:
	if (contiguous)
		gen_pool_free(vm->dram_pg_pool, paddr, total_size);

	return rc;
}
/*
 * dma_map_host_va - DMA mapping of the given host virtual address.
 * @hdev: habanalabs device structure
 * @addr: the host virtual address of the memory area
 * @size: the size of the memory area
 * @p_userptr: pointer to result userptr structure
 *
 * This function does the following:
 * - Allocate userptr structure
 * - Pin the given host memory using the userptr structure
 * - Perform DMA mapping to have the DMA addresses of the pages
 */
static int dma_map_host_va(struct hl_device *hdev, u64 addr, u64 size,
				struct hl_userptr **p_userptr)
{
	struct hl_userptr *userptr;
	int rc;

	userptr = kzalloc(sizeof(*userptr), GFP_KERNEL);
	if (!userptr) {
		rc = -ENOMEM;
		goto userptr_err;
	}

	rc = hl_pin_host_memory(hdev, addr, size, userptr);
	if (rc) {
		dev_err(hdev->dev, "Failed to pin host memory\n");
		goto pin_err;
	}

	rc = hdev->asic_funcs->asic_dma_map_sg(hdev, userptr->sgt->sgl,
					userptr->sgt->nents, DMA_BIDIRECTIONAL);
	if (rc) {
		dev_err(hdev->dev, "failed to map sgt with DMA region\n");
		goto dma_map_err;
	}

	userptr->dma_mapped = true;
	userptr->dir = DMA_BIDIRECTIONAL;
	userptr->vm_type = VM_TYPE_USERPTR;

	*p_userptr = userptr;

	return 0;

dma_map_err:
	hl_unpin_host_memory(hdev, userptr);
pin_err:
	kfree(userptr);
userptr_err:
	*p_userptr = NULL;

	return rc;
}
/*
 * dma_unmap_host_va - DMA unmapping of the given host virtual address.
 * @hdev: habanalabs device structure
 * @userptr: userptr to free
 *
 * This function does the following:
 * - Unpins the physical pages
 * - Frees the userptr structure
 */
static void dma_unmap_host_va(struct hl_device *hdev,
				struct hl_userptr *userptr)
{
	hl_unpin_host_memory(hdev, userptr);
	kfree(userptr);
}
/*
 * dram_pg_pool_do_release - free DRAM pages pool
 *
 * @ref                 : pointer to reference object
 *
 * This function does the following:
 * - Frees the idr structure of physical pages handles
 * - Frees the generic pool of DRAM physical pages
 */
static void dram_pg_pool_do_release(struct kref *ref)
{
	struct hl_vm *vm = container_of(ref, struct hl_vm,
			dram_pg_pool_refcount);

	/*
	 * free the idr here as only here we know for sure that there are no
	 * allocated physical pages and hence there are no handles in use
	 */
	idr_destroy(&vm->phys_pg_pack_handles);
	gen_pool_destroy(vm->dram_pg_pool);
}
/*
 * free_phys_pg_pack - free physical page pack
 * @hdev: habanalabs device structure
 * @phys_pg_pack: physical page pack to free
 *
 * This function does the following:
 * - For DRAM memory only, iterate over the pack and free each physical block
 *   structure by returning it to the general pool
 * - Free the hl_vm_phys_pg_pack structure
 */
static void free_phys_pg_pack(struct hl_device *hdev,
				struct hl_vm_phys_pg_pack *phys_pg_pack)
{
	struct hl_vm *vm = &hdev->vm;
	u64 i;

	if (!phys_pg_pack->created_from_userptr) {
		if (phys_pg_pack->contiguous) {
			gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[0],
					phys_pg_pack->total_size);

			for (i = 0; i < phys_pg_pack->npages ; i++)
				kref_put(&vm->dram_pg_pool_refcount,
					dram_pg_pool_do_release);
		} else {
			for (i = 0 ; i < phys_pg_pack->npages ; i++) {
				gen_pool_free(vm->dram_pg_pool,
						phys_pg_pack->pages[i],
						phys_pg_pack->page_size);
				kref_put(&vm->dram_pg_pool_refcount,
						dram_pg_pool_do_release);
			}
		}
	}

	kvfree(phys_pg_pack->pages);
	kfree(phys_pg_pack);
}
/*
 * free_device_memory - free device memory
 *
 * @ctx                 : current context
 * @handle              : handle of the memory chunk to free
 *
 * This function does the following:
 * - Free the device memory related to the given handle
 */
static int free_device_memory(struct hl_ctx *ctx, u32 handle)
{
	struct hl_device *hdev = ctx->hdev;
	struct hl_vm *vm = &hdev->vm;
	struct hl_vm_phys_pg_pack *phys_pg_pack;

	spin_lock(&vm->idr_lock);
	phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
	if (phys_pg_pack) {
		if (atomic_read(&phys_pg_pack->mapping_cnt) > 0) {
			dev_err(hdev->dev, "handle %u is mapped, cannot free\n",
				handle);
			spin_unlock(&vm->idr_lock);
			return -EINVAL;
		}

		/*
		 * must remove from idr before the freeing of the physical
		 * pages as the refcount of the pool is also the trigger of the
		 * idr destroy
		 */
		idr_remove(&vm->phys_pg_pack_handles, handle);
		spin_unlock(&vm->idr_lock);

		atomic64_sub(phys_pg_pack->total_size, &ctx->dram_phys_mem);
		atomic64_sub(phys_pg_pack->total_size, &hdev->dram_used_mem);

		free_phys_pg_pack(hdev, phys_pg_pack);
	} else {
		spin_unlock(&vm->idr_lock);
		dev_err(hdev->dev,
			"free device memory failed, no match for handle %u\n",
			handle);
		return -EINVAL;
	}

	return 0;
}
/*
 * clear_va_list_locked - free virtual addresses list
 *
 * @hdev                : habanalabs device structure
 * @va_list             : list of virtual addresses to free
 *
 * This function does the following:
 * - Iterate over the list and free each virtual addresses block
 *
 * This function should be called only when va_list lock is taken
 */
static void clear_va_list_locked(struct hl_device *hdev,
		struct list_head *va_list)
{
	struct hl_vm_va_block *va_block, *tmp;

	list_for_each_entry_safe(va_block, tmp, va_list, node) {
		list_del(&va_block->node);
		kfree(va_block);
	}
}
/*
 * print_va_list_locked - print virtual addresses list
 *
 * @hdev                : habanalabs device structure
 * @va_list             : list of virtual addresses to print
 *
 * This function does the following:
 * - Iterate over the list and print each virtual addresses block
 *
 * This function should be called only when va_list lock is taken
 */
static void print_va_list_locked(struct hl_device *hdev,
		struct list_head *va_list)
{
#if HL_MMU_DEBUG
	struct hl_vm_va_block *va_block;

	dev_dbg(hdev->dev, "print va list:\n");

	list_for_each_entry(va_block, va_list, node)
		dev_dbg(hdev->dev,
			"va block, start: 0x%llx, end: 0x%llx, size: %llu\n",
			va_block->start, va_block->end, va_block->size);
#endif
}
/*
 * merge_va_blocks_locked - merge a virtual block if possible
 *
 * @hdev                : pointer to the habanalabs device structure
 * @va_list             : pointer to the virtual addresses block list
 * @va_block            : virtual block to merge with adjacent blocks
 *
 * This function does the following:
 * - Merge the given block with the adjacent blocks if their virtual ranges
 *   create a contiguous virtual range
 *
 * This function should be called only when va_list lock is taken
 */
static void merge_va_blocks_locked(struct hl_device *hdev,
		struct list_head *va_list, struct hl_vm_va_block *va_block)
{
	struct hl_vm_va_block *prev, *next;

	prev = list_prev_entry(va_block, node);
	if (&prev->node != va_list && prev->end + 1 == va_block->start) {
		prev->end = va_block->end;
		prev->size = prev->end - prev->start;
		list_del(&va_block->node);
		kfree(va_block);
		va_block = prev;
	}

	next = list_next_entry(va_block, node);
	if (&next->node != va_list && va_block->end + 1 == next->start) {
		next->start = va_block->start;
		next->size = next->end - next->start;
		list_del(&va_block->node);
		kfree(va_block);
	}
}
/*
 * add_va_block_locked - add a virtual block to the virtual addresses list
 *
 * @hdev                : pointer to the habanalabs device structure
 * @va_list             : pointer to the virtual addresses block list
 * @start               : start virtual address
 * @end                 : end virtual address
 *
 * This function does the following:
 * - Add the given block to the virtual blocks list and merge with other
 *   blocks if a contiguous virtual block can be created
 *
 * This function should be called only when va_list lock is taken
 */
static int add_va_block_locked(struct hl_device *hdev,
		struct list_head *va_list, u64 start, u64 end)
{
	struct hl_vm_va_block *va_block, *res = NULL;
	u64 size = end - start;

	print_va_list_locked(hdev, va_list);

	list_for_each_entry(va_block, va_list, node) {
		/* TODO: remove upon matureness */
		if (hl_mem_area_crosses_range(start, size, va_block->start,
				va_block->end)) {
			dev_err(hdev->dev,
				"block crossing ranges at start 0x%llx, end 0x%llx\n",
				va_block->start, va_block->end);
			return -EINVAL;
		}

		if (va_block->end < start)
			res = va_block;
	}

	va_block = kmalloc(sizeof(*va_block), GFP_KERNEL);
	if (!va_block)
		return -ENOMEM;

	va_block->start = start;
	va_block->end = end;
	va_block->size = size;

	if (!res)
		list_add(&va_block->node, va_list);
	else
		list_add(&va_block->node, &res->node);

	merge_va_blocks_locked(hdev, va_list, va_block);

	print_va_list_locked(hdev, va_list);

	return 0;
}
/*
 * add_va_block - wrapper for add_va_block_locked
 *
 * @hdev                : pointer to the habanalabs device structure
 * @va_range            : pointer to the virtual addresses range object
 * @start               : start virtual address
 * @end                 : end virtual address
 *
 * This function does the following:
 * - Takes the list lock and calls add_va_block_locked
 */
static inline int add_va_block(struct hl_device *hdev,
		struct hl_va_range *va_range, u64 start, u64 end)
{
	int rc;

	mutex_lock(&va_range->lock);
	rc = add_va_block_locked(hdev, &va_range->list, start, end);
	mutex_unlock(&va_range->lock);

	return rc;
}
/*
 * get_va_block - get a virtual block with the requested size
 *
 * @hdev                : pointer to the habanalabs device structure
 * @va_range            : pointer to the virtual addresses range
 * @size                : requested block size
 * @hint_addr           : hint for request address by the user
 * @is_userptr          : is host or DRAM memory
 *
 * This function does the following:
 * - Iterate on the virtual block list to find a suitable virtual block for the
 *   requested size
 * - Reserve the requested block and update the list
 * - Return the start address of the virtual block
 */
static u64 get_va_block(struct hl_device *hdev,
			struct hl_va_range *va_range, u64 size, u64 hint_addr,
			bool is_userptr)
{
	struct hl_vm_va_block *va_block, *new_va_block = NULL;
	u64 valid_start, valid_size, prev_start, prev_end, page_mask,
		res_valid_start = 0, res_valid_size = 0;
	u32 page_size;
	bool add_prev = false;

	if (is_userptr)
		/*
		 * We cannot know if the user allocated memory with huge pages
		 * or not, hence we continue with the biggest possible
		 * granularity.
		 */
		page_size = hdev->asic_prop.pmmu.huge_page_size;
	else
		page_size = hdev->asic_prop.dmmu.page_size;

	page_mask = ~((u64)page_size - 1);

	mutex_lock(&va_range->lock);

	print_va_list_locked(hdev, &va_range->list);

	list_for_each_entry(va_block, &va_range->list, node) {
		/* calc the first possible aligned addr */
		valid_start = va_block->start;

		if (valid_start & (page_size - 1)) {
			valid_start &= page_mask;
			valid_start += page_size;
			if (valid_start > va_block->end)
				continue;
		}

		valid_size = va_block->end - valid_start;

		if (valid_size >= size &&
			(!new_va_block || valid_size < res_valid_size)) {
			new_va_block = va_block;
			res_valid_start = valid_start;
			res_valid_size = valid_size;
		}

		if (hint_addr && hint_addr >= valid_start &&
				((hint_addr + size) <= va_block->end)) {
			new_va_block = va_block;
			res_valid_start = hint_addr;
			res_valid_size = valid_size;
			break;
		}
	}

	if (!new_va_block) {
		dev_err(hdev->dev, "no available va block for size %llu\n",
				size);
		goto out;
	}

	if (res_valid_start > new_va_block->start) {
		prev_start = new_va_block->start;
		prev_end = res_valid_start - 1;

		new_va_block->start = res_valid_start;
		new_va_block->size = res_valid_size;

		add_prev = true;
	}

	if (new_va_block->size > size) {
		new_va_block->start += size;
		new_va_block->size = new_va_block->end - new_va_block->start;
	} else {
		list_del(&new_va_block->node);
		kfree(new_va_block);
	}

	if (add_prev)
		add_va_block_locked(hdev, &va_range->list, prev_start,
				prev_end);

	print_va_list_locked(hdev, &va_range->list);
out:
	mutex_unlock(&va_range->lock);

	return res_valid_start;
}
/*
 * get_sg_info - get number of pages and the DMA address from SG list
 *
 * @sg: the SG list
 * @dma_addr: pointer to DMA address to return
 *
 * Calculate the number of consecutive pages described by the SG list. Take the
 * offset of the address in the first page, add to it the length and round it
 * up to the number of needed pages.
 */
static u32 get_sg_info(struct scatterlist *sg, dma_addr_t *dma_addr)
{
	*dma_addr = sg_dma_address(sg);

	return ((((*dma_addr) & (PAGE_SIZE - 1)) + sg_dma_len(sg)) +
			(PAGE_SIZE - 1)) >> PAGE_SHIFT;
}
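/*
 * Worked example (illustrative numbers): with 4 KiB pages, an sg entry whose
 * DMA address has an in-page offset of 0x800 and whose sg_dma_len() is 0x3000
 * spans ((0x800 + 0x3000) + 0xFFF) >> 12 = 4 pages, even though 0x3000 alone
 * is only 3 pages.
 */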
/*
 * init_phys_pg_pack_from_userptr - initialize physical page pack from host
 *                                  memory
 * @ctx: current context
 * @userptr: userptr to initialize from
 * @pphys_pg_pack: result pointer
 *
 * This function does the following:
 * - Pin the physical pages related to the given virtual block
 * - Create a physical page pack from the physical pages related to the given
 *   virtual block
 */
static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
				struct hl_userptr *userptr,
				struct hl_vm_phys_pg_pack **pphys_pg_pack)
{
	struct hl_mmu_properties *mmu_prop = &ctx->hdev->asic_prop.pmmu;
	struct hl_vm_phys_pg_pack *phys_pg_pack;
	struct scatterlist *sg;
	dma_addr_t dma_addr;
	u64 page_mask, total_npages;
	u32 npages, page_size = PAGE_SIZE,
		huge_page_size = mmu_prop->huge_page_size;
	bool first = true, is_huge_page_opt = true;
	int rc, i, j;
	u32 pgs_in_huge_page = huge_page_size >> __ffs(page_size);

	phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL);
	if (!phys_pg_pack)
		return -ENOMEM;

	phys_pg_pack->vm_type = userptr->vm_type;
	phys_pg_pack->created_from_userptr = true;
	phys_pg_pack->asid = ctx->asid;
	atomic_set(&phys_pg_pack->mapping_cnt, 1);

	/* Only if all dma_addrs are aligned to 2MB and their
	 * sizes is at least 2MB, we can use huge page mapping.
	 * We limit the 2MB optimization to this condition,
	 * since later on we acquire the related VA range as one
	 * consecutive block.
	 */
	total_npages = 0;
	for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, i) {
		npages = get_sg_info(sg, &dma_addr);

		total_npages += npages;

		if ((npages % pgs_in_huge_page) ||
					(dma_addr & (huge_page_size - 1)))
			is_huge_page_opt = false;
	}

	if (is_huge_page_opt) {
		page_size = huge_page_size;
		do_div(total_npages, pgs_in_huge_page);
	}

	page_mask = ~(((u64) page_size) - 1);

	phys_pg_pack->pages = kvmalloc_array(total_npages, sizeof(u64),
						GFP_KERNEL);
	if (!phys_pg_pack->pages) {
		rc = -ENOMEM;
		goto page_pack_arr_mem_err;
	}

	phys_pg_pack->npages = total_npages;
	phys_pg_pack->page_size = page_size;
	phys_pg_pack->total_size = total_npages * page_size;

	j = 0;
	for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, i) {
		npages = get_sg_info(sg, &dma_addr);

		/* align down to physical page size and save the offset */
		if (first) {
			first = false;
			phys_pg_pack->offset = dma_addr & (page_size - 1);
			dma_addr &= page_mask;
		}

		while (npages) {
			phys_pg_pack->pages[j++] = dma_addr;
			dma_addr += page_size;

			if (is_huge_page_opt)
				npages -= pgs_in_huge_page;
			else
				npages--;
		}
	}

	*pphys_pg_pack = phys_pg_pack;

	return 0;

page_pack_arr_mem_err:
	kfree(phys_pg_pack);

	return rc;
}
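/*
 * Example of the 2MB optimization condition above (illustrative numbers):
 * with a 4 KiB PAGE_SIZE and a 2 MiB huge page, pgs_in_huge_page is 512. An
 * sg entry of 1536 pages (6 MiB) starting on a 2 MiB-aligned DMA address
 * keeps is_huge_page_opt true, while an entry of 1025 pages (4 MiB + 4 KiB),
 * or one starting at a non-2 MiB-aligned address, forces the fallback to
 * 4 KiB mappings.
 */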
/*
 * map_phys_pg_pack - maps the physical page pack.
 * @ctx: current context
 * @vaddr: start address of the virtual area to map from
 * @phys_pg_pack: the pack of physical pages to map to
 *
 * This function does the following:
 * - Maps each chunk of virtual memory to matching physical chunk
 * - Returns 0 on success, error code otherwise
 */
static int map_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
				struct hl_vm_phys_pg_pack *phys_pg_pack)
{
	struct hl_device *hdev = ctx->hdev;
	u64 next_vaddr = vaddr, paddr, mapped_pg_cnt = 0, i;
	u32 page_size = phys_pg_pack->page_size;
	int rc = 0;

	for (i = 0 ; i < phys_pg_pack->npages ; i++) {
		paddr = phys_pg_pack->pages[i];

		rc = hl_mmu_map(ctx, next_vaddr, paddr, page_size);
		if (rc) {
			dev_err(hdev->dev,
				"map failed for handle %u, npages: %llu, mapped: %llu",
				phys_pg_pack->handle, phys_pg_pack->npages,
				mapped_pg_cnt);
			goto err;
		}

		mapped_pg_cnt++;
		next_vaddr += page_size;
	}

	return 0;

err:
	next_vaddr = vaddr;
	for (i = 0 ; i < mapped_pg_cnt ; i++) {
		if (hl_mmu_unmap(ctx, next_vaddr, page_size))
			dev_warn_ratelimited(hdev->dev,
				"failed to unmap handle %u, va: 0x%llx, pa: 0x%llx, page size: %u\n",
					phys_pg_pack->handle, next_vaddr,
					phys_pg_pack->pages[i], page_size);

		next_vaddr += page_size;
	}

	return rc;
}
/*
 * unmap_phys_pg_pack - unmaps the physical page pack
 * @ctx: current context
 * @vaddr: start address of the virtual area to unmap
 * @phys_pg_pack: the pack of physical pages to unmap
 */
static void unmap_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
				struct hl_vm_phys_pg_pack *phys_pg_pack)
{
	struct hl_device *hdev = ctx->hdev;
	u64 next_vaddr, i;
	u32 page_size;

	page_size = phys_pg_pack->page_size;
	next_vaddr = vaddr;

	for (i = 0 ; i < phys_pg_pack->npages ; i++, next_vaddr += page_size) {
		if (hl_mmu_unmap(ctx, next_vaddr, page_size))
			dev_warn_ratelimited(hdev->dev,
			"unmap failed for vaddr: 0x%llx\n", next_vaddr);

		/*
		 * unmapping on Palladium can be really long, so avoid a CPU
		 * soft lockup bug by sleeping a little between unmapping pages
		 */
		if (hdev->pldm)
			usleep_range(500, 1000);
	}
}
static int get_paddr_from_handle(struct hl_ctx *ctx, struct hl_mem_in *args,
				u64 *paddr)
{
	struct hl_device *hdev = ctx->hdev;
	struct hl_vm *vm = &hdev->vm;
	struct hl_vm_phys_pg_pack *phys_pg_pack;
	u32 handle;

	handle = lower_32_bits(args->map_device.handle);
	spin_lock(&vm->idr_lock);
	phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
	if (!phys_pg_pack) {
		spin_unlock(&vm->idr_lock);
		dev_err(hdev->dev, "no match for handle %u\n", handle);
		return -EFAULT;
	}

	*paddr = phys_pg_pack->pages[0];

	spin_unlock(&vm->idr_lock);

	return 0;
}
/*
 * map_device_va - map the given memory
 *
 * @ctx                 : current context
 * @args                : host parameters with handle/host virtual address
 * @device_addr         : pointer to result device virtual address
 *
 * This function does the following:
 * - If given a physical device memory handle, map to a device virtual block
 *   and return the start address of this block
 * - If given a host virtual address and size, find the related physical pages,
 *   map a device virtual block to these pages and return the start address of
 *   this block
 */
static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
		u64 *device_addr)
{
	struct hl_device *hdev = ctx->hdev;
	struct hl_vm *vm = &hdev->vm;
	struct hl_vm_phys_pg_pack *phys_pg_pack;
	struct hl_userptr *userptr = NULL;
	struct hl_vm_hash_node *hnode;
	enum vm_type_t *vm_type;
	u64 ret_vaddr, hint_addr;
	u32 handle = 0;
	int rc;
	bool is_userptr = args->flags & HL_MEM_USERPTR;

	/* Assume failure */
	*device_addr = 0;

	if (is_userptr) {
		u64 addr = args->map_host.host_virt_addr,
			size = args->map_host.mem_size;

		rc = dma_map_host_va(hdev, addr, size, &userptr);
		if (rc) {
			dev_err(hdev->dev, "failed to get userptr from va\n");
			return rc;
		}

		rc = init_phys_pg_pack_from_userptr(ctx, userptr,
				&phys_pg_pack);
		if (rc) {
			dev_err(hdev->dev,
				"unable to init page pack for vaddr 0x%llx\n",
				addr);
			goto init_page_pack_err;
		}

		vm_type = (enum vm_type_t *) userptr;
		hint_addr = args->map_host.hint_addr;
	} else {
		handle = lower_32_bits(args->map_device.handle);

		spin_lock(&vm->idr_lock);
		phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
		if (!phys_pg_pack) {
			spin_unlock(&vm->idr_lock);
			dev_err(hdev->dev,
				"no match for handle %u\n", handle);
			return -EINVAL;
		}

		/* increment now to avoid freeing device memory while mapping */
		atomic_inc(&phys_pg_pack->mapping_cnt);

		spin_unlock(&vm->idr_lock);

		vm_type = (enum vm_type_t *) phys_pg_pack;

		hint_addr = args->map_device.hint_addr;
	}

	/*
	 * relevant for mapping device physical memory only, as host memory is
	 * implicitly shared
	 */
	if (!is_userptr && !(phys_pg_pack->flags & HL_MEM_SHARED) &&
			phys_pg_pack->asid != ctx->asid) {
		dev_err(hdev->dev,
			"Failed to map memory, handle %u is not shared\n",
			handle);
		rc = -EPERM;
		goto shared_err;
	}

	hnode = kzalloc(sizeof(*hnode), GFP_KERNEL);
	if (!hnode) {
		rc = -ENOMEM;
		goto hnode_err;
	}

	ret_vaddr = get_va_block(hdev,
			is_userptr ? &ctx->host_va_range : &ctx->dram_va_range,
			phys_pg_pack->total_size, hint_addr, is_userptr);
	if (!ret_vaddr) {
		dev_err(hdev->dev, "no available va block for handle %u\n",
				handle);
		rc = -ENOMEM;
		goto va_block_err;
	}

	mutex_lock(&ctx->mmu_lock);

	rc = map_phys_pg_pack(ctx, ret_vaddr, phys_pg_pack);
	if (rc) {
		mutex_unlock(&ctx->mmu_lock);
		dev_err(hdev->dev, "mapping page pack failed for handle %u\n",
				handle);
		goto map_err;
	}

	hdev->asic_funcs->mmu_invalidate_cache(hdev, false, *vm_type);

	mutex_unlock(&ctx->mmu_lock);

	ret_vaddr += phys_pg_pack->offset;

	hnode->ptr = vm_type;
	hnode->vaddr = ret_vaddr;

	mutex_lock(&ctx->mem_hash_lock);
	hash_add(ctx->mem_hash, &hnode->node, ret_vaddr);
	mutex_unlock(&ctx->mem_hash_lock);

	*device_addr = ret_vaddr;

	if (is_userptr)
		free_phys_pg_pack(hdev, phys_pg_pack);

	return 0;

map_err:
	if (add_va_block(hdev,
			is_userptr ? &ctx->host_va_range : &ctx->dram_va_range,
			ret_vaddr,
			ret_vaddr + phys_pg_pack->total_size - 1))
		dev_warn(hdev->dev,
			"release va block failed for handle 0x%x, vaddr: 0x%llx\n",
				handle, ret_vaddr);

va_block_err:
	kfree(hnode);
hnode_err:
shared_err:
	atomic_dec(&phys_pg_pack->mapping_cnt);
	if (is_userptr)
		free_phys_pg_pack(hdev, phys_pg_pack);
init_page_pack_err:
	if (is_userptr)
		dma_unmap_host_va(hdev, userptr);

	return rc;
}
/*
 * unmap_device_va - unmap the given device virtual address
 *
 * @ctx                 : current context
 * @vaddr               : device virtual address to unmap
 * @ctx_free            : true if in context free flow, false otherwise.
 *
 * This function does the following:
 * - Unmap the physical pages related to the given virtual address
 * - Return the device virtual block to the virtual block list
 */
static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr, bool ctx_free)
{
	struct hl_device *hdev = ctx->hdev;
	struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
	struct hl_vm_hash_node *hnode = NULL;
	struct hl_userptr *userptr = NULL;
	struct hl_va_range *va_range;
	enum vm_type_t *vm_type;
	bool is_userptr;
	int rc = 0;

	/* protect from double entrance */
	mutex_lock(&ctx->mem_hash_lock);
	hash_for_each_possible(ctx->mem_hash, hnode, node, (unsigned long)vaddr)
		if (vaddr == hnode->vaddr)
			break;

	if (!hnode) {
		mutex_unlock(&ctx->mem_hash_lock);
		dev_err(hdev->dev,
			"unmap failed, no mem hnode for vaddr 0x%llx\n",
			vaddr);
		return -EINVAL;
	}

	hash_del(&hnode->node);
	mutex_unlock(&ctx->mem_hash_lock);

	vm_type = hnode->ptr;

	if (*vm_type == VM_TYPE_USERPTR) {
		is_userptr = true;
		va_range = &ctx->host_va_range;
		userptr = hnode->ptr;
		rc = init_phys_pg_pack_from_userptr(ctx, userptr,
							&phys_pg_pack);
		if (rc) {
			dev_err(hdev->dev,
				"unable to init page pack for vaddr 0x%llx\n",
				vaddr);
			goto vm_type_err;
		}
	} else if (*vm_type == VM_TYPE_PHYS_PACK) {
		is_userptr = false;
		va_range = &ctx->dram_va_range;
		phys_pg_pack = hnode->ptr;
	} else {
		dev_warn(hdev->dev,
			"unmap failed, unknown vm desc for vaddr 0x%llx\n",
				vaddr);
		rc = -EFAULT;
		goto vm_type_err;
	}

	if (atomic_read(&phys_pg_pack->mapping_cnt) == 0) {
		dev_err(hdev->dev, "vaddr 0x%llx is not mapped\n", vaddr);
		rc = -EINVAL;
		goto mapping_cnt_err;
	}

	vaddr &= ~(((u64) phys_pg_pack->page_size) - 1);

	mutex_lock(&ctx->mmu_lock);

	unmap_phys_pg_pack(ctx, vaddr, phys_pg_pack);

	/*
	 * During context free this function is called in a loop to clean all
	 * the context mappings. Hence the cache invalidation can be called once
	 * at the loop end rather than for each iteration
	 */
	if (!ctx_free)
		hdev->asic_funcs->mmu_invalidate_cache(hdev, true, *vm_type);

	mutex_unlock(&ctx->mmu_lock);

	/*
	 * No point in maintaining the free VA block list if the context is
	 * closing as the list will be freed anyway
	 */
	if (!ctx_free) {
		rc = add_va_block(hdev, va_range, vaddr,
					vaddr + phys_pg_pack->total_size - 1);
		if (rc)
			dev_warn(hdev->dev,
					"add va block failed for vaddr: 0x%llx\n",
					vaddr);
	}

	atomic_dec(&phys_pg_pack->mapping_cnt);
	kfree(hnode);

	if (is_userptr) {
		free_phys_pg_pack(hdev, phys_pg_pack);
		dma_unmap_host_va(hdev, userptr);
	}

	return 0;

mapping_cnt_err:
	if (is_userptr)
		free_phys_pg_pack(hdev, phys_pg_pack);
vm_type_err:
	mutex_lock(&ctx->mem_hash_lock);
	hash_add(ctx->mem_hash, &hnode->node, vaddr);
	mutex_unlock(&ctx->mem_hash_lock);

	return rc;
}
static int mem_ioctl_no_mmu(struct hl_fpriv *hpriv, union hl_mem_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_ctx *ctx = hpriv->ctx;
	u64 device_addr = 0;
	u32 handle = 0;
	int rc;

	switch (args->in.op) {
	case HL_MEM_OP_ALLOC:
		if (args->in.alloc.mem_size == 0) {
			dev_err(hdev->dev,
				"alloc size must be larger than 0\n");
			rc = -EINVAL;
			goto out;
		}

		/* Force contiguous as there are no real MMU
		 * translations to overcome physical memory gaps
		 */
		args->in.flags |= HL_MEM_CONTIGUOUS;
		rc = alloc_device_memory(ctx, &args->in, &handle);

		memset(args, 0, sizeof(*args));
		args->out.handle = (__u64) handle;
		break;

	case HL_MEM_OP_FREE:
		rc = free_device_memory(ctx, args->in.free.handle);
		break;

	case HL_MEM_OP_MAP:
		if (args->in.flags & HL_MEM_USERPTR) {
			device_addr = args->in.map_host.host_virt_addr;
			rc = 0;
		} else {
			rc = get_paddr_from_handle(ctx, &args->in,
					&device_addr);
		}

		memset(args, 0, sizeof(*args));
		args->out.device_virt_addr = device_addr;
		break;

	case HL_MEM_OP_UNMAP:
		rc = 0;
		break;

	default:
		dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n");
		rc = -ENOTTY;
		break;
	}

out:
	return rc;
}
int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data)
{
	union hl_mem_args *args = data;
	struct hl_device *hdev = hpriv->hdev;
	struct hl_ctx *ctx = hpriv->ctx;
	u64 device_addr = 0;
	u32 handle = 0;
	int rc;

	if (hl_device_disabled_or_in_reset(hdev)) {
		dev_warn_ratelimited(hdev->dev,
			"Device is %s. Can't execute MEMORY IOCTL\n",
			atomic_read(&hdev->in_reset) ? "in_reset" : "disabled");
		return -EBUSY;
	}

	if (!hdev->mmu_enable)
		return mem_ioctl_no_mmu(hpriv, args);

	switch (args->in.op) {
	case HL_MEM_OP_ALLOC:
		if (!hdev->dram_supports_virtual_memory) {
			dev_err(hdev->dev, "DRAM alloc is not supported\n");
			rc = -EINVAL;
			goto out;
		}

		if (args->in.alloc.mem_size == 0) {
			dev_err(hdev->dev,
				"alloc size must be larger than 0\n");
			rc = -EINVAL;
			goto out;
		}
		rc = alloc_device_memory(ctx, &args->in, &handle);

		memset(args, 0, sizeof(*args));
		args->out.handle = (__u64) handle;
		break;

	case HL_MEM_OP_FREE:
		rc = free_device_memory(ctx, args->in.free.handle);
		break;

	case HL_MEM_OP_MAP:
		rc = map_device_va(ctx, &args->in, &device_addr);

		memset(args, 0, sizeof(*args));
		args->out.device_virt_addr = device_addr;
		break;

	case HL_MEM_OP_UNMAP:
		rc = unmap_device_va(ctx, args->in.unmap.device_virt_addr,
					false);
		break;

	default:
		dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n");
		rc = -ENOTTY;
		break;
	}

out:
	return rc;
}
static int get_user_memory(struct hl_device *hdev, u64 addr, u64 size,
				u32 npages, u64 start, u32 offset,
				struct hl_userptr *userptr)
{
	int rc;

	if (!access_ok((void __user *) (uintptr_t) addr, size)) {
		dev_err(hdev->dev, "user pointer is invalid - 0x%llx\n", addr);
		return -EFAULT;
	}

	userptr->vec = frame_vector_create(npages);
	if (!userptr->vec) {
		dev_err(hdev->dev, "Failed to create frame vector\n");
		return -ENOMEM;
	}

	rc = get_vaddr_frames(start, npages, FOLL_FORCE | FOLL_WRITE,
				userptr->vec);

	if (rc != npages) {
		dev_err(hdev->dev,
			"Failed to map host memory, user ptr probably wrong\n");
		if (rc < 0)
			goto destroy_framevec;
		rc = -EFAULT;
		goto put_framevec;
	}

	if (frame_vector_to_pages(userptr->vec) < 0) {
		dev_err(hdev->dev,
			"Failed to translate frame vector to pages\n");
		rc = -EFAULT;
		goto put_framevec;
	}

	rc = sg_alloc_table_from_pages(userptr->sgt,
					frame_vector_pages(userptr->vec),
					npages, offset, size, GFP_ATOMIC);
	if (rc < 0) {
		dev_err(hdev->dev, "failed to create SG table from pages\n");
		goto put_framevec;
	}

	return 0;

put_framevec:
	put_vaddr_frames(userptr->vec);
destroy_framevec:
	frame_vector_destroy(userptr->vec);
	return rc;
}
/*
 * hl_pin_host_memory - pins a chunk of host memory.
 * @hdev: pointer to the habanalabs device structure
 * @addr: the host virtual address of the memory area
 * @size: the size of the memory area
 * @userptr: pointer to hl_userptr structure
 *
 * This function does the following:
 * - Pins the physical pages
 * - Create an SG list from those pages
 */
int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
			struct hl_userptr *userptr)
{
	u64 start, end;
	u32 npages, offset;
	int rc;

	if (!size) {
		dev_err(hdev->dev, "size to pin is invalid - %llu\n", size);
		return -EINVAL;
	}

	/*
	 * If the combination of the address and size requested for this memory
	 * region causes an integer overflow, return error.
	 */
	if (((addr + size) < addr) ||
			PAGE_ALIGN(addr + size) < (addr + size)) {
		dev_err(hdev->dev,
			"user pointer 0x%llx + %llu causes integer overflow\n",
			addr, size);
		return -EINVAL;
	}

	/*
	 * This function can be called also from data path, hence use atomic
	 * always as it is not a big allocation.
	 */
	userptr->sgt = kzalloc(sizeof(*userptr->sgt), GFP_ATOMIC);
	if (!userptr->sgt)
		return -ENOMEM;

	start = addr & PAGE_MASK;
	offset = addr & ~PAGE_MASK;
	end = PAGE_ALIGN(addr + size);
	npages = (end - start) >> PAGE_SHIFT;

	userptr->size = size;
	userptr->addr = addr;
	userptr->dma_mapped = false;
	INIT_LIST_HEAD(&userptr->job_node);

	rc = get_user_memory(hdev, addr, size, npages, start, offset,
				userptr);
	if (rc) {
		dev_err(hdev->dev,
			"failed to get user memory for address 0x%llx\n",
			addr);
		goto free_sgt;
	}

	hl_debugfs_add_userptr(hdev, userptr);

	return 0;

free_sgt:
	kfree(userptr->sgt);
	return rc;
}
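/*
 * Worked example (illustrative numbers, 4 KiB pages): pinning addr 0x1234
 * with size 0x2000 gives start 0x1000, offset 0x234, end
 * PAGE_ALIGN(0x3234) = 0x4000 and npages (0x4000 - 0x1000) >> 12 = 3, i.e.
 * one extra page to cover the unaligned head and tail of the user buffer.
 */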
/*
 * hl_unpin_host_memory - unpins a chunk of host memory.
 * @hdev: pointer to the habanalabs device structure
 * @userptr: pointer to hl_userptr structure
 *
 * This function does the following:
 * - Unpins the physical pages related to the host memory
 * - Free the SG list
 */
void hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr)
{
	struct page **pages;

	hl_debugfs_remove_userptr(hdev, userptr);

	if (userptr->dma_mapped)
		hdev->asic_funcs->hl_dma_unmap_sg(hdev, userptr->sgt->sgl,
						userptr->sgt->nents,
						userptr->dir);

	pages = frame_vector_pages(userptr->vec);
	if (!IS_ERR(pages)) {
		int i;

		for (i = 0; i < frame_vector_count(userptr->vec); i++)
			set_page_dirty_lock(pages[i]);
	}
	put_vaddr_frames(userptr->vec);
	frame_vector_destroy(userptr->vec);

	list_del(&userptr->job_node);

	sg_free_table(userptr->sgt);
	kfree(userptr->sgt);
}
/*
 * hl_userptr_delete_list - clear userptr list
 *
 * @hdev                : pointer to the habanalabs device structure
 * @userptr_list        : pointer to the list to clear
 *
 * This function does the following:
 * - Iterates over the list, unpins the host memory and frees the userptr
 *   structure for each element
 */
void hl_userptr_delete_list(struct hl_device *hdev,
				struct list_head *userptr_list)
{
	struct hl_userptr *userptr, *tmp;

	list_for_each_entry_safe(userptr, tmp, userptr_list, job_node) {
		hl_unpin_host_memory(hdev, userptr);
		kfree(userptr);
	}

	INIT_LIST_HEAD(userptr_list);
}
/*
 * hl_userptr_is_pinned - returns whether the given userptr is pinned
 *
 * @hdev                : pointer to the habanalabs device structure
 * @addr                : user address to check
 * @size                : size of the area to check
 * @userptr_list        : pointer to the list to search in
 * @userptr             : pointer to userptr to check
 *
 * This function does the following:
 * - Iterates over the list and checks if the given userptr is in it, meaning
 *   it is pinned. If so, returns true, otherwise returns false.
 */
bool hl_userptr_is_pinned(struct hl_device *hdev, u64 addr,
				u32 size, struct list_head *userptr_list,
				struct hl_userptr **userptr)
{
	list_for_each_entry((*userptr), userptr_list, job_node) {
		if ((addr == (*userptr)->addr) && (size == (*userptr)->size))
			return true;
	}

	return false;
}
/*
 * hl_va_range_init - initialize virtual addresses range
 *
 * @hdev                : pointer to the habanalabs device structure
 * @va_range            : pointer to the range to initialize
 * @start               : range start address
 * @end                 : range end address
 *
 * This function does the following:
 * - Initializes the virtual addresses list of the given range with the given
 *   addresses.
 */
static int hl_va_range_init(struct hl_device *hdev,
		struct hl_va_range *va_range, u64 start, u64 end)
{
	int rc;

	INIT_LIST_HEAD(&va_range->list);

	/* PAGE_SIZE alignment */

	if (start & (PAGE_SIZE - 1)) {
		start &= PAGE_MASK;
		start += PAGE_SIZE;
	}

	if (end & (PAGE_SIZE - 1))
		end &= PAGE_MASK;

	if (start >= end) {
		dev_err(hdev->dev, "too small vm range for va list\n");
		return -EFAULT;
	}

	rc = add_va_block(hdev, va_range, start, end);

	if (rc) {
		dev_err(hdev->dev, "Failed to init host va list\n");
		return rc;
	}

	va_range->start_addr = start;
	va_range->end_addr = end;

	return 0;
}
/*
 * hl_vm_ctx_init_with_ranges - initialize virtual memory for context
 *
 * @ctx                 : pointer to the habanalabs context structure
 * @host_range_start    : host virtual addresses range start
 * @host_range_end      : host virtual addresses range end
 * @dram_range_start    : dram virtual addresses range start
 * @dram_range_end      : dram virtual addresses range end
 *
 * This function initializes the following:
 * - MMU for context
 * - Virtual address to area descriptor hashtable
 * - Virtual block list of available virtual memory
 */
static int hl_vm_ctx_init_with_ranges(struct hl_ctx *ctx, u64 host_range_start,
				u64 host_range_end, u64 dram_range_start,
				u64 dram_range_end)
{
	struct hl_device *hdev = ctx->hdev;
	int rc;

	rc = hl_mmu_ctx_init(ctx);
	if (rc) {
		dev_err(hdev->dev, "failed to init context %d\n", ctx->asid);
		return rc;
	}

	mutex_init(&ctx->mem_hash_lock);
	hash_init(ctx->mem_hash);

	mutex_init(&ctx->host_va_range.lock);

	rc = hl_va_range_init(hdev, &ctx->host_va_range, host_range_start,
			host_range_end);
	if (rc) {
		dev_err(hdev->dev, "failed to init host vm range\n");
		goto host_vm_err;
	}

	mutex_init(&ctx->dram_va_range.lock);

	rc = hl_va_range_init(hdev, &ctx->dram_va_range, dram_range_start,
			dram_range_end);
	if (rc) {
		dev_err(hdev->dev, "failed to init dram vm range\n");
		goto dram_vm_err;
	}

	hl_debugfs_add_ctx_mem_hash(hdev, ctx);

	return 0;

dram_vm_err:
	mutex_destroy(&ctx->dram_va_range.lock);

	mutex_lock(&ctx->host_va_range.lock);
	clear_va_list_locked(hdev, &ctx->host_va_range.list);
	mutex_unlock(&ctx->host_va_range.lock);
host_vm_err:
	mutex_destroy(&ctx->host_va_range.lock);
	mutex_destroy(&ctx->mem_hash_lock);
	hl_mmu_ctx_fini(ctx);

	return rc;
}
int hl_vm_ctx_init(struct hl_ctx *ctx)
{
	struct asic_fixed_properties *prop = &ctx->hdev->asic_prop;
	u64 host_range_start, host_range_end, dram_range_start,
		dram_range_end;

	atomic64_set(&ctx->dram_phys_mem, 0);

	/*
	 * - If MMU is enabled, init the ranges as usual.
	 * - If MMU is disabled, in case of host mapping, the returned address
	 *   is the given one.
	 *   In case of DRAM mapping, the returned address is the physical
	 *   address of the memory related to the given handle.
	 */
	if (ctx->hdev->mmu_enable) {
		dram_range_start = prop->va_space_dram_start_address;
		dram_range_end = prop->va_space_dram_end_address;
		host_range_start = prop->va_space_host_start_address;
		host_range_end = prop->va_space_host_end_address;
	} else {
		dram_range_start = prop->dram_user_base_address;
		dram_range_end = prop->dram_end_address;
		host_range_start = prop->dram_user_base_address;
		host_range_end = prop->dram_end_address;
	}

	return hl_vm_ctx_init_with_ranges(ctx, host_range_start,
			host_range_end, dram_range_start, dram_range_end);
}
/*
 * hl_va_range_fini - clear a virtual addresses range
 *
 * @hdev                : pointer to the habanalabs structure
 * @va_range            : pointer to virtual addresses range
 *
 * This function does the following:
 * - Frees the virtual addresses block list and its lock
 */
static void hl_va_range_fini(struct hl_device *hdev,
		struct hl_va_range *va_range)
{
	mutex_lock(&va_range->lock);
	clear_va_list_locked(hdev, &va_range->list);
	mutex_unlock(&va_range->lock);

	mutex_destroy(&va_range->lock);
}
/*
 * hl_vm_ctx_fini - virtual memory teardown of context
 *
 * @ctx                 : pointer to the habanalabs context structure
 *
 * This function performs the following teardown:
 * - Virtual block list of available virtual memory
 * - Virtual address to area descriptor hashtable
 * - MMU for context
 *
 * In addition this function does the following:
 * - Unmaps the existing hashtable nodes if the hashtable is not empty. The
 *   hashtable should be empty as no valid mappings should exist at this
 *   point.
 * - Frees any existing physical page list from the idr which relates to the
 *   current context asid.
 * - This function checks the virtual block list for correctness. At this point
 *   the list should contain one element which describes the whole virtual
 *   memory range of the context. Otherwise, a warning is printed.
 */
void hl_vm_ctx_fini(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	struct hl_vm *vm = &hdev->vm;
	struct hl_vm_phys_pg_pack *phys_pg_list;
	struct hl_vm_hash_node *hnode;
	struct hlist_node *tmp_node;
	int i;

	hl_debugfs_remove_ctx_mem_hash(hdev, ctx);

	/*
	 * Clearly something went wrong on hard reset so no point in printing
	 * another side effect error
	 */
	if (!hdev->hard_reset_pending && !hash_empty(ctx->mem_hash))
		dev_notice(hdev->dev,
			"ctx %d is freed while it has va in use\n",
			ctx->asid);

	hash_for_each_safe(ctx->mem_hash, i, tmp_node, hnode, node) {
		dev_dbg(hdev->dev,
			"hl_mem_hash_node of vaddr 0x%llx of asid %d is still alive\n",
			hnode->vaddr, ctx->asid);
		unmap_device_va(ctx, hnode->vaddr, true);
	}

	/* invalidate the cache once after the unmapping loop */
	hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR);
	hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_PHYS_PACK);

	spin_lock(&vm->idr_lock);
	idr_for_each_entry(&vm->phys_pg_pack_handles, phys_pg_list, i)
		if (phys_pg_list->asid == ctx->asid) {
			dev_dbg(hdev->dev,
				"page list 0x%px of asid %d is still alive\n",
				phys_pg_list, ctx->asid);
			atomic64_sub(phys_pg_list->total_size,
					&hdev->dram_used_mem);
			free_phys_pg_pack(hdev, phys_pg_list);
			idr_remove(&vm->phys_pg_pack_handles, i);
		}
	spin_unlock(&vm->idr_lock);

	hl_va_range_fini(hdev, &ctx->dram_va_range);
	hl_va_range_fini(hdev, &ctx->host_va_range);

	mutex_destroy(&ctx->mem_hash_lock);
	hl_mmu_ctx_fini(ctx);
}
/*
 * hl_vm_init - initialize virtual memory module
 *
 * @hdev                : pointer to the habanalabs device structure
 *
 * This function initializes the following:
 * - DRAM physical pages pool of 2MB
 * - Idr for device memory allocation handles
 */
int hl_vm_init(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_vm *vm = &hdev->vm;
	int rc;

	vm->dram_pg_pool = gen_pool_create(__ffs(prop->dram_page_size), -1);
	if (!vm->dram_pg_pool) {
		dev_err(hdev->dev, "Failed to create dram page pool\n");
		return -ENOMEM;
	}

	kref_init(&vm->dram_pg_pool_refcount);

	rc = gen_pool_add(vm->dram_pg_pool, prop->dram_user_base_address,
			prop->dram_end_address - prop->dram_user_base_address,
			-1);

	if (rc) {
		dev_err(hdev->dev,
			"Failed to add memory to dram page pool %d\n", rc);
		goto pool_add_err;
	}

	spin_lock_init(&vm->idr_lock);
	idr_init(&vm->phys_pg_pack_handles);

	atomic64_set(&hdev->dram_used_mem, 0);

	vm->init_done = true;

	return 0;

pool_add_err:
	gen_pool_destroy(vm->dram_pg_pool);

	return rc;
}
/*
 * hl_vm_fini - virtual memory module teardown
 *
 * @hdev                : pointer to the habanalabs device structure
 *
 * This function performs the following teardown:
 * - Idr for device memory allocation handles
 * - DRAM physical pages pool of 2MB
 */
void hl_vm_fini(struct hl_device *hdev)
{
	struct hl_vm *vm = &hdev->vm;

	if (!vm->init_done)
		return;

	/*
	 * At this point all the contexts should be freed and hence no DRAM
	 * memory should be in use. Hence the DRAM pool should be freed here.
	 */
	if (kref_put(&vm->dram_pg_pool_refcount, dram_pg_pool_do_release) != 1)
		dev_warn(hdev->dev, "dram_pg_pool was not destroyed on %s\n",
				__func__);

	vm->init_done = false;
}