// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include <uapi/misc/habanalabs.h>
#include "habanalabs.h"
#include "../include/hw_ip/mmu/mmu_general.h"

#include <linux/uaccess.h>
#include <linux/slab.h>

#define HL_MMU_DEBUG	0

/*
 * The va ranges in context object contain a list with the available chunks of
 * device virtual memory.
 * There is one range for host allocations and one for DRAM allocations.
 *
 * On initialization each range contains one chunk of all of its available
 * virtual range which is a half of the total device virtual range.
 *
 * On each mapping of physical pages, a suitable virtual range chunk (with a
 * minimum size) is selected from the list. If the chunk size equals the
 * requested size, the chunk is returned. Otherwise, the chunk is split into
 * two chunks - one to return as result and a remainder to stay in the list.
 *
 * On each unmapping of a virtual address, the relevant virtual chunk is
 * returned to the list. The chunk is added to the list and if its edges match
 * the edges of the adjacent chunks (meaning a contiguous chunk can be created),
 * the chunks are merged.
 *
 * On finish, the list is checked to have only one chunk of all the relevant
 * virtual range (which is a half of the device total virtual range).
 * If not (meaning not all mappings were unmapped), a warning is printed.
 */
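
/*
 * Illustrative example of the chunk handling above (addresses are made up,
 * not taken from any ASIC): with a single free chunk [0x1000, 0x8fff],
 * mapping 0x2000 bytes splits it into the returned chunk [0x1000, 0x2fff]
 * and a remaining chunk [0x3000, 0x8fff]. Unmapping later re-adds
 * [0x1000, 0x2fff], whose edge touches 0x3000, so the two chunks merge back
 * into [0x1000, 0x8fff].
 */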

/*
 * alloc_device_memory - allocate device memory
 *
 * @ctx        : current context
 * @args       : host parameters containing the requested size
 * @ret_handle : result handle
 *
 * This function does the following:
 * - Allocate the requested size rounded up to 'dram_page_size' pages
 * - Return unique handle
 */
static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
				u32 *ret_handle)
{
	struct hl_device *hdev = ctx->hdev;
	struct hl_vm *vm = &hdev->vm;
	struct hl_vm_phys_pg_pack *phys_pg_pack;
	u64 paddr = 0, total_size, num_pgs, i;
	u32 num_curr_pgs, page_size, page_shift;
	int handle, rc;
	bool contiguous;

	num_curr_pgs = 0;
	page_size = hdev->asic_prop.dram_page_size;
	page_shift = __ffs(page_size);
	num_pgs = (args->alloc.mem_size + (page_size - 1)) >> page_shift;
	total_size = num_pgs << page_shift;
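	/*
	 * Rounding example (illustrative numbers only): with a 2MB
	 * dram_page_size, a 5MB request gives num_pgs = 3 and a total_size
	 * of 6MB.
	 */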

	if (!total_size) {
		dev_err(hdev->dev, "Cannot allocate 0 bytes\n");
		return -EINVAL;
	}

	contiguous = args->flags & HL_MEM_CONTIGUOUS;

	if (contiguous) {
		paddr = (u64) gen_pool_alloc(vm->dram_pg_pool, total_size);
		if (!paddr) {
			dev_err(hdev->dev,
				"failed to allocate %llu contiguous pages with total size of %llu\n",
				num_pgs, total_size);
			return -ENOMEM;
		}

		if (hdev->memory_scrub) {
			rc = hdev->asic_funcs->scrub_device_mem(hdev, paddr,
					total_size);
			if (rc) {
				dev_err(hdev->dev,
					"Failed to scrub contiguous device memory\n");
				goto pages_pack_err;
			}
		}
	}

	phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL);
	if (!phys_pg_pack) {
		rc = -ENOMEM;
		goto pages_pack_err;
	}

	phys_pg_pack->vm_type = VM_TYPE_PHYS_PACK;
	phys_pg_pack->asid = ctx->asid;
	phys_pg_pack->npages = num_pgs;
	phys_pg_pack->page_size = page_size;
	phys_pg_pack->total_size = total_size;
	phys_pg_pack->flags = args->flags;
	phys_pg_pack->contiguous = contiguous;

	phys_pg_pack->pages = kvmalloc_array(num_pgs, sizeof(u64), GFP_KERNEL);
	if (ZERO_OR_NULL_PTR(phys_pg_pack->pages)) {
		rc = -ENOMEM;
		goto pages_arr_err;
	}

	if (phys_pg_pack->contiguous) {
		for (i = 0 ; i < num_pgs ; i++)
			phys_pg_pack->pages[i] = paddr + i * page_size;
	} else {
		for (i = 0 ; i < num_pgs ; i++) {
			phys_pg_pack->pages[i] = (u64) gen_pool_alloc(
							vm->dram_pg_pool,
							page_size);
			if (!phys_pg_pack->pages[i]) {
				dev_err(hdev->dev,
					"Failed to allocate device memory (out of memory)\n");
				rc = -ENOMEM;
				goto page_err;
			}

			if (hdev->memory_scrub) {
				rc = hdev->asic_funcs->scrub_device_mem(hdev,
						phys_pg_pack->pages[i],
						page_size);
				if (rc) {
					dev_err(hdev->dev,
						"Failed to scrub device memory\n");
					goto page_err;
				}
			}

			num_curr_pgs++;
		}
	}

	spin_lock(&vm->idr_lock);
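	/*
	 * Note: GFP_ATOMIC is used for the idr allocation below because it
	 * runs under the idr_lock spinlock, where sleeping is not allowed.
	 */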
	handle = idr_alloc(&vm->phys_pg_pack_handles, phys_pg_pack, 1, 0,
				GFP_ATOMIC);
	spin_unlock(&vm->idr_lock);

	if (handle < 0) {
		dev_err(hdev->dev, "Failed to get handle for page\n");
		rc = -EFAULT;
		goto idr_err;
	}

	for (i = 0 ; i < num_pgs ; i++)
		kref_get(&vm->dram_pg_pool_refcount);

	phys_pg_pack->handle = handle;

	atomic64_add(phys_pg_pack->total_size, &ctx->dram_phys_mem);
	atomic64_add(phys_pg_pack->total_size, &hdev->dram_used_mem);

	*ret_handle = handle;

	return 0;

idr_err:
page_err:
	if (!phys_pg_pack->contiguous)
		for (i = 0 ; i < num_curr_pgs ; i++)
			gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[i],
					page_size);

	kvfree(phys_pg_pack->pages);
pages_arr_err:
	kfree(phys_pg_pack);
pages_pack_err:
	if (contiguous)
		gen_pool_free(vm->dram_pg_pool, paddr, total_size);

	return rc;
}

/*
 * dma_map_host_va - DMA mapping of the given host virtual address.
 * @hdev: habanalabs device structure
 * @addr: the host virtual address of the memory area
 * @size: the size of the memory area
 * @p_userptr: pointer to result userptr structure
 *
 * This function does the following:
 * - Allocate userptr structure
 * - Pin the given host memory using the userptr structure
 * - Perform DMA mapping to have the DMA addresses of the pages
 */
static int dma_map_host_va(struct hl_device *hdev, u64 addr, u64 size,
				struct hl_userptr **p_userptr)
{
	struct hl_userptr *userptr;
	int rc;

	userptr = kzalloc(sizeof(*userptr), GFP_KERNEL);
	if (!userptr) {
		rc = -ENOMEM;
		goto userptr_err;
	}

	rc = hl_pin_host_memory(hdev, addr, size, userptr);
	if (rc) {
		dev_err(hdev->dev, "Failed to pin host memory\n");
		goto pin_err;
	}

	rc = hdev->asic_funcs->asic_dma_map_sg(hdev, userptr->sgt->sgl,
				userptr->sgt->nents, DMA_BIDIRECTIONAL);
	if (rc) {
		dev_err(hdev->dev, "failed to map sgt with DMA region\n");
		goto dma_map_err;
	}

	userptr->dma_mapped = true;
	userptr->dir = DMA_BIDIRECTIONAL;
	userptr->vm_type = VM_TYPE_USERPTR;

	*p_userptr = userptr;

	return 0;

dma_map_err:
	hl_unpin_host_memory(hdev, userptr);
pin_err:
	kfree(userptr);
userptr_err:

	return rc;
}

/*
 * dma_unmap_host_va - DMA unmapping of the given host virtual address.
 * @hdev: habanalabs device structure
 * @userptr: userptr to free
 *
 * This function does the following:
 * - Unpins the physical pages
 * - Frees the userptr structure
 */
static void dma_unmap_host_va(struct hl_device *hdev,
				struct hl_userptr *userptr)
{
	hl_unpin_host_memory(hdev, userptr);
	kfree(userptr);
}

/*
 * dram_pg_pool_do_release - free DRAM pages pool
 *
 * @ref : pointer to reference object
 *
 * This function does the following:
 * - Frees the idr structure of physical pages handles
 * - Frees the generic pool of DRAM physical pages
 */
static void dram_pg_pool_do_release(struct kref *ref)
{
	struct hl_vm *vm = container_of(ref, struct hl_vm,
					dram_pg_pool_refcount);

	/*
	 * free the idr here as only here we know for sure that there are no
	 * allocated physical pages and hence there are no handles in use
	 */
	idr_destroy(&vm->phys_pg_pack_handles);
	gen_pool_destroy(vm->dram_pg_pool);
}

/*
 * free_phys_pg_pack - free physical page pack
 * @hdev: habanalabs device structure
 * @phys_pg_pack: physical page pack to free
 *
 * This function does the following:
 * - For DRAM memory only, iterate over the pack and free each physical block
 *   structure by returning it to the general pool
 * - Free the hl_vm_phys_pg_pack structure
 */
static void free_phys_pg_pack(struct hl_device *hdev,
				struct hl_vm_phys_pg_pack *phys_pg_pack)
{
	struct hl_vm *vm = &hdev->vm;
	u64 i;

	if (!phys_pg_pack->created_from_userptr) {
		if (phys_pg_pack->contiguous) {
			gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[0],
					phys_pg_pack->total_size);

			for (i = 0; i < phys_pg_pack->npages ; i++)
				kref_put(&vm->dram_pg_pool_refcount,
					dram_pg_pool_do_release);
		} else {
			for (i = 0 ; i < phys_pg_pack->npages ; i++) {
				gen_pool_free(vm->dram_pg_pool,
						phys_pg_pack->pages[i],
						phys_pg_pack->page_size);
				kref_put(&vm->dram_pg_pool_refcount,
						dram_pg_pool_do_release);
			}
		}
	}

	kvfree(phys_pg_pack->pages);
	kfree(phys_pg_pack);
}

/*
 * free_device_memory - free device memory
 *
 * @ctx    : current context
 * @handle : handle of the memory chunk to free
 *
 * This function does the following:
 * - Free the device memory related to the given handle
 */
static int free_device_memory(struct hl_ctx *ctx, u32 handle)
{
	struct hl_device *hdev = ctx->hdev;
	struct hl_vm *vm = &hdev->vm;
	struct hl_vm_phys_pg_pack *phys_pg_pack;

	spin_lock(&vm->idr_lock);
	phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
	if (phys_pg_pack) {
		if (atomic_read(&phys_pg_pack->mapping_cnt) > 0) {
			dev_err(hdev->dev, "handle %u is mapped, cannot free\n",
				handle);
			spin_unlock(&vm->idr_lock);
			return -EINVAL;
		}

		/*
		 * must remove from idr before the freeing of the physical
		 * pages as the refcount of the pool is also the trigger of the
		 * idr destroy
		 */
		idr_remove(&vm->phys_pg_pack_handles, handle);
		spin_unlock(&vm->idr_lock);

		atomic64_sub(phys_pg_pack->total_size, &ctx->dram_phys_mem);
		atomic64_sub(phys_pg_pack->total_size, &hdev->dram_used_mem);

		free_phys_pg_pack(hdev, phys_pg_pack);
	} else {
		spin_unlock(&vm->idr_lock);
		dev_err(hdev->dev,
			"free device memory failed, no match for handle %u\n",
			handle);
		return -EINVAL;
	}

	return 0;
}

/*
 * clear_va_list_locked - free virtual addresses list
 *
 * @hdev    : habanalabs device structure
 * @va_list : list of virtual addresses to free
 *
 * This function does the following:
 * - Iterate over the list and free each virtual addresses block
 *
 * This function should be called only when va_list lock is taken
 */
static void clear_va_list_locked(struct hl_device *hdev,
		struct list_head *va_list)
{
	struct hl_vm_va_block *va_block, *tmp;

	list_for_each_entry_safe(va_block, tmp, va_list, node) {
		list_del(&va_block->node);
		kfree(va_block);
	}
}

/*
 * print_va_list_locked - print virtual addresses list
 *
 * @hdev    : habanalabs device structure
 * @va_list : list of virtual addresses to print
 *
 * This function does the following:
 * - Iterate over the list and print each virtual addresses block
 *
 * This function should be called only when va_list lock is taken
 */
static void print_va_list_locked(struct hl_device *hdev,
		struct list_head *va_list)
{
#if HL_MMU_DEBUG
	struct hl_vm_va_block *va_block;

	dev_dbg(hdev->dev, "print va list:\n");

	list_for_each_entry(va_block, va_list, node)
		dev_dbg(hdev->dev,
			"va block, start: 0x%llx, end: 0x%llx, size: %llu\n",
			va_block->start, va_block->end, va_block->size);
#endif
}

/*
 * merge_va_blocks_locked - merge a virtual block if possible
 *
 * @hdev     : pointer to the habanalabs device structure
 * @va_list  : pointer to the virtual addresses block list
 * @va_block : virtual block to merge with adjacent blocks
 *
 * This function does the following:
 * - Merge the given blocks with the adjacent blocks if their virtual ranges
 *   create a contiguous virtual range
 *
 * This function should be called only when va_list lock is taken
 */
static void merge_va_blocks_locked(struct hl_device *hdev,
		struct list_head *va_list, struct hl_vm_va_block *va_block)
{
	struct hl_vm_va_block *prev, *next;

	prev = list_prev_entry(va_block, node);
	if (&prev->node != va_list && prev->end + 1 == va_block->start) {
		prev->end = va_block->end;
		prev->size = prev->end - prev->start;
		list_del(&va_block->node);
		kfree(va_block);
		va_block = prev;
	}

	next = list_next_entry(va_block, node);
	if (&next->node != va_list && va_block->end + 1 == next->start) {
		next->start = va_block->start;
		next->size = next->end - next->start;
		list_del(&va_block->node);
		kfree(va_block);
	}
}

/*
 * add_va_block_locked - add a virtual block to the virtual addresses list
 *
 * @hdev    : pointer to the habanalabs device structure
 * @va_list : pointer to the virtual addresses block list
 * @start   : start virtual address
 * @end     : end virtual address
 *
 * This function does the following:
 * - Add the given block to the virtual blocks list and merge with other
 *   blocks if a contiguous virtual block can be created
 *
 * This function should be called only when va_list lock is taken
 */
static int add_va_block_locked(struct hl_device *hdev,
		struct list_head *va_list, u64 start, u64 end)
{
	struct hl_vm_va_block *va_block, *res = NULL;
	u64 size = end - start;

	print_va_list_locked(hdev, va_list);

	list_for_each_entry(va_block, va_list, node) {
		/* TODO: remove upon matureness */
		if (hl_mem_area_crosses_range(start, size, va_block->start,
				va_block->end)) {
			dev_err(hdev->dev,
				"block crossing ranges at start 0x%llx, end 0x%llx\n",
				va_block->start, va_block->end);
			return -EINVAL;
		}

		if (va_block->end < start)
			res = va_block;
	}

	va_block = kmalloc(sizeof(*va_block), GFP_KERNEL);
	if (!va_block)
		return -ENOMEM;

	va_block->start = start;
	va_block->end = end;
	va_block->size = size;

	if (!res)
		list_add(&va_block->node, va_list);
	else
		list_add(&va_block->node, &res->node);

	merge_va_blocks_locked(hdev, va_list, va_block);

	print_va_list_locked(hdev, va_list);

	return 0;
}

/*
 * add_va_block - wrapper for add_va_block_locked
 *
 * @hdev     : pointer to the habanalabs device structure
 * @va_range : pointer to the virtual addresses range
 * @start    : start virtual address
 * @end      : end virtual address
 *
 * This function does the following:
 * - Takes the list lock and calls add_va_block_locked
 */
static inline int add_va_block(struct hl_device *hdev,
		struct hl_va_range *va_range, u64 start, u64 end)
{
	int rc;

	mutex_lock(&va_range->lock);
	rc = add_va_block_locked(hdev, &va_range->list, start, end);
	mutex_unlock(&va_range->lock);

	return rc;
}

/**
 * get_va_block() - get a virtual block for the given size and alignment.
 * @hdev: pointer to the habanalabs device structure.
 * @va_range: pointer to the virtual addresses range.
 * @size: requested block size.
 * @hint_addr: hint for requested address by the user.
 * @va_block_align: required alignment of the virtual block start address.
 *
 * This function does the following:
 * - Iterate on the virtual block list to find a suitable virtual block for the
 *   given size and alignment.
 * - Reserve the requested block and update the list.
 * - Return the start address of the virtual block.
 */
static u64 get_va_block(struct hl_device *hdev, struct hl_va_range *va_range,
			u64 size, u64 hint_addr, u32 va_block_align)
{
	struct hl_vm_va_block *va_block, *new_va_block = NULL;
	u64 valid_start, valid_size, prev_start, prev_end, align_mask,
		res_valid_start = 0, res_valid_size = 0;
	bool add_prev = false;

	align_mask = ~((u64)va_block_align - 1);
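	/*
	 * Note: the mask computation above assumes va_block_align is a power
	 * of two (the page sizes used here are); e.g. an alignment of 0x1000
	 * yields a mask of ~0xfff.
	 */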

	/* check if hint_addr is aligned */
	if (hint_addr & (va_block_align - 1))
		hint_addr = 0;

	mutex_lock(&va_range->lock);

	print_va_list_locked(hdev, &va_range->list);

	list_for_each_entry(va_block, &va_range->list, node) {
		/* calc the first possible aligned addr */
		valid_start = va_block->start;

		if (valid_start & (va_block_align - 1)) {
			valid_start &= align_mask;
			valid_start += va_block_align;
			if (valid_start > va_block->end)
				continue;
		}

		valid_size = va_block->end - valid_start;

		if (valid_size >= size &&
			(!new_va_block || valid_size < res_valid_size)) {
			new_va_block = va_block;
			res_valid_start = valid_start;
			res_valid_size = valid_size;
		}

		if (hint_addr && hint_addr >= valid_start &&
				((hint_addr + size) <= va_block->end)) {
			new_va_block = va_block;
			res_valid_start = hint_addr;
			res_valid_size = valid_size;
			break;
		}
	}

	if (!new_va_block) {
		dev_err(hdev->dev, "no available va block for size %llu\n",
				size);
		goto out;
	}

	if (res_valid_start > new_va_block->start) {
		prev_start = new_va_block->start;
		prev_end = res_valid_start - 1;

		new_va_block->start = res_valid_start;
		new_va_block->size = res_valid_size;

		add_prev = true;
	}

	if (new_va_block->size > size) {
		new_va_block->start += size;
		new_va_block->size = new_va_block->end - new_va_block->start;
	} else {
		list_del(&new_va_block->node);
		kfree(new_va_block);
	}

	if (add_prev)
		add_va_block_locked(hdev, &va_range->list, prev_start,
				prev_end);

	print_va_list_locked(hdev, &va_range->list);
out:
	mutex_unlock(&va_range->lock);

	return res_valid_start;
}

/**
 * hl_reserve_va_block() - reserve a virtual block of a given size.
 * @hdev: pointer to the habanalabs device structure.
 * @ctx: current context
 * @type: virtual addresses range type.
 * @size: requested block size.
 * @alignment: required alignment in bytes of the virtual block start address,
 *             0 means no alignment.
 *
 * This function does the following:
 * - Iterate on the virtual block list to find a suitable virtual block for the
 *   given size and alignment.
 * - Reserve the requested block and update the list.
 * - Return the start address of the virtual block.
 */
u64 hl_reserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
		enum hl_va_range_type type, u32 size, u32 alignment)
{
	return get_va_block(hdev, ctx->va_range[type], size, 0,
			max(alignment, ctx->va_range[type]->page_size));
}

/**
 * hl_get_va_range_type() - get va_range type for the given address and size.
 * @ctx: current context.
 * @address: the start address of the area we want to validate.
 * @size: the size in bytes of the area we want to validate.
 * @type: returned va_range type.
 *
 * Return: 0 if the area is inside a valid range, -EINVAL otherwise.
 */
static int hl_get_va_range_type(struct hl_ctx *ctx, u64 address, u64 size,
			enum hl_va_range_type *type)
{
	int i;

	for (i = 0 ; i < HL_VA_RANGE_TYPE_MAX; i++) {
		if (hl_mem_area_inside_range(address, size,
				ctx->va_range[i]->start_addr,
				ctx->va_range[i]->end_addr)) {
			*type = i;
			return 0;
		}
	}

	return -EINVAL;
}

/*
 * hl_unreserve_va_block - wrapper for add_va_block to unreserve a va block
 *
 * @hdev: pointer to the habanalabs device structure
 * @ctx: current context
 * @start_addr: start virtual address of the block to unreserve
 * @size: size in bytes of the block to unreserve
 *
 * This function does the following:
 * - Takes the list lock and calls add_va_block_locked
 */
int hl_unreserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
		u64 start_addr, u64 size)
{
	enum hl_va_range_type type;
	int rc;

	rc = hl_get_va_range_type(ctx, start_addr, size, &type);
	if (rc) {
		dev_err(hdev->dev,
			"cannot find va_range for va %#llx size %llu",
			start_addr, size);
		return rc;
	}

	rc = add_va_block(hdev, ctx->va_range[type], start_addr,
			start_addr + size - 1);
	if (rc)
		dev_warn(hdev->dev,
			"add va block failed for vaddr: 0x%llx\n", start_addr);

	return rc;
}

/*
 * get_sg_info - get number of pages and the DMA address from SG list
 *
 * @sg       : the SG list
 * @dma_addr : pointer to DMA address to return
 *
 * Calculate the number of consecutive pages described by the SG list. Take the
 * offset of the address in the first page, add to it the length and round it up
 * to the number of needed pages.
 */
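/*
 * Worked example of the calculation (illustrative numbers): with a 4KB
 * PAGE_SIZE, an SG entry whose DMA address has an in-page offset of 0x800
 * and whose length is 0x2000 spans (0x800 + 0x2000 + 0xfff) >> 12 = 3 pages.
 */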
static u32 get_sg_info(struct scatterlist *sg, dma_addr_t *dma_addr)
{
	*dma_addr = sg_dma_address(sg);

	return ((((*dma_addr) & (PAGE_SIZE - 1)) + sg_dma_len(sg)) +
			(PAGE_SIZE - 1)) >> PAGE_SHIFT;
}

/*
 * init_phys_pg_pack_from_userptr - initialize physical page pack from host
 *                                  memory
 * @ctx: current context
 * @userptr: userptr to initialize from
 * @pphys_pg_pack: result pointer
 *
 * This function does the following:
 * - Pin the physical pages related to the given virtual block
 * - Create a physical page pack from the physical pages related to the given
 *   virtual block
 */
static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
				struct hl_userptr *userptr,
				struct hl_vm_phys_pg_pack **pphys_pg_pack)
{
	struct hl_vm_phys_pg_pack *phys_pg_pack;
	struct scatterlist *sg;
	dma_addr_t dma_addr;
	u64 page_mask, total_npages;
	u32 npages, page_size = PAGE_SIZE,
		huge_page_size = ctx->hdev->asic_prop.pmmu_huge.page_size;
	bool first = true, is_huge_page_opt = true;
	int rc, i, j;
	u32 pgs_in_huge_page = huge_page_size >> __ffs(page_size);

	phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL);
	if (!phys_pg_pack)
		return -ENOMEM;

	phys_pg_pack->vm_type = userptr->vm_type;
	phys_pg_pack->created_from_userptr = true;
	phys_pg_pack->asid = ctx->asid;
	atomic_set(&phys_pg_pack->mapping_cnt, 1);

	/* Only if all dma_addrs are aligned to 2MB and their
	 * size is at least 2MB, we can use huge page mapping.
	 * We limit the 2MB optimization to this condition,
	 * since later on we acquire the related VA range as one
	 * consecutive block.
	 */
	total_npages = 0;
	for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, i) {
		npages = get_sg_info(sg, &dma_addr);

		total_npages += npages;

		if ((npages % pgs_in_huge_page) ||
					(dma_addr & (huge_page_size - 1)))
			is_huge_page_opt = false;
	}

	if (is_huge_page_opt) {
		page_size = huge_page_size;
		do_div(total_npages, pgs_in_huge_page);
	}
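
	/*
	 * Illustrative numbers for the conversion above: with 4KB regular
	 * pages and 2MB huge pages, pgs_in_huge_page is 512, so an SG table
	 * covering 1024 4KB pages is folded into total_npages = 2 huge pages.
	 */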

	page_mask = ~(((u64) page_size) - 1);

	phys_pg_pack->pages = kvmalloc_array(total_npages, sizeof(u64),
						GFP_KERNEL);
	if (ZERO_OR_NULL_PTR(phys_pg_pack->pages)) {
		rc = -ENOMEM;
		goto page_pack_arr_mem_err;
	}

	phys_pg_pack->npages = total_npages;
	phys_pg_pack->page_size = page_size;
	phys_pg_pack->total_size = total_npages * page_size;

	j = 0;
	for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, i) {
		npages = get_sg_info(sg, &dma_addr);

		/* align down to physical page size and save the offset */
		if (first) {
			first = false;
			phys_pg_pack->offset = dma_addr & (page_size - 1);
			dma_addr &= page_mask;
		}

		while (npages) {
			phys_pg_pack->pages[j++] = dma_addr;
			dma_addr += page_size;

			if (is_huge_page_opt)
				npages -= pgs_in_huge_page;
			else
				npages--;
		}
	}

	*pphys_pg_pack = phys_pg_pack;

	return 0;

page_pack_arr_mem_err:
	kfree(phys_pg_pack);

	return rc;
}

/*
 * map_phys_pg_pack - maps the physical page pack.
 * @ctx: current context
 * @vaddr: start address of the virtual area to map from
 * @phys_pg_pack: the pack of physical pages to map to
 *
 * This function does the following:
 * - Maps each chunk of virtual memory to matching physical chunk
 * - Stores number of successful mappings in the given argument
 * - Returns 0 on success, error code otherwise
 */
static int map_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
				struct hl_vm_phys_pg_pack *phys_pg_pack)
{
	struct hl_device *hdev = ctx->hdev;
	u64 next_vaddr = vaddr, paddr, mapped_pg_cnt = 0, i;
	u32 page_size = phys_pg_pack->page_size;
	int rc = 0;

	for (i = 0 ; i < phys_pg_pack->npages ; i++) {
		paddr = phys_pg_pack->pages[i];

		rc = hl_mmu_map_page(ctx, next_vaddr, paddr, page_size,
				(i + 1) == phys_pg_pack->npages);
		if (rc) {
			dev_err(hdev->dev,
				"map failed for handle %u, npages: %llu, mapped: %llu",
				phys_pg_pack->handle, phys_pg_pack->npages,
				mapped_pg_cnt);
			goto err;
		}

		mapped_pg_cnt++;
		next_vaddr += page_size;
	}

	return 0;

err:
	next_vaddr = vaddr;
	for (i = 0 ; i < mapped_pg_cnt ; i++) {
		if (hl_mmu_unmap_page(ctx, next_vaddr, page_size,
					(i + 1) == mapped_pg_cnt))
			dev_warn_ratelimited(hdev->dev,
				"failed to unmap handle %u, va: 0x%llx, pa: 0x%llx, page size: %u\n",
					phys_pg_pack->handle, next_vaddr,
					phys_pg_pack->pages[i], page_size);

		next_vaddr += page_size;
	}

	return rc;
}

/*
 * unmap_phys_pg_pack - unmaps the physical page pack
 * @ctx: current context
 * @vaddr: start address of the virtual area to unmap
 * @phys_pg_pack: the pack of physical pages to unmap
 */
static void unmap_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
				struct hl_vm_phys_pg_pack *phys_pg_pack)
{
	struct hl_device *hdev = ctx->hdev;
	u64 next_vaddr, i;
	u32 page_size;

	page_size = phys_pg_pack->page_size;
	next_vaddr = vaddr;

	for (i = 0 ; i < phys_pg_pack->npages ; i++, next_vaddr += page_size) {
		if (hl_mmu_unmap_page(ctx, next_vaddr, page_size,
				       (i + 1) == phys_pg_pack->npages))
			dev_warn_ratelimited(hdev->dev,
			"unmap failed for vaddr: 0x%llx\n", next_vaddr);

		/*
		 * unmapping on Palladium can be really long, so avoid a CPU
		 * soft lockup bug by sleeping a little between unmapping pages
		 */
		if (hdev->pldm)
			usleep_range(500, 1000);
	}
}

static int get_paddr_from_handle(struct hl_ctx *ctx, struct hl_mem_in *args,
			u64 *paddr)
{
	struct hl_device *hdev = ctx->hdev;
	struct hl_vm *vm = &hdev->vm;
	struct hl_vm_phys_pg_pack *phys_pg_pack;
	u32 handle;

	handle = lower_32_bits(args->map_device.handle);
	spin_lock(&vm->idr_lock);
	phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
	if (!phys_pg_pack) {
		spin_unlock(&vm->idr_lock);
		dev_err(hdev->dev, "no match for handle %u\n", handle);
		return -EINVAL;
	}

	*paddr = phys_pg_pack->pages[0];

	spin_unlock(&vm->idr_lock);

	return 0;
}

/*
 * map_device_va - map the given memory
 *
 * @ctx         : current context
 * @args        : host parameters with handle/host virtual address
 * @device_addr : pointer to result device virtual address
 *
 * This function does the following:
 * - If given a physical device memory handle, map to a device virtual block
 *   and return the start address of this block
 * - If given a host virtual address and size, find the related physical pages,
 *   map a device virtual block to these pages and return the start address of
 *   this block
 */
static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
		u64 *device_addr)
{
	struct hl_device *hdev = ctx->hdev;
	struct hl_vm *vm = &hdev->vm;
	struct hl_vm_phys_pg_pack *phys_pg_pack;
	struct hl_userptr *userptr = NULL;
	struct hl_vm_hash_node *hnode;
	struct hl_va_range *va_range;
	enum vm_type_t *vm_type;
	u64 ret_vaddr, hint_addr;
	u32 handle = 0, va_block_align;
	int rc;
	bool is_userptr = args->flags & HL_MEM_USERPTR;

	/* Assume failure */
	*device_addr = 0;

	if (is_userptr) {
		u64 addr = args->map_host.host_virt_addr,
			size = args->map_host.mem_size;
		u32 page_size = hdev->asic_prop.pmmu.page_size,
			huge_page_size = hdev->asic_prop.pmmu_huge.page_size;

		rc = dma_map_host_va(hdev, addr, size, &userptr);
		if (rc) {
			dev_err(hdev->dev, "failed to get userptr from va\n");
			return rc;
		}

		rc = init_phys_pg_pack_from_userptr(ctx, userptr,
				&phys_pg_pack);
		if (rc) {
			dev_err(hdev->dev,
				"unable to init page pack for vaddr 0x%llx\n",
				addr);
			goto init_page_pack_err;
		}

		vm_type = (enum vm_type_t *) userptr;
		hint_addr = args->map_host.hint_addr;
		handle = phys_pg_pack->handle;

		/* get required alignment */
		if (phys_pg_pack->page_size == page_size) {
			va_range = ctx->va_range[HL_VA_RANGE_TYPE_HOST];

			/*
			 * huge page alignment may be needed in case of regular
			 * page mapping, depending on the host VA alignment
			 */
			if (addr & (huge_page_size - 1))
				va_block_align = page_size;
			else
				va_block_align = huge_page_size;
		} else {
			/*
			 * huge page alignment is needed in case of huge page
			 * mapping
			 */
			va_range = ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE];
			va_block_align = huge_page_size;
		}
	} else {
		handle = lower_32_bits(args->map_device.handle);

		spin_lock(&vm->idr_lock);
		phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
		if (!phys_pg_pack) {
			spin_unlock(&vm->idr_lock);
			dev_err(hdev->dev,
				"no match for handle %u\n", handle);
			return -EINVAL;
		}

		/* increment now to avoid freeing device memory while mapping */
		atomic_inc(&phys_pg_pack->mapping_cnt);

		spin_unlock(&vm->idr_lock);

		vm_type = (enum vm_type_t *) phys_pg_pack;

		hint_addr = args->map_device.hint_addr;

		/* DRAM VA alignment is the same as the DRAM page size */
		va_range = ctx->va_range[HL_VA_RANGE_TYPE_DRAM];
		va_block_align = hdev->asic_prop.dmmu.page_size;
	}

	/*
	 * relevant for mapping device physical memory only, as host memory is
	 * implicitly shared
	 */
	if (!is_userptr && !(phys_pg_pack->flags & HL_MEM_SHARED) &&
			phys_pg_pack->asid != ctx->asid) {
		dev_err(hdev->dev,
			"Failed to map memory, handle %u is not shared\n",
			handle);
		rc = -EPERM;
		goto shared_err;
	}

	hnode = kzalloc(sizeof(*hnode), GFP_KERNEL);
	if (!hnode) {
		rc = -ENOMEM;
		goto hnode_err;
	}

	ret_vaddr = get_va_block(hdev, va_range, phys_pg_pack->total_size,
					hint_addr, va_block_align);
	if (!ret_vaddr) {
		dev_err(hdev->dev, "no available va block for handle %u\n",
				handle);
		rc = -ENOMEM;
		goto va_block_err;
	}

	mutex_lock(&ctx->mmu_lock);

	rc = map_phys_pg_pack(ctx, ret_vaddr, phys_pg_pack);
	if (rc) {
		mutex_unlock(&ctx->mmu_lock);
		dev_err(hdev->dev, "mapping page pack failed for handle %u\n",
				handle);
		goto map_err;
	}

	rc = hdev->asic_funcs->mmu_invalidate_cache(hdev, false, *vm_type);

	mutex_unlock(&ctx->mmu_lock);

	if (rc) {
		dev_err(hdev->dev,
			"mapping handle %u failed due to MMU cache invalidation\n",
			handle);
		goto map_err;
	}

	ret_vaddr += phys_pg_pack->offset;

	hnode->ptr = vm_type;
	hnode->vaddr = ret_vaddr;

	mutex_lock(&ctx->mem_hash_lock);
	hash_add(ctx->mem_hash, &hnode->node, ret_vaddr);
	mutex_unlock(&ctx->mem_hash_lock);

	*device_addr = ret_vaddr;

	if (is_userptr)
		free_phys_pg_pack(hdev, phys_pg_pack);

	return 0;

map_err:
	if (add_va_block(hdev, va_range, ret_vaddr,
				ret_vaddr + phys_pg_pack->total_size - 1))
		dev_warn(hdev->dev,
			"release va block failed for handle 0x%x, vaddr: 0x%llx\n",
				handle, ret_vaddr);

va_block_err:
	kfree(hnode);
hnode_err:
shared_err:
	atomic_dec(&phys_pg_pack->mapping_cnt);
	if (is_userptr)
		free_phys_pg_pack(hdev, phys_pg_pack);
init_page_pack_err:
	if (is_userptr)
		dma_unmap_host_va(hdev, userptr);

	return rc;
}

/*
 * unmap_device_va - unmap the given device virtual address
 *
 * @ctx      : current context
 * @vaddr    : device virtual address to unmap
 * @ctx_free : true if in context free flow, false otherwise.
 *
 * This function does the following:
 * - Unmap the physical pages related to the given virtual address
 * - return the device virtual block to the virtual block list
 */
static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr, bool ctx_free)
{
	struct hl_device *hdev = ctx->hdev;
	struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
	struct hl_vm_hash_node *hnode = NULL;
	struct hl_userptr *userptr = NULL;
	struct hl_va_range *va_range;
	enum vm_type_t *vm_type;
	bool is_userptr;
	int rc = 0;

	/* protect from double entrance */
	mutex_lock(&ctx->mem_hash_lock);
	hash_for_each_possible(ctx->mem_hash, hnode, node, (unsigned long)vaddr)
		if (vaddr == hnode->vaddr)
			break;

	if (!hnode) {
		mutex_unlock(&ctx->mem_hash_lock);
		dev_err(hdev->dev,
			"unmap failed, no mem hnode for vaddr 0x%llx\n",
			vaddr);
		return -EINVAL;
	}

	hash_del(&hnode->node);
	mutex_unlock(&ctx->mem_hash_lock);

	vm_type = hnode->ptr;

	if (*vm_type == VM_TYPE_USERPTR) {
		is_userptr = true;
		userptr = hnode->ptr;
		rc = init_phys_pg_pack_from_userptr(ctx, userptr,
							&phys_pg_pack);
		if (rc) {
			dev_err(hdev->dev,
				"unable to init page pack for vaddr 0x%llx\n",
				vaddr);
			goto vm_type_err;
		}

		if (phys_pg_pack->page_size ==
					hdev->asic_prop.pmmu.page_size)
			va_range = ctx->va_range[HL_VA_RANGE_TYPE_HOST];
		else
			va_range = ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE];
	} else if (*vm_type == VM_TYPE_PHYS_PACK) {
		is_userptr = false;
		va_range = ctx->va_range[HL_VA_RANGE_TYPE_DRAM];
		phys_pg_pack = hnode->ptr;
	} else {
		dev_warn(hdev->dev,
			"unmap failed, unknown vm desc for vaddr 0x%llx\n",
				vaddr);
		rc = -EFAULT;
		goto vm_type_err;
	}

	if (atomic_read(&phys_pg_pack->mapping_cnt) == 0) {
		dev_err(hdev->dev, "vaddr 0x%llx is not mapped\n", vaddr);
		rc = -EINVAL;
		goto mapping_cnt_err;
	}

	vaddr &= ~(((u64) phys_pg_pack->page_size) - 1);

	mutex_lock(&ctx->mmu_lock);

	unmap_phys_pg_pack(ctx, vaddr, phys_pg_pack);

	/*
	 * During context free this function is called in a loop to clean all
	 * the context mappings. Hence the cache invalidation can be called once
	 * at the loop end rather than for each iteration
	 */
	if (!ctx_free)
		rc = hdev->asic_funcs->mmu_invalidate_cache(hdev, true,
				*vm_type);

	mutex_unlock(&ctx->mmu_lock);

	/*
	 * If the context is closing we don't need to check for the MMU cache
	 * invalidation return code and update the VA free list as in this flow
	 * we invalidate the MMU cache outside of this unmap function and the VA
	 * free list will be freed anyway.
	 */
	if (!ctx_free) {
		int tmp_rc;

		if (rc)
			dev_err(hdev->dev,
				"unmapping vaddr 0x%llx failed due to MMU cache invalidation\n",
				vaddr);

		tmp_rc = add_va_block(hdev, va_range, vaddr,
					vaddr + phys_pg_pack->total_size - 1);
		if (tmp_rc) {
			dev_warn(hdev->dev,
				"add va block failed for vaddr: 0x%llx\n",
				vaddr);
			if (!rc)
				rc = tmp_rc;
		}
	}

	atomic_dec(&phys_pg_pack->mapping_cnt);
	kfree(hnode);

	if (is_userptr) {
		free_phys_pg_pack(hdev, phys_pg_pack);
		dma_unmap_host_va(hdev, userptr);
	}

	return rc;

mapping_cnt_err:
	if (is_userptr)
		free_phys_pg_pack(hdev, phys_pg_pack);
vm_type_err:
	mutex_lock(&ctx->mem_hash_lock);
	hash_add(ctx->mem_hash, &hnode->node, vaddr);
	mutex_unlock(&ctx->mem_hash_lock);

	return rc;
}

static int mem_ioctl_no_mmu(struct hl_fpriv *hpriv, union hl_mem_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_ctx *ctx = hpriv->ctx;
	u64 device_addr = 0;
	u32 handle = 0;
	int rc;

	switch (args->in.op) {
	case HL_MEM_OP_ALLOC:
		if (args->in.alloc.mem_size == 0) {
			dev_err(hdev->dev,
				"alloc size must be larger than 0\n");
			rc = -EINVAL;
			goto out;
		}

		/* Force contiguous as there are no real MMU
		 * translations to overcome physical memory gaps
		 */
		args->in.flags |= HL_MEM_CONTIGUOUS;
		rc = alloc_device_memory(ctx, &args->in, &handle);

		memset(args, 0, sizeof(*args));
		args->out.handle = (__u64) handle;
		break;

	case HL_MEM_OP_FREE:
		rc = free_device_memory(ctx, args->in.free.handle);
		break;

	case HL_MEM_OP_MAP:
		if (args->in.flags & HL_MEM_USERPTR) {
			device_addr = args->in.map_host.host_virt_addr;
			rc = 0;
		} else {
			rc = get_paddr_from_handle(ctx, &args->in,
							&device_addr);
		}

		memset(args, 0, sizeof(*args));
		args->out.device_virt_addr = device_addr;
		break;

	case HL_MEM_OP_UNMAP:
		rc = 0;
		break;

	default:
		dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n");
		rc = -ENOTTY;
		break;
	}

out:
	return rc;
}

int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data)
{
	enum hl_device_status status;
	union hl_mem_args *args = data;
	struct hl_device *hdev = hpriv->hdev;
	struct hl_ctx *ctx = hpriv->ctx;
	u64 device_addr = 0;
	u32 handle = 0;
	int rc;

	if (!hl_device_operational(hdev, &status)) {
		dev_warn_ratelimited(hdev->dev,
			"Device is %s. Can't execute MEMORY IOCTL\n",
			hdev->status[status]);
		return -EBUSY;
	}

	if (!hdev->mmu_enable)
		return mem_ioctl_no_mmu(hpriv, args);

	switch (args->in.op) {
	case HL_MEM_OP_ALLOC:
		if (args->in.alloc.mem_size == 0) {
			dev_err(hdev->dev,
				"alloc size must be larger than 0\n");
			rc = -EINVAL;
			goto out;
		}

		/* If DRAM does not support virtual memory the driver won't
		 * handle the allocation/freeing of that memory. However, for
		 * system administration/monitoring purposes, the driver will
		 * keep track of the amount of DRAM memory that is allocated
		 * and freed by the user. Because this code totally relies on
		 * the user's input, the driver can't ensure the validity
		 * of this accounting.
		 */
		if (!hdev->asic_prop.dram_supports_virtual_memory) {
			atomic64_add(args->in.alloc.mem_size,
					&ctx->dram_phys_mem);
			atomic64_add(args->in.alloc.mem_size,
					&hdev->dram_used_mem);

			dev_dbg(hdev->dev, "DRAM alloc is not supported\n");
			rc = 0;

			memset(args, 0, sizeof(*args));
			args->out.handle = 0;
			goto out;
		}

		rc = alloc_device_memory(ctx, &args->in, &handle);

		memset(args, 0, sizeof(*args));
		args->out.handle = (__u64) handle;
		break;

	case HL_MEM_OP_FREE:
		/* If DRAM does not support virtual memory the driver won't
		 * handle the allocation/freeing of that memory. However, for
		 * system administration/monitoring purposes, the driver will
		 * keep track of the amount of DRAM memory that is allocated
		 * and freed by the user. Because this code totally relies on
		 * the user's input, the driver can't ensure the validity
		 * of this accounting.
		 */
		if (!hdev->asic_prop.dram_supports_virtual_memory) {
			atomic64_sub(args->in.alloc.mem_size,
					&ctx->dram_phys_mem);
			atomic64_sub(args->in.alloc.mem_size,
					&hdev->dram_used_mem);

			dev_dbg(hdev->dev, "DRAM alloc is not supported\n");
			rc = 0;

			goto out;
		}

		rc = free_device_memory(ctx, args->in.free.handle);
		break;

	case HL_MEM_OP_MAP:
		rc = map_device_va(ctx, &args->in, &device_addr);

		memset(args, 0, sizeof(*args));
		args->out.device_virt_addr = device_addr;
		break;

	case HL_MEM_OP_UNMAP:
		rc = unmap_device_va(ctx, args->in.unmap.device_virt_addr,
					false);
		break;

	default:
		dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n");
		rc = -ENOTTY;
		break;
	}

out:
	return rc;
}

static int get_user_memory(struct hl_device *hdev, u64 addr, u64 size,
				u32 npages, u64 start, u32 offset,
				struct hl_userptr *userptr)
{
	int rc;

	if (!access_ok((void __user *) (uintptr_t) addr, size)) {
		dev_err(hdev->dev, "user pointer is invalid - 0x%llx\n", addr);
		return -EFAULT;
	}

	userptr->vec = frame_vector_create(npages);
	if (!userptr->vec) {
		dev_err(hdev->dev, "Failed to create frame vector\n");
		return -ENOMEM;
	}

	rc = get_vaddr_frames(start, npages, FOLL_FORCE | FOLL_WRITE,
				userptr->vec);

	if (rc != npages) {
		dev_err(hdev->dev,
			"Failed to map host memory, user ptr probably wrong\n");
		if (rc < 0)
			goto destroy_framevec;
		rc = -EFAULT;
		goto put_framevec;
	}

	if (frame_vector_to_pages(userptr->vec) < 0) {
		dev_err(hdev->dev,
			"Failed to translate frame vector to pages\n");
		rc = -EFAULT;
		goto put_framevec;
	}

	rc = sg_alloc_table_from_pages(userptr->sgt,
					frame_vector_pages(userptr->vec),
					npages, offset, size, GFP_ATOMIC);
	if (rc < 0) {
		dev_err(hdev->dev, "failed to create SG table from pages\n");
		goto put_framevec;
	}

	return 0;

put_framevec:
	put_vaddr_frames(userptr->vec);
destroy_framevec:
	frame_vector_destroy(userptr->vec);
	return rc;
}

/*
 * hl_pin_host_memory - pins a chunk of host memory.
 * @hdev: pointer to the habanalabs device structure
 * @addr: the host virtual address of the memory area
 * @size: the size of the memory area
 * @userptr: pointer to hl_userptr structure
 *
 * This function does the following:
 * - Pins the physical pages
 * - Create an SG list from those pages
 */
int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
					struct hl_userptr *userptr)
{
	u64 start, end;
	u32 npages, offset;
	int rc;

	if (!size) {
		dev_err(hdev->dev, "size to pin is invalid - %llu\n", size);
		return -EINVAL;
	}

	/*
	 * If the combination of the address and size requested for this memory
	 * region causes an integer overflow, return error.
	 */
	if (((addr + size) < addr) ||
			PAGE_ALIGN(addr + size) < (addr + size)) {
		dev_err(hdev->dev,
			"user pointer 0x%llx + %llu causes integer overflow\n",
			addr, size);
		return -EINVAL;
	}

	/*
	 * This function can be called also from data path, hence use atomic
	 * always as it is not a big allocation.
	 */
	userptr->sgt = kzalloc(sizeof(*userptr->sgt), GFP_ATOMIC);
	if (!userptr->sgt)
		return -ENOMEM;

	start = addr & PAGE_MASK;
	offset = addr & ~PAGE_MASK;
	end = PAGE_ALIGN(addr + size);
	npages = (end - start) >> PAGE_SHIFT;
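	/*
	 * Example of the calculation above (illustrative values, 4KB pages):
	 * addr = 0x1234 and size = 0x3000 give start = 0x1000, offset = 0x234,
	 * end = 0x5000 and npages = 4.
	 */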

	userptr->size = size;
	userptr->addr = addr;
	userptr->dma_mapped = false;
	INIT_LIST_HEAD(&userptr->job_node);

	rc = get_user_memory(hdev, addr, size, npages, start, offset,
				userptr);
	if (rc) {
		dev_err(hdev->dev,
			"failed to get user memory for address 0x%llx\n",
			addr);
		goto free_sgt;
	}

	hl_debugfs_add_userptr(hdev, userptr);

	return 0;

free_sgt:
	kfree(userptr->sgt);
	return rc;
}

/*
 * hl_unpin_host_memory - unpins a chunk of host memory.
 * @hdev: pointer to the habanalabs device structure
 * @userptr: pointer to hl_userptr structure
 *
 * This function does the following:
 * - Unpins the physical pages related to the host memory
 * - Free the SG list
 */
void hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr)
{
	struct page **pages;

	hl_debugfs_remove_userptr(hdev, userptr);

	if (userptr->dma_mapped)
		hdev->asic_funcs->hl_dma_unmap_sg(hdev, userptr->sgt->sgl,
						userptr->sgt->nents,
						userptr->dir);

	pages = frame_vector_pages(userptr->vec);
	if (!IS_ERR(pages)) {
		int i;

		for (i = 0; i < frame_vector_count(userptr->vec); i++)
			set_page_dirty_lock(pages[i]);
	}
	put_vaddr_frames(userptr->vec);
	frame_vector_destroy(userptr->vec);

	list_del(&userptr->job_node);

	sg_free_table(userptr->sgt);
	kfree(userptr->sgt);
}

/*
 * hl_userptr_delete_list - clear userptr list
 *
 * @hdev         : pointer to the habanalabs device structure
 * @userptr_list : pointer to the list to clear
 *
 * This function does the following:
 * - Iterates over the list and unpins the host memory and frees the userptr
 *   structure.
 */
void hl_userptr_delete_list(struct hl_device *hdev,
				struct list_head *userptr_list)
{
	struct hl_userptr *userptr, *tmp;

	list_for_each_entry_safe(userptr, tmp, userptr_list, job_node) {
		hl_unpin_host_memory(hdev, userptr);
		kfree(userptr);
	}

	INIT_LIST_HEAD(userptr_list);
}

/*
 * hl_userptr_is_pinned - returns whether the given userptr is pinned
 *
 * @hdev         : pointer to the habanalabs device structure
 * @addr         : user address of the area to check
 * @size         : size of the area to check
 * @userptr_list : pointer to the list to search in
 * @userptr      : pointer to userptr to check
 *
 * This function does the following:
 * - Iterates over the list and checks if the given userptr is in it, meaning
 *   it is pinned. If so, returns true, otherwise returns false.
 */
bool hl_userptr_is_pinned(struct hl_device *hdev, u64 addr,
				u32 size, struct list_head *userptr_list,
				struct hl_userptr **userptr)
{
	list_for_each_entry((*userptr), userptr_list, job_node) {
		if ((addr == (*userptr)->addr) && (size == (*userptr)->size))
			return true;
	}

	return false;
}

/*
 * va_range_init - initialize virtual addresses range
 * @hdev: pointer to the habanalabs device structure
 * @va_range: pointer to the range to initialize
 * @start: range start address
 * @end: range end address
 * @page_size: page size of this range
 *
 * This function does the following:
 * - Initializes the virtual addresses list of the given range with the given
 *   addresses.
 */
static int va_range_init(struct hl_device *hdev, struct hl_va_range *va_range,
				u64 start, u64 end, u32 page_size)
{
	int rc;

	INIT_LIST_HEAD(&va_range->list);

	/* PAGE_SIZE alignment */

	if (start & (PAGE_SIZE - 1)) {
		start &= PAGE_MASK;
		start += PAGE_SIZE;
	}

	if (end & (PAGE_SIZE - 1))
		end &= PAGE_MASK;

	if (start >= end) {
		dev_err(hdev->dev, "too small vm range for va list\n");
		return -EFAULT;
	}

	rc = add_va_block(hdev, va_range, start, end);

	if (rc) {
		dev_err(hdev->dev, "Failed to init host va list\n");
		return rc;
	}

	va_range->start_addr = start;
	va_range->end_addr = end;
	va_range->page_size = page_size;

	return 0;
}

/*
 * va_range_fini() - clear a virtual addresses range
 * @hdev: pointer to the habanalabs structure
 * @va_range: pointer to virtual addresses range
 *
 * This function does the following:
 * - Frees the virtual addresses block list and its lock
 */
static void va_range_fini(struct hl_device *hdev, struct hl_va_range *va_range)
{
	mutex_lock(&va_range->lock);
	clear_va_list_locked(hdev, &va_range->list);
	mutex_unlock(&va_range->lock);

	mutex_destroy(&va_range->lock);
	kfree(va_range);
}

/*
 * vm_ctx_init_with_ranges() - initialize virtual memory for context
 * @ctx: pointer to the habanalabs context structure
 * @host_range_start: host virtual addresses range start.
 * @host_range_end: host virtual addresses range end.
 * @host_page_size: host page size.
 * @host_huge_range_start: host virtual addresses range start for memory
 *                         allocated with huge pages.
 * @host_huge_range_end: host virtual addresses range end for memory allocated
 *                        with huge pages.
 * @host_huge_page_size: host huge page size.
 * @dram_range_start: dram virtual addresses range start.
 * @dram_range_end: dram virtual addresses range end.
 * @dram_page_size: dram page size.
 *
 * This function initializes the following:
 * - MMU for context
 * - Virtual address to area descriptor hashtable
 * - Virtual block list of available virtual memory
 */
static int vm_ctx_init_with_ranges(struct hl_ctx *ctx,
					u64 host_range_start,
					u64 host_range_end,
					u32 host_page_size,
					u64 host_huge_range_start,
					u64 host_huge_range_end,
					u32 host_huge_page_size,
					u64 dram_range_start,
					u64 dram_range_end,
					u32 dram_page_size)
{
	struct hl_device *hdev = ctx->hdev;
	int i, rc;

	for (i = 0 ; i < HL_VA_RANGE_TYPE_MAX ; i++) {
		ctx->va_range[i] =
			kzalloc(sizeof(struct hl_va_range), GFP_KERNEL);
		if (!ctx->va_range[i]) {
			rc = -ENOMEM;
			goto free_va_range;
		}
	}

	rc = hl_mmu_ctx_init(ctx);
	if (rc) {
		dev_err(hdev->dev, "failed to init context %d\n", ctx->asid);
		goto free_va_range;
	}

	mutex_init(&ctx->mem_hash_lock);
	hash_init(ctx->mem_hash);

	mutex_init(&ctx->va_range[HL_VA_RANGE_TYPE_HOST]->lock);

	rc = va_range_init(hdev, ctx->va_range[HL_VA_RANGE_TYPE_HOST],
			host_range_start, host_range_end, host_page_size);
	if (rc) {
		dev_err(hdev->dev, "failed to init host vm range\n");
		goto mmu_ctx_fini;
	}

	if (hdev->pmmu_huge_range) {
		mutex_init(&ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->lock);

		rc = va_range_init(hdev,
			ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE],
			host_huge_range_start, host_huge_range_end,
			host_huge_page_size);
		if (rc) {
			dev_err(hdev->dev,
				"failed to init host huge vm range\n");
			goto clear_host_va_range;
		}
	} else {
		kfree(ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]);
		ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE] =
				ctx->va_range[HL_VA_RANGE_TYPE_HOST];
	}

	mutex_init(&ctx->va_range[HL_VA_RANGE_TYPE_DRAM]->lock);

	rc = va_range_init(hdev, ctx->va_range[HL_VA_RANGE_TYPE_DRAM],
			dram_range_start, dram_range_end, dram_page_size);
	if (rc) {
		dev_err(hdev->dev, "failed to init dram vm range\n");
		goto clear_host_huge_va_range;
	}

	hl_debugfs_add_ctx_mem_hash(hdev, ctx);

	return 0;

clear_host_huge_va_range:
	mutex_destroy(&ctx->va_range[HL_VA_RANGE_TYPE_DRAM]->lock);

	if (hdev->pmmu_huge_range) {
		mutex_lock(&ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->lock);
		clear_va_list_locked(hdev,
			&ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->list);
		mutex_unlock(&ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->lock);
	}
clear_host_va_range:
	if (hdev->pmmu_huge_range)
		mutex_destroy(&ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->lock);
	mutex_lock(&ctx->va_range[HL_VA_RANGE_TYPE_HOST]->lock);
	clear_va_list_locked(hdev, &ctx->va_range[HL_VA_RANGE_TYPE_HOST]->list);
	mutex_unlock(&ctx->va_range[HL_VA_RANGE_TYPE_HOST]->lock);
mmu_ctx_fini:
	mutex_destroy(&ctx->va_range[HL_VA_RANGE_TYPE_HOST]->lock);
	mutex_destroy(&ctx->mem_hash_lock);
	hl_mmu_ctx_fini(ctx);
free_va_range:
	for (i = 0 ; i < HL_VA_RANGE_TYPE_MAX ; i++)
		kfree(ctx->va_range[i]);

	return rc;
}

int hl_vm_ctx_init(struct hl_ctx *ctx)
{
	struct asic_fixed_properties *prop = &ctx->hdev->asic_prop;
	u64 host_range_start, host_range_end, host_huge_range_start,
		host_huge_range_end, dram_range_start, dram_range_end;
	u32 host_page_size, host_huge_page_size, dram_page_size;

	atomic64_set(&ctx->dram_phys_mem, 0);

	/*
	 * - If MMU is enabled, init the ranges as usual.
	 * - If MMU is disabled, in case of host mapping, the returned address
	 *   is the given one.
	 *   In case of DRAM mapping, the returned address is the physical
	 *   address of the memory related to the given handle.
	 */
	if (!ctx->hdev->mmu_enable)
		return 0;

	dram_range_start = prop->dmmu.start_addr;
	dram_range_end = prop->dmmu.end_addr;
	dram_page_size = prop->dmmu.page_size;
	host_range_start = prop->pmmu.start_addr;
	host_range_end = prop->pmmu.end_addr;
	host_page_size = prop->pmmu.page_size;
	host_huge_range_start = prop->pmmu_huge.start_addr;
	host_huge_range_end = prop->pmmu_huge.end_addr;
	host_huge_page_size = prop->pmmu_huge.page_size;

	return vm_ctx_init_with_ranges(ctx, host_range_start, host_range_end,
			host_page_size, host_huge_range_start,
			host_huge_range_end, host_huge_page_size,
			dram_range_start, dram_range_end, dram_page_size);
}

/*
 * hl_vm_ctx_fini - virtual memory teardown of context
 *
 * @ctx : pointer to the habanalabs context structure
 *
 * This function performs teardown of the following:
 * - Virtual block list of available virtual memory
 * - Virtual address to area descriptor hashtable
 * - MMU for context
 *
 * In addition this function does the following:
 * - Unmaps the existing hashtable nodes if the hashtable is not empty. The
 *   hashtable should be empty as no valid mappings should exist at this
 *   point.
 * - Frees any existing physical page list from the idr which relates to the
 *   current context asid.
 * - This function checks the virtual block list for correctness. At this point
 *   the list should contain one element which describes the whole virtual
 *   memory range of the context. Otherwise, a warning is printed.
 */
void hl_vm_ctx_fini(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	struct hl_vm *vm = &hdev->vm;
	struct hl_vm_phys_pg_pack *phys_pg_list;
	struct hl_vm_hash_node *hnode;
	struct hlist_node *tmp_node;
	int i;

	if (!ctx->hdev->mmu_enable)
		return;

	hl_debugfs_remove_ctx_mem_hash(hdev, ctx);

	/*
	 * Clearly something went wrong on hard reset so no point in printing
	 * another side effect error
	 */
	if (!hdev->hard_reset_pending && !hash_empty(ctx->mem_hash))
		dev_notice(hdev->dev,
			"user released device without removing its memory mappings\n");

	hash_for_each_safe(ctx->mem_hash, i, tmp_node, hnode, node) {
		dev_dbg(hdev->dev,
			"hl_mem_hash_node of vaddr 0x%llx of asid %d is still alive\n",
			hnode->vaddr, ctx->asid);
		unmap_device_va(ctx, hnode->vaddr, true);
	}

	/* invalidate the cache once after the unmapping loop */
	hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR);
	hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_PHYS_PACK);

	spin_lock(&vm->idr_lock);
	idr_for_each_entry(&vm->phys_pg_pack_handles, phys_pg_list, i)
		if (phys_pg_list->asid == ctx->asid) {
			dev_dbg(hdev->dev,
				"page list 0x%px of asid %d is still alive\n",
				phys_pg_list, ctx->asid);
			atomic64_sub(phys_pg_list->total_size,
					&hdev->dram_used_mem);
			free_phys_pg_pack(hdev, phys_pg_list);
			idr_remove(&vm->phys_pg_pack_handles, i);
		}
	spin_unlock(&vm->idr_lock);

	va_range_fini(hdev, ctx->va_range[HL_VA_RANGE_TYPE_DRAM]);
	va_range_fini(hdev, ctx->va_range[HL_VA_RANGE_TYPE_HOST]);

	if (hdev->pmmu_huge_range)
		va_range_fini(hdev, ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]);

	mutex_destroy(&ctx->mem_hash_lock);
	hl_mmu_ctx_fini(ctx);

	/* In this case we need to clear the global accounting of DRAM usage
	 * because the user notifies us on allocations. If the user is no more,
	 * all DRAM is available
	 */
	if (!ctx->hdev->asic_prop.dram_supports_virtual_memory)
		atomic64_set(&ctx->hdev->dram_used_mem, 0);
}

/*
 * hl_vm_init - initialize virtual memory module
 *
 * @hdev : pointer to the habanalabs device structure
 *
 * This function initializes the following:
 * - MMU module
 * - DRAM physical pages pool of 2MB
 * - Idr for device memory allocation handles
 */
int hl_vm_init(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_vm *vm = &hdev->vm;
	int rc;

	vm->dram_pg_pool = gen_pool_create(__ffs(prop->dram_page_size), -1);
	if (!vm->dram_pg_pool) {
		dev_err(hdev->dev, "Failed to create dram page pool\n");
		return -ENOMEM;
	}

	kref_init(&vm->dram_pg_pool_refcount);

	rc = gen_pool_add(vm->dram_pg_pool, prop->dram_user_base_address,
			prop->dram_end_address - prop->dram_user_base_address,
			-1);

	if (rc) {
		dev_err(hdev->dev,
			"Failed to add memory to dram page pool %d\n", rc);
		goto pool_add_err;
	}

	spin_lock_init(&vm->idr_lock);
	idr_init(&vm->phys_pg_pack_handles);

	atomic64_set(&hdev->dram_used_mem, 0);

	vm->init_done = true;

	return 0;

pool_add_err:
	gen_pool_destroy(vm->dram_pg_pool);

	return rc;
}

/*
 * hl_vm_fini - virtual memory module teardown
 *
 * @hdev : pointer to the habanalabs device structure
 *
 * This function performs teardown of the following:
 * - Idr for device memory allocation handles
 * - DRAM physical pages pool of 2MB
 * - MMU module
 */
void hl_vm_fini(struct hl_device *hdev)
{
	struct hl_vm *vm = &hdev->vm;

	if (!vm->init_done)
		return;

	/*
	 * At this point all the contexts should be freed and hence no DRAM
	 * memory should be in use. Hence the DRAM pool should be freed here.
	 */
	if (kref_put(&vm->dram_pg_pool_refcount, dram_pg_pool_do_release) != 1)
		dev_warn(hdev->dev, "dram_pg_pool was not destroyed on %s\n",
				__func__);

	vm->init_done = false;
}