/*
 * Copyright 2014-2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#define pr_fmt(fmt) "kfd2kgd: " fmt

#include <linux/list.h>
#include <linux/pagemap.h>
#include <linux/sched/mm.h>
#include <linux/dma-buf.h>
#include "amdgpu_object.h"
#include "amdgpu_vm.h"
#include "amdgpu_amdkfd.h"
/* Special VM and GART address alignment needed for VI pre-Fiji due to
 * a HW bug.
 */
#define VI_BO_SIZE_ALIGN (0x8000)

/* BO flag to indicate a KFD userptr BO */
#define AMDGPU_AMDKFD_USERPTR_BO (1ULL << 63)

/* Userptr restore delay, just long enough to allow consecutive VM
 * changes to accumulate
 */
#define AMDGPU_USERPTR_RESTORE_DELAY_MS 1
/* Impose limit on how much memory KFD can use */
static struct {
	uint64_t max_system_mem_limit;
	uint64_t max_ttm_mem_limit;
	int64_t system_mem_used;
	int64_t ttm_mem_used;
	spinlock_t mem_limit_lock;
} kfd_mem_limit;
/* Struct used for amdgpu_amdkfd_bo_validate */
struct amdgpu_vm_parser {
	uint32_t	domain;
	bool		wait;
};

static const char * const domain_bit_to_string[] = {
		"CPU",
		"GTT",
		"VRAM",
		"GDS",
		"GWS",
		"OA"
};

#define domain_string(domain) domain_bit_to_string[ffs(domain)-1]
static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work);


static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
	return (struct amdgpu_device *)kgd;
}
static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm,
		struct kgd_mem *mem)
{
	struct kfd_bo_va_list *entry;

	list_for_each_entry(entry, &mem->bo_va_list, bo_list)
		if (entry->bo_va->base.vm == avm)
			return false;

	return true;
}
/* Set memory usage limits. Currently, the limits are
 *  System (TTM + userptr) memory - 3/4th System RAM
 *  TTM memory - 3/8th System RAM
 */
void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
{
	struct sysinfo si;
	uint64_t mem;

	si_meminfo(&si);
	mem = si.totalram - si.totalhigh;
	mem *= si.mem_unit;

	spin_lock_init(&kfd_mem_limit.mem_limit_lock);
	kfd_mem_limit.max_system_mem_limit = (mem >> 1) + (mem >> 2);
	kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3);
	pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
		(kfd_mem_limit.max_system_mem_limit >> 20),
		(kfd_mem_limit.max_ttm_mem_limit >> 20));
}
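/* Check a requested allocation against the KFD memory limits and, if it
 * fits, charge it to the per-domain usage counters. GTT allocations count
 * against both the system and TTM limits, userptr (CPU domain, no sg)
 * allocations count against system memory only, and VRAM allocations are
 * checked against the device VRAM size minus space reserved for page tables.
 */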
static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
		uint64_t size, u32 domain, bool sg)
{
	size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed;
	uint64_t reserved_for_pt = amdgpu_amdkfd_total_mem_size >> 9;
	int ret = 0;

	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
				       sizeof(struct amdgpu_bo));

	vram_needed = 0;
	if (domain == AMDGPU_GEM_DOMAIN_GTT) {
		/* TTM GTT memory */
		system_mem_needed = acc_size + size;
		ttm_mem_needed = acc_size + size;
	} else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
		/* Userptr */
		system_mem_needed = acc_size + size;
		ttm_mem_needed = acc_size;
	} else {
		/* VRAM and SG */
		system_mem_needed = acc_size;
		ttm_mem_needed = acc_size;
		if (domain == AMDGPU_GEM_DOMAIN_VRAM)
			vram_needed = size;
	}

	spin_lock(&kfd_mem_limit.mem_limit_lock);

	if ((kfd_mem_limit.system_mem_used + system_mem_needed >
	     kfd_mem_limit.max_system_mem_limit) ||
	    (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
	     kfd_mem_limit.max_ttm_mem_limit) ||
	    (adev->kfd.vram_used + vram_needed >
	     adev->gmc.real_vram_size - reserved_for_pt)) {
		ret = -ENOMEM;
	} else {
		kfd_mem_limit.system_mem_used += system_mem_needed;
		kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
		adev->kfd.vram_used += vram_needed;
	}

	spin_unlock(&kfd_mem_limit.mem_limit_lock);
	return ret;
}
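/* Undo the accounting done by amdgpu_amdkfd_reserve_mem_limit() when an
 * allocation is freed, using the same per-domain rules.
 */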
static void unreserve_mem_limit(struct amdgpu_device *adev,
		uint64_t size, u32 domain, bool sg)
{
	size_t acc_size;

	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
				       sizeof(struct amdgpu_bo));

	spin_lock(&kfd_mem_limit.mem_limit_lock);
	if (domain == AMDGPU_GEM_DOMAIN_GTT) {
		kfd_mem_limit.system_mem_used -= (acc_size + size);
		kfd_mem_limit.ttm_mem_used -= (acc_size + size);
	} else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
		kfd_mem_limit.system_mem_used -= (acc_size + size);
		kfd_mem_limit.ttm_mem_used -= acc_size;
	} else {
		kfd_mem_limit.system_mem_used -= acc_size;
		kfd_mem_limit.ttm_mem_used -= acc_size;
		if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
			adev->kfd.vram_used -= size;
			WARN_ONCE(adev->kfd.vram_used < 0,
				  "kfd VRAM memory accounting unbalanced");
		}
	}
	WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
		  "kfd system memory accounting unbalanced");
	WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
		  "kfd TTM memory accounting unbalanced");

	spin_unlock(&kfd_mem_limit.mem_limit_lock);
}
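/* Called when a KFD BO is released. Derives the accounting domain from the
 * BO's preferred domains and flags (userptr BOs were charged to the CPU
 * domain) and releases the corresponding reservation.
 */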
void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	u32 domain = bo->preferred_domains;
	bool sg = (bo->preferred_domains == AMDGPU_GEM_DOMAIN_CPU);

	if (bo->flags & AMDGPU_AMDKFD_USERPTR_BO) {
		domain = AMDGPU_GEM_DOMAIN_CPU;
		sg = false;
	}

	unreserve_mem_limit(adev, amdgpu_bo_size(bo), domain, sg);
}
/* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's
 *  reservation object.
 *
 * @bo: [IN] Remove eviction fence(s) from this BO
 * @ef: [IN] This eviction fence is removed if it
 *  is present in the shared list.
 *
 * NOTE: Must be called with BO reserved i.e. bo->tbo.resv->lock held.
 */
static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
					struct amdgpu_amdkfd_fence *ef)
{
	struct reservation_object *resv = bo->tbo.resv;
	struct reservation_object_list *old, *new;
	unsigned int i, j, k;

	if (!ef)
		return -EINVAL;

	old = reservation_object_get_list(resv);
	if (!old)
		return 0;

	new = kmalloc(offsetof(typeof(*new), shared[old->shared_max]),
		      GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	/* Go through all the shared fences in the reservation object and sort
	 * the interesting ones to the end of the list.
	 */
	for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) {
		struct dma_fence *f;

		f = rcu_dereference_protected(old->shared[i],
					      reservation_object_held(resv));

		if (f->context == ef->base.context)
			RCU_INIT_POINTER(new->shared[--j], f);
		else
			RCU_INIT_POINTER(new->shared[k++], f);
	}
	new->shared_max = old->shared_max;
	new->shared_count = k;

	/* Install the new fence list, seqcount provides the barriers */
	preempt_disable();
	write_seqcount_begin(&resv->seq);
	RCU_INIT_POINTER(resv->fence, new);
	write_seqcount_end(&resv->seq);
	preempt_enable();

	/* Drop the references to the removed fences or move them to ef_list */
	for (i = j, k = 0; i < old->shared_count; ++i) {
		struct dma_fence *f;

		f = rcu_dereference_protected(new->shared[i],
					      reservation_object_held(resv));
		dma_fence_put(f);
	}
	kfree_rcu(old, rcu);

	return 0;
}
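/* Validate a regular (non-userptr) BO into the requested domain and, if
 * asked to, wait for pending KFD fences on it.
 */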
static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
				     bool wait)
{
	struct ttm_operation_ctx ctx = { false, false };
	int ret;

	if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),
		 "Called with userptr BO"))
		return -EINVAL;

	amdgpu_bo_placement_from_domain(bo, domain);

	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (ret)
		goto validate_fail;
	if (wait)
		amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);

validate_fail:
	return ret;
}
static int amdgpu_amdkfd_validate(void *param, struct amdgpu_bo *bo)
{
	struct amdgpu_vm_parser *p = param;

	return amdgpu_amdkfd_bo_validate(bo, p->domain, p->wait);
}
/* vm_validate_pt_pd_bos - Validate page table and directory BOs
 *
 * Page directories are not updated here because huge page handling
 * during page table updates can invalidate page directory entries
 * again. Page directories are only updated after updating page
 * tables.
 */
static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
{
	struct amdgpu_bo *pd = vm->root.base.bo;
	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
	struct amdgpu_vm_parser param;
	int ret;

	param.domain = AMDGPU_GEM_DOMAIN_VRAM;
	param.wait = false;

	ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate,
					&param);
	if (ret) {
		pr_err("amdgpu: failed to validate PT BOs\n");
		return ret;
	}

	ret = amdgpu_amdkfd_validate(&param, pd);
	if (ret) {
		pr_err("amdgpu: failed to validate PD\n");
		return ret;
	}

	vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.base.bo);

	if (vm->use_cpu_for_update) {
		ret = amdgpu_bo_kmap(pd, NULL);
		if (ret) {
			pr_err("amdgpu: failed to kmap PD, ret=%d\n", ret);
			return ret;
		}
	}

	return 0;
}
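/* vm_update_pds - Commit pending page directory updates for a VM and add
 * the resulting fence to the sync object.
 */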
static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
{
	struct amdgpu_bo *pd = vm->root.base.bo;
	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
	int ret;

	ret = amdgpu_vm_update_directories(adev, vm);
	if (ret)
		return ret;

	return amdgpu_sync_fence(NULL, sync, vm->last_update, false);
}
/* add_bo_to_vm - Add a BO to a VM
 *
 * Everything that needs to be done only once when a BO is first added
 * to a VM. It can later be mapped and unmapped many times without
 * repeating these steps.
 *
 * 1. Allocate and initialize BO VA entry data structure
 * 2. Add BO to the VM
 * 3. Determine ASIC-specific PTE flags
 * 4. Alloc page tables and directories if needed
 * 4a.  Validate new page tables and directories
 */
static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
		struct amdgpu_vm *vm, bool is_aql,
		struct kfd_bo_va_list **p_bo_va_entry)
{
	int ret;
	struct kfd_bo_va_list *bo_va_entry;
	struct amdgpu_bo *bo = mem->bo;
	uint64_t va = mem->va;
	struct list_head *list_bo_va = &mem->bo_va_list;
	unsigned long bo_size = bo->tbo.mem.size;

	if (!va) {
		pr_err("Invalid VA when adding BO to VM\n");
		return -EINVAL;
	}

	if (is_aql)
		va += bo_size;

	bo_va_entry = kzalloc(sizeof(*bo_va_entry), GFP_KERNEL);
	if (!bo_va_entry)
		return -ENOMEM;

	pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
			va + bo_size, vm);

	/* Add BO to VM internal data structures*/
	bo_va_entry->bo_va = amdgpu_vm_bo_add(adev, vm, bo);
	if (!bo_va_entry->bo_va) {
		ret = -EINVAL;
		pr_err("Failed to add BO object to VM. ret == %d\n",
				ret);
		goto err_vmadd;
	}

	bo_va_entry->va = va;
	bo_va_entry->pte_flags = amdgpu_gmc_get_pte_flags(adev,
							 mem->mapping_flags);
	bo_va_entry->kgd_dev = (void *)adev;
	list_add(&bo_va_entry->bo_list, list_bo_va);

	if (p_bo_va_entry)
		*p_bo_va_entry = bo_va_entry;

	/* Allocate new page tables if needed and validate
	 * them.
	 */
	ret = amdgpu_vm_alloc_pts(adev, vm, va, amdgpu_bo_size(bo));
	if (ret) {
		pr_err("Failed to allocate pts, err=%d\n", ret);
		goto err_alloc_pts;
	}

	ret = vm_validate_pt_pd_bos(vm);
	if (ret) {
		pr_err("validate_pt_pd_bos() failed\n");
		goto err_alloc_pts;
	}

	return 0;

err_alloc_pts:
	amdgpu_vm_bo_rmv(adev, bo_va_entry->bo_va);
	list_del(&bo_va_entry->bo_list);
err_vmadd:
	kfree(bo_va_entry);
	return ret;
}
static void remove_bo_from_vm(struct amdgpu_device *adev,
		struct kfd_bo_va_list *entry, unsigned long size)
{
	pr_debug("\t remove VA 0x%llx - 0x%llx in entry %p\n",
			entry->va,
			entry->va + size, entry);

	amdgpu_vm_bo_rmv(adev, entry->bo_va);
	list_del(&entry->bo_list);
	kfree(entry);
}
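/* Add the BO's validate_list entry to the process's kfd_bo_list, or to the
 * userptr_valid_list for userptr BOs, so that restore workers can find it.
 */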
static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
				struct amdkfd_process_info *process_info,
				bool userptr)
{
	struct ttm_validate_buffer *entry = &mem->validate_list;
	struct amdgpu_bo *bo = mem->bo;

	INIT_LIST_HEAD(&entry->head);
	entry->num_shared = 1;
	entry->bo = &bo->tbo;
	mutex_lock(&process_info->lock);
	if (userptr)
		list_add_tail(&entry->head, &process_info->userptr_valid_list);
	else
		list_add_tail(&entry->head, &process_info->kfd_bo_list);
	mutex_unlock(&process_info->lock);
}
/* Initializes user pages. It registers the MMU notifier and validates
 * the userptr BO in the GTT domain.
 *
 * The BO must already be on the userptr_valid_list. Otherwise an
 * eviction and restore may happen that leaves the new BO unmapped
 * with the user mode queues running.
 *
 * Takes the process_info->lock to protect against concurrent restore
 * workers.
 *
 * Returns 0 for success, negative errno for errors.
 */
static int init_user_pages(struct kgd_mem *mem, struct mm_struct *mm,
			   uint64_t user_addr)
{
	struct amdkfd_process_info *process_info = mem->process_info;
	struct amdgpu_bo *bo = mem->bo;
	struct ttm_operation_ctx ctx = { true, false };
	int ret = 0;

	mutex_lock(&process_info->lock);

	ret = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, user_addr, 0);
	if (ret) {
		pr_err("%s: Failed to set userptr: %d\n", __func__, ret);
		goto out;
	}

	ret = amdgpu_mn_register(bo, user_addr);
	if (ret) {
		pr_err("%s: Failed to register MMU notifier: %d\n",
		       __func__, ret);
		goto out;
	}

	/* If no restore worker is running concurrently, user_pages
	 * should not be allocated
	 */
	WARN(mem->user_pages, "Leaking user_pages array");

	mem->user_pages = kvmalloc_array(bo->tbo.ttm->num_pages,
					   sizeof(struct page *),
					   GFP_KERNEL | __GFP_ZERO);
	if (!mem->user_pages) {
		pr_err("%s: Failed to allocate pages array\n", __func__);
		ret = -ENOMEM;
		goto unregister_out;
	}

	ret = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm, mem->user_pages);
	if (ret) {
		pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
		goto free_out;
	}

	amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, mem->user_pages);

	ret = amdgpu_bo_reserve(bo, true);
	if (ret) {
		pr_err("%s: Failed to reserve BO\n", __func__);
		goto release_out;
	}
	amdgpu_bo_placement_from_domain(bo, mem->domain);
	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (ret)
		pr_err("%s: failed to validate BO\n", __func__);
	amdgpu_bo_unreserve(bo);

release_out:
	if (ret)
		release_pages(mem->user_pages, bo->tbo.ttm->num_pages);
free_out:
	kvfree(mem->user_pages);
	mem->user_pages = NULL;
unregister_out:
	if (ret)
		amdgpu_mn_unregister(bo);
out:
	mutex_unlock(&process_info->lock);
	return ret;
}
/* Reserving a BO and its page table BOs must happen atomically to
 * avoid deadlocks. Some operations update multiple VMs at once. Track
 * all the reservation info in a context structure. Optionally a sync
 * object can track VM updates.
 */
struct bo_vm_reservation_context {
	struct amdgpu_bo_list_entry kfd_bo; /* BO list entry for the KFD BO */
	unsigned int n_vms;		    /* Number of VMs reserved	    */
	struct amdgpu_bo_list_entry *vm_pd; /* Array of VM BO list entries  */
	struct ww_acquire_ctx ticket;	    /* Reservation ticket	    */
	struct list_head list, duplicates;  /* BO lists			    */
	struct amdgpu_sync *sync;	    /* Pointer to sync object	    */
	bool reserved;			    /* Whether BOs are reserved	    */
};

enum bo_vm_match {
	BO_VM_NOT_MAPPED = 0,	/* Match VMs where a BO is not mapped */
	BO_VM_MAPPED,		/* Match VMs where a BO is mapped     */
	BO_VM_ALL,		/* Match all VMs a BO was added to    */
};
/**
 * reserve_bo_and_vm - reserve a BO and a VM unconditionally.
 * @mem: KFD BO structure.
 * @vm: the VM to reserve.
 * @ctx: the struct that will be used in unreserve_bo_and_vms().
 */
static int reserve_bo_and_vm(struct kgd_mem *mem,
			      struct amdgpu_vm *vm,
			      struct bo_vm_reservation_context *ctx)
{
	struct amdgpu_bo *bo = mem->bo;
	int ret;

	WARN_ON(!vm);

	ctx->reserved = false;
	ctx->n_vms = 1;
	ctx->sync = &mem->sync;

	INIT_LIST_HEAD(&ctx->list);
	INIT_LIST_HEAD(&ctx->duplicates);

	ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), GFP_KERNEL);
	if (!ctx->vm_pd)
		return -ENOMEM;

	ctx->kfd_bo.priority = 0;
	ctx->kfd_bo.tv.bo = &bo->tbo;
	ctx->kfd_bo.tv.num_shared = 1;
	ctx->kfd_bo.user_pages = NULL;
	list_add(&ctx->kfd_bo.tv.head, &ctx->list);

	amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);

	ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
				     false, &ctx->duplicates);
	if (!ret)
		ctx->reserved = true;
	else {
		pr_err("Failed to reserve buffers in ttm\n");
		kfree(ctx->vm_pd);
		ctx->vm_pd = NULL;
	}

	return ret;
}
/**
 * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally
 * @mem: KFD BO structure.
 * @vm: the VM to reserve. If NULL, then all VMs associated with the BO
 * are used. Otherwise, a single VM associated with the BO.
 * @map_type: the mapping status that will be used to filter the VMs.
 * @ctx: the struct that will be used in unreserve_bo_and_vms().
 *
 * Returns 0 for success, negative for failure.
 */
static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
				struct amdgpu_vm *vm, enum bo_vm_match map_type,
				struct bo_vm_reservation_context *ctx)
{
	struct amdgpu_bo *bo = mem->bo;
	struct kfd_bo_va_list *entry;
	unsigned int i;
	int ret;

	ctx->reserved = false;
	ctx->n_vms = 0;
	ctx->vm_pd = NULL;
	ctx->sync = &mem->sync;

	INIT_LIST_HEAD(&ctx->list);
	INIT_LIST_HEAD(&ctx->duplicates);

	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
		if ((vm && vm != entry->bo_va->base.vm) ||
			(entry->is_mapped != map_type
			&& map_type != BO_VM_ALL))
			continue;

		ctx->n_vms++;
	}

	if (ctx->n_vms != 0) {
		ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd),
				     GFP_KERNEL);
		if (!ctx->vm_pd)
			return -ENOMEM;
	}

	ctx->kfd_bo.priority = 0;
	ctx->kfd_bo.tv.bo = &bo->tbo;
	ctx->kfd_bo.tv.num_shared = 1;
	ctx->kfd_bo.user_pages = NULL;
	list_add(&ctx->kfd_bo.tv.head, &ctx->list);

	i = 0;
	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
		if ((vm && vm != entry->bo_va->base.vm) ||
			(entry->is_mapped != map_type
			&& map_type != BO_VM_ALL))
			continue;

		amdgpu_vm_get_pd_bo(entry->bo_va->base.vm, &ctx->list,
				&ctx->vm_pd[i]);
		i++;
	}

	ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
				     false, &ctx->duplicates);
	if (!ret)
		ctx->reserved = true;
	else
		pr_err("Failed to reserve buffers in ttm.\n");

	if (ret) {
		kfree(ctx->vm_pd);
		ctx->vm_pd = NULL;
	}

	return ret;
}
/**
 * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context
 * @ctx: Reservation context to unreserve
 * @wait: Optionally wait for a sync object representing pending VM updates
 * @intr: Whether the wait is interruptible
 *
 * Also frees any resources allocated in
 * reserve_bo_and_(cond_)vm(s). Returns the status from
 * amdgpu_sync_wait.
 */
static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
				 bool wait, bool intr)
{
	int ret = 0;

	if (wait)
		ret = amdgpu_sync_wait(ctx->sync, intr);

	if (ctx->reserved)
		ttm_eu_backoff_reservation(&ctx->ticket, &ctx->list);
	kfree(ctx->vm_pd);

	ctx->sync = NULL;

	ctx->reserved = false;
	ctx->vm_pd = NULL;

	return ret;
}
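/* Remove the VA mapping of a BO from one VM, clear the freed mapping and
 * add the resulting page-table fence to the sync object.
 */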
static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
				struct kfd_bo_va_list *entry,
				struct amdgpu_sync *sync)
{
	struct amdgpu_bo_va *bo_va = entry->bo_va;
	struct amdgpu_vm *vm = bo_va->base.vm;

	amdgpu_vm_bo_unmap(adev, bo_va, entry->va);

	amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);

	amdgpu_sync_fence(NULL, sync, bo_va->last_pt_update, false);

	return 0;
}
static int update_gpuvm_pte(struct amdgpu_device *adev,
		struct kfd_bo_va_list *entry,
		struct amdgpu_sync *sync)
{
	int ret;
	struct amdgpu_vm *vm;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_bo *bo;

	bo_va = entry->bo_va;
	vm = bo_va->base.vm;
	bo = bo_va->base.bo;

	/* Update the page tables  */
	ret = amdgpu_vm_bo_update(adev, bo_va, false);
	if (ret) {
		pr_err("amdgpu_vm_bo_update failed\n");
		return ret;
	}

	return amdgpu_sync_fence(NULL, sync, bo_va->last_pt_update, false);
}
static int map_bo_to_gpuvm(struct amdgpu_device *adev,
		struct kfd_bo_va_list *entry, struct amdgpu_sync *sync,
		bool no_update_pte)
{
	int ret;

	/* Set virtual address for the allocation */
	ret = amdgpu_vm_bo_map(adev, entry->bo_va, entry->va, 0,
			       amdgpu_bo_size(entry->bo_va->base.bo),
			       entry->pte_flags);
	if (ret) {
		pr_err("Failed to map VA 0x%llx in vm. ret %d\n",
				entry->va, ret);
		return ret;
	}

	if (no_update_pte)
		return 0;

	ret = update_gpuvm_pte(adev, entry, sync);
	if (ret) {
		pr_err("update_gpuvm_pte() failed\n");
		goto update_gpuvm_pte_failed;
	}

	return 0;

update_gpuvm_pte_failed:
	unmap_bo_from_gpuvm(adev, entry, sync);
	return ret;
}
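/* Build a single-entry sg_table describing a doorbell page so that it can
 * be wrapped in a ttm_bo_type_sg BO.
 */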
static struct sg_table *create_doorbell_sg(uint64_t addr, uint32_t size)
{
	struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL);

	if (!sg)
		return NULL;
	if (sg_alloc_table(sg, 1, GFP_KERNEL)) {
		kfree(sg);
		return NULL;
	}
	sg->sgl->dma_address = addr;
	sg->sgl->length = size;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
	sg->sgl->dma_length = size;
#endif
	return sg;
}
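/* Validate the page table and directory BOs of every VM that belongs to
 * the process.
 */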
static int process_validate_vms(struct amdkfd_process_info *process_info)
{
	struct amdgpu_vm *peer_vm;
	int ret;

	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
		ret = vm_validate_pt_pd_bos(peer_vm);
		if (ret)
			return ret;
	}

	return 0;
}
static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
				 struct amdgpu_sync *sync)
{
	struct amdgpu_vm *peer_vm;
	int ret;

	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
		struct amdgpu_bo *pd = peer_vm->root.base.bo;

		ret = amdgpu_sync_resv(NULL,
					sync, pd->tbo.resv,
					AMDGPU_FENCE_OWNER_UNDEFINED, false);
		if (ret)
			return ret;
	}

	return 0;
}
static int process_update_pds(struct amdkfd_process_info *process_info,
			      struct amdgpu_sync *sync)
{
	struct amdgpu_vm *peer_vm;
	int ret;

	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
		ret = vm_update_pds(peer_vm, sync);
		if (ret)
			return ret;
	}

	return 0;
}
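/* Set up the KFD part of a VM: on first use create the per-process info
 * structure and eviction fence, then validate the page directory, attach
 * the eviction fence to it and link the VM into the process's VM list.
 */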
static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
		       struct dma_fence **ef)
{
	struct amdkfd_process_info *info = NULL;
	int ret;

	if (!*process_info) {
		info = kzalloc(sizeof(*info), GFP_KERNEL);
		if (!info)
			return -ENOMEM;

		mutex_init(&info->lock);
		INIT_LIST_HEAD(&info->vm_list_head);
		INIT_LIST_HEAD(&info->kfd_bo_list);
		INIT_LIST_HEAD(&info->userptr_valid_list);
		INIT_LIST_HEAD(&info->userptr_inval_list);

		info->eviction_fence =
			amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
						   current->mm);
		if (!info->eviction_fence) {
			pr_err("Failed to create eviction fence\n");
			ret = -ENOMEM;
			goto create_evict_fence_fail;
		}

		info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
		atomic_set(&info->evicted_bos, 0);
		INIT_DELAYED_WORK(&info->restore_userptr_work,
				  amdgpu_amdkfd_restore_userptr_worker);

		*process_info = info;
		*ef = dma_fence_get(&info->eviction_fence->base);
	}

	vm->process_info = *process_info;

	/* Validate page directory and attach eviction fence */
	ret = amdgpu_bo_reserve(vm->root.base.bo, true);
	if (ret)
		goto reserve_pd_fail;
	ret = vm_validate_pt_pd_bos(vm);
	if (ret) {
		pr_err("validate_pt_pd_bos() failed\n");
		goto validate_pd_fail;
	}
	amdgpu_bo_sync_wait(vm->root.base.bo, AMDGPU_FENCE_OWNER_KFD, false);

	amdgpu_bo_fence(vm->root.base.bo,
			&vm->process_info->eviction_fence->base, true);
	amdgpu_bo_unreserve(vm->root.base.bo);

	/* Update process info */
	mutex_lock(&vm->process_info->lock);
	list_add_tail(&vm->vm_list_node,
			&(vm->process_info->vm_list_head));
	vm->process_info->n_vms++;
	mutex_unlock(&vm->process_info->lock);

	return 0;

validate_pd_fail:
	amdgpu_bo_unreserve(vm->root.base.bo);
reserve_pd_fail:
	vm->process_info = NULL;
	if (info) {
		/* Two fence references: one in info and one in *ef */
		dma_fence_put(&info->eviction_fence->base);
		dma_fence_put(*ef);
		*ef = NULL;
		*process_info = NULL;
		put_pid(info->pid);
create_evict_fence_fail:
		mutex_destroy(&info->lock);
		kfree(info);
	}
	return ret;
}
int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, unsigned int pasid,
					  void **vm, void **process_info,
					  struct dma_fence **ef)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_vm *new_vm;
	int ret;

	new_vm = kzalloc(sizeof(*new_vm), GFP_KERNEL);
	if (!new_vm)
		return -ENOMEM;

	/* Initialize AMDGPU part of the VM */
	ret = amdgpu_vm_init(adev, new_vm, AMDGPU_VM_CONTEXT_COMPUTE, pasid);
	if (ret) {
		pr_err("Failed init vm ret %d\n", ret);
		goto amdgpu_vm_init_fail;
	}

	/* Initialize KFD part of the VM and process info */
	ret = init_kfd_vm(new_vm, process_info, ef);
	if (ret)
		goto init_kfd_vm_fail;

	*vm = (void *) new_vm;

	return 0;

init_kfd_vm_fail:
	amdgpu_vm_fini(adev, new_vm);
amdgpu_vm_init_fail:
	kfree(new_vm);
	return ret;
}
int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
					   struct file *filp, unsigned int pasid,
					   void **vm, void **process_info,
					   struct dma_fence **ef)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct drm_file *drm_priv = filp->private_data;
	struct amdgpu_fpriv *drv_priv = drm_priv->driver_priv;
	struct amdgpu_vm *avm = &drv_priv->vm;
	int ret;

	/* Already a compute VM? */
	if (avm->process_info)
		return -EINVAL;

	/* Convert VM into a compute VM */
	ret = amdgpu_vm_make_compute(adev, avm, pasid);
	if (ret)
		return ret;

	/* Initialize KFD part of the VM and process info */
	ret = init_kfd_vm(avm, process_info, ef);
	if (ret)
		return ret;

	*vm = (void *)avm;

	return 0;
}
void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm)
{
	struct amdkfd_process_info *process_info = vm->process_info;
	struct amdgpu_bo *pd = vm->root.base.bo;

	if (!process_info)
		return;

	/* Release eviction fence from PD */
	amdgpu_bo_reserve(pd, false);
	amdgpu_bo_fence(pd, NULL, false);
	amdgpu_bo_unreserve(pd);

	/* Update process info */
	mutex_lock(&process_info->lock);
	process_info->n_vms--;
	list_del(&vm->vm_list_node);
	mutex_unlock(&process_info->lock);

	/* Release per-process resources when last compute VM is destroyed */
	if (!process_info->n_vms) {
		WARN_ON(!list_empty(&process_info->kfd_bo_list));
		WARN_ON(!list_empty(&process_info->userptr_valid_list));
		WARN_ON(!list_empty(&process_info->userptr_inval_list));

		dma_fence_put(&process_info->eviction_fence->base);
		cancel_delayed_work_sync(&process_info->restore_userptr_work);
		put_pid(process_info->pid);
		mutex_destroy(&process_info->lock);
		kfree(process_info);
	}
}
void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;

	if (WARN_ON(!kgd || !vm))
		return;

	pr_debug("Destroying process vm %p\n", vm);

	/* Release the VM context */
	amdgpu_vm_fini(adev, avm);
	kfree(vm);
}
void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;

	if (WARN_ON(!kgd || !vm))
		return;

	pr_debug("Releasing process vm %p\n", vm);

	/* The original pasid of the amdgpu vm has already been
	 * released when the amdgpu vm was converted to a compute vm.
	 * The current pasid is managed by kfd and will be
	 * released on kfd process destroy. Set amdgpu pasid
	 * to 0 to avoid duplicate release.
	 */
	amdgpu_vm_release_compute(adev, avm);
}
uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm)
{
	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
	struct amdgpu_bo *pd = avm->root.base.bo;
	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);

	if (adev->asic_type < CHIP_VEGA10)
		return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
	return avm->pd_phys_addr;
}
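/* Allocate a KFD BO for a process. Selects the allocation domain and flags
 * from the KFD allocation flags, charges the memory limits, creates the BO
 * and, for userptr BOs, registers the MMU notifier and initializes the
 * user pages.
 */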
int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
		struct kgd_dev *kgd, uint64_t va, uint64_t size,
		void *vm, struct kgd_mem **mem,
		uint64_t *offset, uint32_t flags)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
	enum ttm_bo_type bo_type = ttm_bo_type_device;
	struct sg_table *sg = NULL;
	uint64_t user_addr = 0;
	struct amdgpu_bo *bo;
	struct amdgpu_bo_param bp;
	int byte_align;
	u32 domain, alloc_domain;
	u64 alloc_flags;
	uint32_t mapping_flags;
	int ret;

	/*
	 * Check on which domain to allocate BO
	 */
	if (flags & ALLOC_MEM_FLAGS_VRAM) {
		domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
		alloc_flags = AMDGPU_GEM_CREATE_VRAM_CLEARED;
		alloc_flags |= (flags & ALLOC_MEM_FLAGS_PUBLIC) ?
			AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED :
			AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	} else if (flags & ALLOC_MEM_FLAGS_GTT) {
		domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
		alloc_flags = 0;
	} else if (flags & ALLOC_MEM_FLAGS_USERPTR) {
		domain = AMDGPU_GEM_DOMAIN_GTT;
		alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
		alloc_flags = 0;
		if (!offset || !*offset)
			return -EINVAL;
		user_addr = *offset;
	} else if (flags & ALLOC_MEM_FLAGS_DOORBELL) {
		domain = AMDGPU_GEM_DOMAIN_GTT;
		alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
		bo_type = ttm_bo_type_sg;
		alloc_flags = 0;
		if (size > UINT_MAX)
			return -EINVAL;
		sg = create_doorbell_sg(*offset, size);
		if (!sg)
			return -ENOMEM;
	} else {
		return -EINVAL;
	}

	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
	if (!*mem) {
		ret = -ENOMEM;
		goto err;
	}
	INIT_LIST_HEAD(&(*mem)->bo_va_list);
	mutex_init(&(*mem)->lock);
	(*mem)->aql_queue = !!(flags & ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);

	/* Workaround for AQL queue wraparound bug. Map the same
	 * memory twice. That means we only actually allocate half
	 * the memory.
	 */
	if ((*mem)->aql_queue)
		size = size >> 1;

	/* Workaround for TLB bug on older VI chips */
	byte_align = (adev->family == AMDGPU_FAMILY_VI &&
			adev->asic_type != CHIP_FIJI &&
			adev->asic_type != CHIP_POLARIS10 &&
			adev->asic_type != CHIP_POLARIS11 &&
			adev->asic_type != CHIP_POLARIS12) ?
			VI_BO_SIZE_ALIGN : 1;

	mapping_flags = AMDGPU_VM_PAGE_READABLE;
	if (flags & ALLOC_MEM_FLAGS_WRITABLE)
		mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
	if (flags & ALLOC_MEM_FLAGS_EXECUTABLE)
		mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
	if (flags & ALLOC_MEM_FLAGS_COHERENT)
		mapping_flags |= AMDGPU_VM_MTYPE_UC;
	else
		mapping_flags |= AMDGPU_VM_MTYPE_NC;
	(*mem)->mapping_flags = mapping_flags;

	amdgpu_sync_create(&(*mem)->sync);

	ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, alloc_domain, !!sg);
	if (ret) {
		pr_debug("Insufficient system memory\n");
		goto err_reserve_limit;
	}

	pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
			va, size, domain_string(alloc_domain));

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = byte_align;
	bp.domain = alloc_domain;
	bp.flags = alloc_flags;
	bp.type = bo_type;
	bp.resv = NULL;
	ret = amdgpu_bo_create(adev, &bp, &bo);
	if (ret) {
		pr_debug("Failed to create BO on domain %s. ret %d\n",
				domain_string(alloc_domain), ret);
		goto err_bo_create;
	}
	if (bo_type == ttm_bo_type_sg) {
		bo->tbo.sg = sg;
		bo->tbo.ttm->sg = sg;
	}
	bo->kfd_bo = *mem;
	(*mem)->bo = bo;
	if (user_addr)
		bo->flags |= AMDGPU_AMDKFD_USERPTR_BO;

	(*mem)->va = va;
	(*mem)->domain = domain;
	(*mem)->mapped_to_gpu_memory = 0;
	(*mem)->process_info = avm->process_info;
	add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);

	if (user_addr) {
		ret = init_user_pages(*mem, current->mm, user_addr);
		if (ret) {
			mutex_lock(&avm->process_info->lock);
			list_del(&(*mem)->validate_list.head);
			mutex_unlock(&avm->process_info->lock);
			goto allocate_init_user_pages_failed;
		}
	}

	if (offset)
		*offset = amdgpu_bo_mmap_offset(bo);

	return 0;

allocate_init_user_pages_failed:
	amdgpu_bo_unref(&bo);
	/* Don't unreserve system mem limit twice */
	goto err_reserve_limit;
err_bo_create:
	unreserve_mem_limit(adev, size, alloc_domain, !!sg);
err_reserve_limit:
	mutex_destroy(&(*mem)->lock);
	kfree(*mem);
err:
	if (sg) {
		sg_free_table(sg);
		kfree(sg);
	}
	return ret;
}
int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
		struct kgd_dev *kgd, struct kgd_mem *mem)
{
	struct amdkfd_process_info *process_info = mem->process_info;
	unsigned long bo_size = mem->bo->tbo.mem.size;
	struct kfd_bo_va_list *entry, *tmp;
	struct bo_vm_reservation_context ctx;
	struct ttm_validate_buffer *bo_list_entry;
	int ret;

	mutex_lock(&mem->lock);

	if (mem->mapped_to_gpu_memory > 0) {
		pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
				mem->va, bo_size);
		mutex_unlock(&mem->lock);
		return -EBUSY;
	}

	mutex_unlock(&mem->lock);
	/* lock is not needed after this, since mem is unused and will
	 * be freed anyway
	 */

	/* No more MMU notifiers */
	amdgpu_mn_unregister(mem->bo);

	/* Make sure restore workers don't access the BO any more */
	bo_list_entry = &mem->validate_list;
	mutex_lock(&process_info->lock);
	list_del(&bo_list_entry->head);
	mutex_unlock(&process_info->lock);

	/* Free user pages if necessary */
	if (mem->user_pages) {
		pr_debug("%s: Freeing user_pages array\n", __func__);
		if (mem->user_pages[0])
			release_pages(mem->user_pages,
					mem->bo->tbo.ttm->num_pages);
		kvfree(mem->user_pages);
	}

	ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
	if (unlikely(ret))
		return ret;

	/* The eviction fence should be removed by the last unmap.
	 * TODO: Log an error condition if the bo still has the eviction fence
	 * attached
	 */
	amdgpu_amdkfd_remove_eviction_fence(mem->bo,
					process_info->eviction_fence);
	pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
		mem->va + bo_size * (1 + mem->aql_queue));

	/* Remove from VM internal data structures */
	list_for_each_entry_safe(entry, tmp, &mem->bo_va_list, bo_list)
		remove_bo_from_vm((struct amdgpu_device *)entry->kgd_dev,
				entry, bo_size);

	ret = unreserve_bo_and_vms(&ctx, false, false);

	/* Free the sync object */
	amdgpu_sync_free(&mem->sync);

	/* If the SG is not NULL, it's one we created for a doorbell
	 * BO. We need to free it.
	 */
	if (mem->bo->tbo.sg) {
		sg_free_table(mem->bo->tbo.sg);
		kfree(mem->bo->tbo.sg);
	}

	/* Free the BO*/
	amdgpu_bo_unref(&mem->bo);
	mutex_destroy(&mem->lock);
	kfree(mem);

	return ret;
}
int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
		struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
	int ret;
	struct amdgpu_bo *bo;
	uint32_t domain;
	struct kfd_bo_va_list *entry;
	struct bo_vm_reservation_context ctx;
	struct kfd_bo_va_list *bo_va_entry = NULL;
	struct kfd_bo_va_list *bo_va_entry_aql = NULL;
	unsigned long bo_size;
	bool is_invalid_userptr = false;

	bo = mem->bo;
	if (!bo) {
		pr_err("Invalid BO when mapping memory to GPU\n");
		return -EINVAL;
	}

	/* Make sure restore is not running concurrently. Since we
	 * don't map invalid userptr BOs, we rely on the next restore
	 * worker to do the mapping
	 */
	mutex_lock(&mem->process_info->lock);

	/* Lock mmap-sem. If we find an invalid userptr BO, we can be
	 * sure that the MMU notifier is no longer running
	 * concurrently and the queues are actually stopped
	 */
	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
		down_write(&current->mm->mmap_sem);
		is_invalid_userptr = atomic_read(&mem->invalid);
		up_write(&current->mm->mmap_sem);
	}

	mutex_lock(&mem->lock);

	domain = mem->domain;
	bo_size = bo->tbo.mem.size;

	pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
			mem->va,
			mem->va + bo_size * (1 + mem->aql_queue),
			vm, domain_string(domain));

	ret = reserve_bo_and_vm(mem, vm, &ctx);
	if (unlikely(ret))
		goto out;

	/* Userptr can be marked as "not invalid", but not actually be
	 * validated yet (still in the system domain). In that case
	 * the queues are still stopped and we can leave mapping for
	 * the next restore worker
	 */
	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
	    bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
		is_invalid_userptr = true;

	if (check_if_add_bo_to_vm(avm, mem)) {
		ret = add_bo_to_vm(adev, mem, avm, false,
				&bo_va_entry);
		if (ret)
			goto add_bo_to_vm_failed;
		if (mem->aql_queue) {
			ret = add_bo_to_vm(adev, mem, avm,
					true, &bo_va_entry_aql);
			if (ret)
				goto add_bo_to_vm_failed_aql;
		}
	} else {
		ret = vm_validate_pt_pd_bos(avm);
		if (unlikely(ret))
			goto add_bo_to_vm_failed;
	}

	if (mem->mapped_to_gpu_memory == 0 &&
	    !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
		/* Validate BO only once. The eviction fence gets added to BO
		 * the first time it is mapped. Validate will wait for all
		 * background evictions to complete.
		 */
		ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
		if (ret) {
			pr_debug("Validate failed\n");
			goto map_bo_to_gpuvm_failed;
		}
	}

	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
		if (entry->bo_va->base.vm == vm && !entry->is_mapped) {
			pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
					entry->va, entry->va + bo_size,
					entry);

			ret = map_bo_to_gpuvm(adev, entry, ctx.sync,
					      is_invalid_userptr);
			if (ret) {
				pr_err("Failed to map radeon bo to gpuvm\n");
				goto map_bo_to_gpuvm_failed;
			}

			ret = vm_update_pds(vm, ctx.sync);
			if (ret) {
				pr_err("Failed to update page directories\n");
				goto map_bo_to_gpuvm_failed;
			}

			entry->is_mapped = true;
			mem->mapped_to_gpu_memory++;
			pr_debug("\t INC mapping count %d\n",
					mem->mapped_to_gpu_memory);
		}
	}

	if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->pin_count)
		amdgpu_bo_fence(bo,
				&avm->process_info->eviction_fence->base,
				true);
	ret = unreserve_bo_and_vms(&ctx, false, false);

	goto out;

map_bo_to_gpuvm_failed:
	if (bo_va_entry_aql)
		remove_bo_from_vm(adev, bo_va_entry_aql, bo_size);
add_bo_to_vm_failed_aql:
	if (bo_va_entry)
		remove_bo_from_vm(adev, bo_va_entry, bo_size);
add_bo_to_vm_failed:
	unreserve_bo_and_vms(&ctx, false, false);
out:
	mutex_unlock(&mem->process_info->lock);
	mutex_unlock(&mem->lock);
	return ret;
}
int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
		struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdkfd_process_info *process_info =
		((struct amdgpu_vm *)vm)->process_info;
	unsigned long bo_size = mem->bo->tbo.mem.size;
	struct kfd_bo_va_list *entry;
	struct bo_vm_reservation_context ctx;
	int ret;

	mutex_lock(&mem->lock);

	ret = reserve_bo_and_cond_vms(mem, vm, BO_VM_MAPPED, &ctx);
	if (unlikely(ret))
		goto out;
	/* If no VMs were reserved, it means the BO wasn't actually mapped */
	if (ctx.n_vms == 0) {
		ret = -EINVAL;
		goto unreserve_out;
	}

	ret = vm_validate_pt_pd_bos((struct amdgpu_vm *)vm);
	if (unlikely(ret))
		goto unreserve_out;

	pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n",
		mem->va,
		mem->va + bo_size * (1 + mem->aql_queue),
		vm);

	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
		if (entry->bo_va->base.vm == vm && entry->is_mapped) {
			pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
					entry->va,
					entry->va + bo_size,
					entry);

			ret = unmap_bo_from_gpuvm(adev, entry, ctx.sync);
			if (ret == 0) {
				entry->is_mapped = false;
			} else {
				pr_err("failed to unmap VA 0x%llx\n",
						mem->va);
				goto unreserve_out;
			}

			mem->mapped_to_gpu_memory--;
			pr_debug("\t DEC mapping count %d\n",
					mem->mapped_to_gpu_memory);
		}
	}

	/* If BO is unmapped from all VMs, unfence it. It can be evicted if
	 * required.
	 */
	if (mem->mapped_to_gpu_memory == 0 &&
	    !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && !mem->bo->pin_count)
		amdgpu_amdkfd_remove_eviction_fence(mem->bo,
						process_info->eviction_fence);

unreserve_out:
	unreserve_bo_and_vms(&ctx, false, false);
out:
	mutex_unlock(&mem->lock);
	return ret;
}
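/* Wait for all pending VM updates recorded in the kgd_mem sync object. The
 * sync object is cloned under the memory lock so that the wait itself can
 * run without holding it.
 */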
int amdgpu_amdkfd_gpuvm_sync_memory(
		struct kgd_dev *kgd, struct kgd_mem *mem, bool intr)
{
	struct amdgpu_sync sync;
	int ret;

	amdgpu_sync_create(&sync);

	mutex_lock(&mem->lock);
	amdgpu_sync_clone(&mem->sync, &sync);
	mutex_unlock(&mem->lock);

	ret = amdgpu_sync_wait(&sync, intr);
	amdgpu_sync_free(&sync);
	return ret;
}
int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
		struct kgd_mem *mem, void **kptr, uint64_t *size)
{
	int ret;
	struct amdgpu_bo *bo = mem->bo;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
		pr_err("userptr can't be mapped to kernel\n");
		return -EINVAL;
	}

	/* delete kgd_mem from kfd_bo_list to avoid re-validating
	 * this BO in BO's restoring after eviction.
	 */
	mutex_lock(&mem->process_info->lock);

	ret = amdgpu_bo_reserve(bo, true);
	if (ret) {
		pr_err("Failed to reserve bo. ret %d\n", ret);
		goto bo_reserve_failed;
	}

	ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
	if (ret) {
		pr_err("Failed to pin bo. ret %d\n", ret);
		goto pin_failed;
	}

	ret = amdgpu_bo_kmap(bo, kptr);
	if (ret) {
		pr_err("Failed to map bo to kernel. ret %d\n", ret);
		goto kmap_failed;
	}

	amdgpu_amdkfd_remove_eviction_fence(
		bo, mem->process_info->eviction_fence);
	list_del_init(&mem->validate_list.head);

	if (size)
		*size = amdgpu_bo_size(bo);

	amdgpu_bo_unreserve(bo);

	mutex_unlock(&mem->process_info->lock);
	return 0;

kmap_failed:
	amdgpu_bo_unpin(bo);
pin_failed:
	amdgpu_bo_unreserve(bo);
bo_reserve_failed:
	mutex_unlock(&mem->process_info->lock);

	return ret;
}
int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
					      struct kfd_vm_fault_info *mem)
{
	struct amdgpu_device *adev;

	adev = (struct amdgpu_device *)kgd;
	if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
		*mem = *adev->gmc.vm_fault_info;
		mb();
		atomic_set(&adev->gmc.vm_fault_info_updated, 0);
	}
	return 0;
}
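/* Import an amdgpu DMA-buf as a KFD BO. Only VRAM and GTT BOs exported by
 * the same device are supported.
 */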
int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
				      struct dma_buf *dma_buf,
				      uint64_t va, void *vm,
				      struct kgd_mem **mem, uint64_t *size,
				      uint64_t *mmap_offset)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct drm_gem_object *obj;
	struct amdgpu_bo *bo;
	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;

	if (dma_buf->ops != &amdgpu_dmabuf_ops)
		/* Can't handle non-graphics buffers */
		return -EINVAL;

	obj = dma_buf->priv;
	if (obj->dev->dev_private != adev)
		/* Can't handle buffers from other devices */
		return -EINVAL;

	bo = gem_to_amdgpu_bo(obj);
	if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
				    AMDGPU_GEM_DOMAIN_GTT)))
		/* Only VRAM and GTT BOs are supported */
		return -EINVAL;

	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
	if (!*mem)
		return -ENOMEM;

	if (size)
		*size = amdgpu_bo_size(bo);

	if (mmap_offset)
		*mmap_offset = amdgpu_bo_mmap_offset(bo);

	INIT_LIST_HEAD(&(*mem)->bo_va_list);
	mutex_init(&(*mem)->lock);
	(*mem)->mapping_flags =
		AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
		AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_NC;

	(*mem)->bo = amdgpu_bo_ref(bo);
	(*mem)->va = va;
	(*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
		AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
	(*mem)->mapped_to_gpu_memory = 0;
	(*mem)->process_info = avm->process_info;
	add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
	amdgpu_sync_create(&(*mem)->sync);

	return 0;
}
/* Evict a userptr BO by stopping the queues if necessary
 *
 * Runs in MMU notifier, may be in RECLAIM_FS context. This means it
 * cannot do any memory allocations, and cannot take any locks that
 * are held elsewhere while allocating memory. Therefore this is as
 * simple as possible, using atomic counters.
 *
 * It doesn't do anything to the BO itself. The real work happens in
 * restore, where we get updated page addresses. This function only
 * ensures that GPU access to the BO is stopped.
 */
int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
				struct mm_struct *mm)
{
	struct amdkfd_process_info *process_info = mem->process_info;
	int invalid, evicted_bos;
	int r = 0;

	invalid = atomic_inc_return(&mem->invalid);
	evicted_bos = atomic_inc_return(&process_info->evicted_bos);
	if (evicted_bos == 1) {
		/* First eviction, stop the queues */
		r = kgd2kfd_quiesce_mm(mm);
		if (r)
			pr_err("Failed to quiesce KFD\n");
		schedule_delayed_work(&process_info->restore_userptr_work,
			msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
	}

	return r;
}
/* Update invalid userptr BOs
 *
 * Moves invalidated (evicted) userptr BOs from userptr_valid_list to
 * userptr_inval_list and updates user pages for all BOs that have
 * been invalidated since their last update.
 */
static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
				     struct mm_struct *mm)
{
	struct kgd_mem *mem, *tmp_mem;
	struct amdgpu_bo *bo;
	struct ttm_operation_ctx ctx = { false, false };
	int invalid, ret;

	/* Move all invalidated BOs to the userptr_inval_list and
	 * release their user pages by migration to the CPU domain
	 */
	list_for_each_entry_safe(mem, tmp_mem,
				 &process_info->userptr_valid_list,
				 validate_list.head) {
		if (!atomic_read(&mem->invalid))
			continue; /* BO is still valid */

		bo = mem->bo;

		if (amdgpu_bo_reserve(bo, true))
			return -EAGAIN;
		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		amdgpu_bo_unreserve(bo);
		if (ret) {
			pr_err("%s: Failed to invalidate userptr BO\n",
			       __func__);
			return -EAGAIN;
		}

		list_move_tail(&mem->validate_list.head,
			       &process_info->userptr_inval_list);
	}

	if (list_empty(&process_info->userptr_inval_list))
		return 0; /* All evicted userptr BOs were freed */

	/* Go through userptr_inval_list and update any invalid user_pages */
	list_for_each_entry(mem, &process_info->userptr_inval_list,
			    validate_list.head) {
		invalid = atomic_read(&mem->invalid);
		if (!invalid)
			/* BO hasn't been invalidated since the last
			 * revalidation attempt. Keep its BO list.
			 */
			continue;

		bo = mem->bo;

		if (!mem->user_pages) {
			mem->user_pages =
				kvmalloc_array(bo->tbo.ttm->num_pages,
						 sizeof(struct page *),
						 GFP_KERNEL | __GFP_ZERO);
			if (!mem->user_pages) {
				pr_err("%s: Failed to allocate pages array\n",
				       __func__);
				return -ENOMEM;
			}
		} else if (mem->user_pages[0]) {
			release_pages(mem->user_pages, bo->tbo.ttm->num_pages);
		}

		/* Get updated user pages */
		ret = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
						   mem->user_pages);
		if (ret) {
			mem->user_pages[0] = NULL;
			pr_info("%s: Failed to get user pages: %d\n",
				__func__, ret);
			/* Pretend it succeeded. It will fail later
			 * with a VM fault if the GPU tries to access
			 * it. Better than hanging indefinitely with
			 * stalled user mode queues.
			 */
		}

		/* Mark the BO as valid unless it was invalidated
		 * again concurrently
		 */
		if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid)
			return -EAGAIN;
	}

	return 0;
}
/* Validate invalid userptr BOs
 *
 * Validates BOs on the userptr_inval_list, and moves them back to the
 * userptr_valid_list. Also updates GPUVM page tables with new page
 * addresses and waits for the page table updates to complete.
 */
static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
{
	struct amdgpu_bo_list_entry *pd_bo_list_entries;
	struct list_head resv_list, duplicates;
	struct ww_acquire_ctx ticket;
	struct amdgpu_sync sync;

	struct amdgpu_vm *peer_vm;
	struct kgd_mem *mem, *tmp_mem;
	struct amdgpu_bo *bo;
	struct ttm_operation_ctx ctx = { false, false };
	int i, ret;

	pd_bo_list_entries = kcalloc(process_info->n_vms,
				     sizeof(struct amdgpu_bo_list_entry),
				     GFP_KERNEL);
	if (!pd_bo_list_entries) {
		pr_err("%s: Failed to allocate PD BO list entries\n", __func__);
		ret = -ENOMEM;
		goto out_no_mem;
	}

	INIT_LIST_HEAD(&resv_list);
	INIT_LIST_HEAD(&duplicates);

	/* Get all the page directory BOs that need to be reserved */
	i = 0;
	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node)
		amdgpu_vm_get_pd_bo(peer_vm, &resv_list,
				    &pd_bo_list_entries[i++]);
	/* Add the userptr_inval_list entries to resv_list */
	list_for_each_entry(mem, &process_info->userptr_inval_list,
			    validate_list.head) {
		list_add_tail(&mem->resv_list.head, &resv_list);
		mem->resv_list.bo = mem->validate_list.bo;
		mem->resv_list.num_shared = mem->validate_list.num_shared;
	}

	/* Reserve all BOs and page tables for validation */
	ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
	WARN(!list_empty(&duplicates), "Duplicates should be empty");
	if (ret)
		goto out_free;

	amdgpu_sync_create(&sync);

	ret = process_validate_vms(process_info);
	if (ret)
		goto unreserve_out;

	/* Validate BOs and update GPUVM page tables */
	list_for_each_entry_safe(mem, tmp_mem,
				 &process_info->userptr_inval_list,
				 validate_list.head) {
		struct kfd_bo_va_list *bo_va_entry;

		bo = mem->bo;

		/* Copy pages array and validate the BO if we got user pages */
		if (mem->user_pages[0]) {
			amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
						     mem->user_pages);
			amdgpu_bo_placement_from_domain(bo, mem->domain);
			ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
			if (ret) {
				pr_err("%s: failed to validate BO\n", __func__);
				goto unreserve_out;
			}
		}

		/* Validate succeeded, now the BO owns the pages, free
		 * our copy of the pointer array. Put this BO back on
		 * the userptr_valid_list. If we need to revalidate
		 * it, we need to start from scratch.
		 */
		kvfree(mem->user_pages);
		mem->user_pages = NULL;
		list_move_tail(&mem->validate_list.head,
			       &process_info->userptr_valid_list);

		/* Update mapping. If the BO was not validated
		 * (because we couldn't get user pages), this will
		 * clear the page table entries, which will result in
		 * VM faults if the GPU tries to access the invalid
		 * memory.
		 */
		list_for_each_entry(bo_va_entry, &mem->bo_va_list, bo_list) {
			if (!bo_va_entry->is_mapped)
				continue;

			ret = update_gpuvm_pte((struct amdgpu_device *)
					       bo_va_entry->kgd_dev,
					       bo_va_entry, &sync);
			if (ret) {
				pr_err("%s: update PTE failed\n", __func__);
				/* make sure this gets validated again */
				atomic_inc(&mem->invalid);
				goto unreserve_out;
			}
		}
	}

	/* Update page directories */
	ret = process_update_pds(process_info, &sync);

unreserve_out:
	ttm_eu_backoff_reservation(&ticket, &resv_list);
	amdgpu_sync_wait(&sync, false);
	amdgpu_sync_free(&sync);
out_free:
	kfree(pd_bo_list_entries);
out_no_mem:

	return ret;
}
/* Worker callback to restore evicted userptr BOs
 *
 * Tries to update and validate all userptr BOs. If successful and no
 * concurrent evictions happened, the queues are restarted. Otherwise,
 * reschedule for another attempt later.
 */
static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct amdkfd_process_info *process_info =
		container_of(dwork, struct amdkfd_process_info,
			     restore_userptr_work);
	struct task_struct *usertask;
	struct mm_struct *mm;
	int evicted_bos;

	evicted_bos = atomic_read(&process_info->evicted_bos);
	if (!evicted_bos)
		return;

	/* Reference task and mm in case of concurrent process termination */
	usertask = get_pid_task(process_info->pid, PIDTYPE_PID);
	if (!usertask)
		return;
	mm = get_task_mm(usertask);
	if (!mm) {
		put_task_struct(usertask);
		return;
	}

	mutex_lock(&process_info->lock);

	if (update_invalid_user_pages(process_info, mm))
		goto unlock_out;
	/* userptr_inval_list can be empty if all evicted userptr BOs
	 * have been freed. In that case there is nothing to validate
	 * and we can just restart the queues.
	 */
	if (!list_empty(&process_info->userptr_inval_list)) {
		if (atomic_read(&process_info->evicted_bos) != evicted_bos)
			goto unlock_out; /* Concurrent eviction, try again */

		if (validate_invalid_user_pages(process_info))
			goto unlock_out;
	}
	/* Final check for concurrent eviction and atomic update. If
	 * another eviction happens after successful update, it will
	 * be a first eviction that calls quiesce_mm. The eviction
	 * reference counting inside KFD will handle this case.
	 */
	if (atomic_cmpxchg(&process_info->evicted_bos, evicted_bos, 0) !=
	    evicted_bos)
		goto unlock_out;
	evicted_bos = 0;
	if (kgd2kfd_resume_mm(mm)) {
		pr_err("%s: Failed to resume KFD\n", __func__);
		/* No recovery from this failure. Probably the CP is
		 * hanging. No point trying again.
		 */
	}
unlock_out:
	mutex_unlock(&process_info->lock);
	mmput(mm);
	put_task_struct(usertask);

	/* If validation failed, reschedule another attempt */
	if (evicted_bos)
		schedule_delayed_work(&process_info->restore_userptr_work,
			msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
}
/** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
 *   KFD process identified by process_info
 *
 * @process_info: amdkfd_process_info of the KFD process
 *
 * After memory eviction, restore thread calls this function. The function
 * should be called when the Process is still valid. BO restore involves -
 *
 * 1.  Release old eviction fence and create new one
 * 2.  Get two copies of PD BO list from all the VMs. Keep one copy as pd_list.
 * 3.  Use the second PD list and kfd_bo_list to create a list (ctx.list) of
 *     BOs that need to be reserved.
 * 4.  Reserve all the BOs
 * 5.  Validate PD and PT BOs.
 * 6.  Validate all KFD BOs using kfd_bo_list, map them and add a new fence
 * 7.  Add fence to all PD and PT BOs.
 * 8.  Unreserve all BOs
 */
int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
{
	struct amdgpu_bo_list_entry *pd_bo_list;
	struct amdkfd_process_info *process_info = info;
	struct amdgpu_vm *peer_vm;
	struct kgd_mem *mem;
	struct bo_vm_reservation_context ctx;
	struct amdgpu_amdkfd_fence *new_fence;
	int ret = 0, i;
	struct list_head duplicate_save;
	struct amdgpu_sync sync_obj;

	INIT_LIST_HEAD(&duplicate_save);
	INIT_LIST_HEAD(&ctx.list);
	INIT_LIST_HEAD(&ctx.duplicates);

	pd_bo_list = kcalloc(process_info->n_vms,
			     sizeof(struct amdgpu_bo_list_entry),
			     GFP_KERNEL);
	if (!pd_bo_list)
		return -ENOMEM;

	i = 0;
	mutex_lock(&process_info->lock);
	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			vm_list_node)
		amdgpu_vm_get_pd_bo(peer_vm, &ctx.list, &pd_bo_list[i++]);

	/* Reserve all BOs and page tables/directory. Add all BOs from
	 * kfd_bo_list to ctx.list
	 */
	list_for_each_entry(mem, &process_info->kfd_bo_list,
			    validate_list.head) {

		list_add_tail(&mem->resv_list.head, &ctx.list);
		mem->resv_list.bo = mem->validate_list.bo;
		mem->resv_list.num_shared = mem->validate_list.num_shared;
	}

	ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
				     false, &duplicate_save);
	if (ret) {
		pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
		goto ttm_reserve_fail;
	}

	amdgpu_sync_create(&sync_obj);

	/* Validate PDs and PTs */
	ret = process_validate_vms(process_info);
	if (ret)
		goto validate_map_fail;

	ret = process_sync_pds_resv(process_info, &sync_obj);
	if (ret) {
		pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n");
		goto validate_map_fail;
	}

	/* Validate BOs and map them to GPUVM (update VM page tables). */
	list_for_each_entry(mem, &process_info->kfd_bo_list,
			    validate_list.head) {

		struct amdgpu_bo *bo = mem->bo;
		uint32_t domain = mem->domain;
		struct kfd_bo_va_list *bo_va_entry;

		ret = amdgpu_amdkfd_bo_validate(bo, domain, false);
		if (ret) {
			pr_debug("Memory eviction: Validate BOs failed. Try again\n");
			goto validate_map_fail;
		}
		ret = amdgpu_sync_fence(NULL, &sync_obj, bo->tbo.moving, false);
		if (ret) {
			pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
			goto validate_map_fail;
		}
		list_for_each_entry(bo_va_entry, &mem->bo_va_list,
				    bo_list) {
			ret = update_gpuvm_pte((struct amdgpu_device *)
					      bo_va_entry->kgd_dev,
					      bo_va_entry,
					      &sync_obj);
			if (ret) {
				pr_debug("Memory eviction: update PTE failed. Try again\n");
				goto validate_map_fail;
			}
		}
	}

	/* Update page directories */
	ret = process_update_pds(process_info, &sync_obj);
	if (ret) {
		pr_debug("Memory eviction: update PDs failed. Try again\n");
		goto validate_map_fail;
	}

	/* Wait for validate and PT updates to finish */
	amdgpu_sync_wait(&sync_obj, false);

	/* Release old eviction fence and create new one, because fence only
	 * goes from unsignaled to signaled, fence cannot be reused.
	 * Use context and mm from the old fence.
	 */
	new_fence = amdgpu_amdkfd_fence_create(
				process_info->eviction_fence->base.context,
				process_info->eviction_fence->mm);
	if (!new_fence) {
		pr_err("Failed to create eviction fence\n");
		ret = -ENOMEM;
		goto validate_map_fail;
	}
	dma_fence_put(&process_info->eviction_fence->base);
	process_info->eviction_fence = new_fence;
	*ef = dma_fence_get(&new_fence->base);

	/* Attach new eviction fence to all BOs */
	list_for_each_entry(mem, &process_info->kfd_bo_list,
		validate_list.head)
		amdgpu_bo_fence(mem->bo,
			&process_info->eviction_fence->base, true);

	/* Attach eviction fence to PD / PT BOs */
	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
		struct amdgpu_bo *bo = peer_vm->root.base.bo;

		amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true);
	}

validate_map_fail:
	ttm_eu_backoff_reservation(&ctx.ticket, &ctx.list);
	amdgpu_sync_free(&sync_obj);
ttm_reserve_fail:
	mutex_unlock(&process_info->lock);
	kfree(pd_bo_list);
	return ret;
}