/*
 * Copyright 2014-2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#define pr_fmt(fmt) "kfd2kgd: " fmt

#include <linux/list.h>
#include <linux/pagemap.h>
#include <linux/sched/mm.h>
#include "amdgpu_object.h"
#include "amdgpu_vm.h"
#include "amdgpu_amdkfd.h"

/* Special VM and GART address alignment needed for VI pre-Fiji due to
 * a HW bug.
 */
#define VI_BO_SIZE_ALIGN (0x8000)

/* BO flag to indicate a KFD userptr BO */
#define AMDGPU_AMDKFD_USERPTR_BO (1ULL << 63)

/* Userptr restore delay, just long enough to allow consecutive VM
 * changes to accumulate
 */
#define AMDGPU_USERPTR_RESTORE_DELAY_MS 1

/* Impose limit on how much memory KFD can use */
static struct {
        uint64_t max_system_mem_limit;
        uint64_t max_userptr_mem_limit;
        int64_t system_mem_used;
        int64_t userptr_mem_used;
        spinlock_t mem_limit_lock;
} kfd_mem_limit;

/* Struct used for amdgpu_amdkfd_bo_validate */
struct amdgpu_vm_parser {
        uint32_t        domain;
        bool            wait;
};

static const char * const domain_bit_to_string[] = {
                "CPU",
                "GTT",
                "VRAM",
                "GDS",
                "GWS",
                "OA"
};

#define domain_string(domain) domain_bit_to_string[ffs(domain)-1]

static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work);
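
/*
 * Usage note on domain_string() (illustrative, not from the original source):
 * the GEM domain flags are single bits, so ffs(domain) - 1 picks the matching
 * entry above, e.g. domain_string(AMDGPU_GEM_DOMAIN_GTT) evaluates to "GTT".
 * The macro is only meaningful for a single domain bit, not for an OR'ed
 * domain mask.
 */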
static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
        return (struct amdgpu_device *)kgd;
}
static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm,
                struct kgd_mem *mem)
{
        struct kfd_bo_va_list *entry;

        list_for_each_entry(entry, &mem->bo_va_list, bo_list)
                if (entry->bo_va->base.vm == avm)
                        return false;

        return true;
}
/* Set memory usage limits. Currently, the limits are
 *  System (kernel) memory - 3/8th System RAM
 *  Userptr memory - 3/4th System RAM
 */
void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
{
        struct sysinfo si;
        uint64_t mem;

        si_meminfo(&si);
        mem = si.totalram - si.totalhigh;
        mem *= si.mem_unit;

        spin_lock_init(&kfd_mem_limit.mem_limit_lock);
        kfd_mem_limit.max_system_mem_limit = (mem >> 1) - (mem >> 3);
        kfd_mem_limit.max_userptr_mem_limit = mem - (mem >> 2);
        pr_debug("Kernel memory limit %lluM, userptr limit %lluM\n",
                (kfd_mem_limit.max_system_mem_limit >> 20),
                (kfd_mem_limit.max_userptr_mem_limit >> 20));
}
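
/*
 * Worked example for the limits above: with 16 GB of usable low memory,
 * (mem >> 1) - (mem >> 3) = 8 GB - 2 GB = 6 GB (3/8 of RAM) may be used for
 * kernel-domain (GTT) allocations, and mem - (mem >> 2) = 12 GB (3/4 of RAM)
 * may be used for userptr memory.
 */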
static int amdgpu_amdkfd_reserve_system_mem_limit(struct amdgpu_device *adev,
                                              uint64_t size, u32 domain)
{
        size_t acc_size;
        int ret = 0;

        acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
                                       sizeof(struct amdgpu_bo));

        spin_lock(&kfd_mem_limit.mem_limit_lock);
        if (domain == AMDGPU_GEM_DOMAIN_GTT) {
                if (kfd_mem_limit.system_mem_used + (acc_size + size) >
                        kfd_mem_limit.max_system_mem_limit) {
                        ret = -ENOMEM;
                        goto err_no_mem;
                }
                kfd_mem_limit.system_mem_used += (acc_size + size);
        } else if (domain == AMDGPU_GEM_DOMAIN_CPU) {
                if ((kfd_mem_limit.system_mem_used + acc_size >
                        kfd_mem_limit.max_system_mem_limit) ||
                        (kfd_mem_limit.userptr_mem_used + (size + acc_size) >
                        kfd_mem_limit.max_userptr_mem_limit)) {
                        ret = -ENOMEM;
                        goto err_no_mem;
                }
                kfd_mem_limit.system_mem_used += acc_size;
                kfd_mem_limit.userptr_mem_used += size;
        }
err_no_mem:
        spin_unlock(&kfd_mem_limit.mem_limit_lock);
        return ret;
}
static void unreserve_system_mem_limit(struct amdgpu_device *adev,
                                       uint64_t size, u32 domain)
{
        size_t acc_size;

        acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
                                       sizeof(struct amdgpu_bo));

        spin_lock(&kfd_mem_limit.mem_limit_lock);
        if (domain == AMDGPU_GEM_DOMAIN_GTT) {
                kfd_mem_limit.system_mem_used -= (acc_size + size);
        } else if (domain == AMDGPU_GEM_DOMAIN_CPU) {
                kfd_mem_limit.system_mem_used -= acc_size;
                kfd_mem_limit.userptr_mem_used -= size;
        }
        WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
                  "kfd system memory accounting unbalanced");
        WARN_ONCE(kfd_mem_limit.userptr_mem_used < 0,
                  "kfd userptr memory accounting unbalanced");

        spin_unlock(&kfd_mem_limit.mem_limit_lock);
}
void amdgpu_amdkfd_unreserve_system_memory_limit(struct amdgpu_bo *bo)
{
        spin_lock(&kfd_mem_limit.mem_limit_lock);

        if (bo->flags & AMDGPU_AMDKFD_USERPTR_BO) {
                kfd_mem_limit.system_mem_used -= bo->tbo.acc_size;
                kfd_mem_limit.userptr_mem_used -= amdgpu_bo_size(bo);
        } else if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT) {
                kfd_mem_limit.system_mem_used -=
                        (bo->tbo.acc_size + amdgpu_bo_size(bo));
        }
        WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
                  "kfd system memory accounting unbalanced");
        WARN_ONCE(kfd_mem_limit.userptr_mem_used < 0,
                  "kfd userptr memory accounting unbalanced");

        spin_unlock(&kfd_mem_limit.mem_limit_lock);
}
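
/*
 * Accounting invariant (summary of the three functions above): every
 * successful amdgpu_amdkfd_reserve_system_mem_limit() must eventually be
 * balanced by unreserve_system_mem_limit() (explicit error/free path) or
 * amdgpu_amdkfd_unreserve_system_memory_limit() (BO destruction), with the
 * same size and domain. The WARN_ONCE checks fire if the counters ever go
 * negative, i.e. if that pairing is broken.
 */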
/* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence(s) from BO's
 *  reservation object.
 *
 * @bo: [IN] Remove eviction fence(s) from this BO
 * @ef: [IN] If ef is specified, then this eviction fence is removed if it
 *  is present in the shared list.
 * @ef_list: [OUT] Returns list of eviction fences. These fences are removed
 *  from BO's reservation object shared list.
 * @ef_count: [OUT] Number of fences in ef_list.
 *
 * NOTE: If called with ef_list, then amdgpu_amdkfd_add_eviction_fence must be
 *  called to restore the eviction fences and to avoid memory leak. This is
 *  useful for shared BOs.
 * NOTE: Must be called with BO reserved i.e. bo->tbo.resv->lock held.
 */
static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
                                        struct amdgpu_amdkfd_fence *ef,
                                        struct amdgpu_amdkfd_fence ***ef_list,
                                        unsigned int *ef_count)
{
        struct reservation_object *resv = bo->tbo.resv;
        struct reservation_object_list *old, *new;
        unsigned int i, j, k;

        if (!ef && !ef_list)
                return -EINVAL;

        if (ef_list) {
                *ef_list = NULL;
                *ef_count = 0;
        }

        old = reservation_object_get_list(resv);
        if (!old)
                return 0;

        new = kmalloc(offsetof(typeof(*new), shared[old->shared_max]),
                      GFP_KERNEL);
        if (!new)
                return -ENOMEM;

        /* Go through all the shared fences in the reservation object and sort
         * the interesting ones to the end of the list.
         */
        for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) {
                struct dma_fence *f;

                f = rcu_dereference_protected(old->shared[i],
                                              reservation_object_held(resv));

                if ((ef && f->context == ef->base.context) ||
                    (!ef && to_amdgpu_amdkfd_fence(f)))
                        RCU_INIT_POINTER(new->shared[--j], f);
                else
                        RCU_INIT_POINTER(new->shared[k++], f);
        }
        new->shared_max = old->shared_max;
        new->shared_count = k;

        if (ef_list) {
                unsigned int count = old->shared_count - j;

                /* Alloc memory for count number of eviction fence pointers.
                 * Fill the ef_list array and ef_count
                 */
                *ef_list = kcalloc(count, sizeof(**ef_list), GFP_KERNEL);
                *ef_count = count;

                if (!*ef_list) {
                        kfree(new);
                        return -ENOMEM;
                }
        }

        /* Install the new fence list, seqcount provides the barriers */
        preempt_disable();
        write_seqcount_begin(&resv->seq);
        RCU_INIT_POINTER(resv->fence, new);
        write_seqcount_end(&resv->seq);
        preempt_enable();

        /* Drop the references to the removed fences or move them to ef_list */
        for (i = j, k = 0; i < old->shared_count; ++i) {
                struct dma_fence *f;

                f = rcu_dereference_protected(new->shared[i],
                                              reservation_object_held(resv));
                if (ef_list)
                        (*ef_list)[k++] = to_amdgpu_amdkfd_fence(f);
                else
                        dma_fence_put(f);
        }
        kfree_rcu(old, rcu);

        return 0;
}
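
/*
 * Sketch of the partitioning done above: after the first loop the new fence
 * list holds the fences to keep at indices [0, k) and the removed eviction
 * fences at indices [j, shared_count), so the second loop only has to walk
 * the tail of the list to either hand the eviction fences to ef_list or drop
 * their references.
 */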
/* amdgpu_amdkfd_add_eviction_fence - Adds eviction fence(s) back into BO's
 *  reservation object.
 *
 * @bo: [IN] Add eviction fences to this BO
 * @ef_list: [IN] List of eviction fences to be added
 * @ef_count: [IN] Number of fences in ef_list.
 *
 * NOTE: Must call amdgpu_amdkfd_remove_eviction_fence before calling this
 *  function.
 */
static void amdgpu_amdkfd_add_eviction_fence(struct amdgpu_bo *bo,
                                struct amdgpu_amdkfd_fence **ef_list,
                                unsigned int ef_count)
{
        unsigned int i;

        if (!ef_list || !ef_count)
                return;

        for (i = 0; i < ef_count; i++) {
                amdgpu_bo_fence(bo, &ef_list[i]->base, true);
                /* Re-adding the fence takes an additional reference. Drop that
                 * reference.
                 */
                dma_fence_put(&ef_list[i]->base);
        }

        kfree(ef_list);
}
static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
                                     bool wait)
{
        struct ttm_operation_ctx ctx = { false, false };
        int ret;

        if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),
                 "Called with userptr BO"))
                return -EINVAL;

        amdgpu_bo_placement_from_domain(bo, domain);

        ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
        if (ret)
                goto validate_fail;
        if (wait) {
                struct amdgpu_amdkfd_fence **ef_list;
                unsigned int ef_count;

                ret = amdgpu_amdkfd_remove_eviction_fence(bo, NULL, &ef_list,
                                                          &ef_count);
                if (ret)
                        goto validate_fail;

                ttm_bo_wait(&bo->tbo, false, false);
                amdgpu_amdkfd_add_eviction_fence(bo, ef_list, ef_count);
        }

validate_fail:
        return ret;
}
static int amdgpu_amdkfd_validate(void *param, struct amdgpu_bo *bo)
{
        struct amdgpu_vm_parser *p = param;

        return amdgpu_amdkfd_bo_validate(bo, p->domain, p->wait);
}
/* vm_validate_pt_pd_bos - Validate page table and directory BOs
 *
 * Page directories are not updated here because huge page handling
 * during page table updates can invalidate page directory entries
 * again. Page directories are only updated after updating page
 * tables.
 */
static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
{
        struct amdgpu_bo *pd = vm->root.base.bo;
        struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
        struct amdgpu_vm_parser param;
        uint64_t addr, flags = AMDGPU_PTE_VALID;
        int ret;

        param.domain = AMDGPU_GEM_DOMAIN_VRAM;
        param.wait = false;

        ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate,
                                        &param);
        if (ret) {
                pr_err("amdgpu: failed to validate PT BOs\n");
                return ret;
        }

        ret = amdgpu_amdkfd_validate(&param, pd);
        if (ret) {
                pr_err("amdgpu: failed to validate PD\n");
                return ret;
        }

        addr = amdgpu_bo_gpu_offset(vm->root.base.bo);
        amdgpu_gmc_get_vm_pde(adev, -1, &addr, &flags);
        vm->pd_phys_addr = addr;

        if (vm->use_cpu_for_update) {
                ret = amdgpu_bo_kmap(pd, NULL);
                if (ret) {
                        pr_err("amdgpu: failed to kmap PD, ret=%d\n", ret);
                        return ret;
                }
        }

        return 0;
}
static int sync_vm_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
                         struct dma_fence *f)
{
        int ret = amdgpu_sync_fence(adev, sync, f, false);

        /* Sync objects can't handle multiple GPUs (contexts) updating
         * sync->last_vm_update. Fortunately we don't need it for
         * KFD's purposes, so we can just drop that fence.
         */
        if (sync->last_vm_update) {
                dma_fence_put(sync->last_vm_update);
                sync->last_vm_update = NULL;
        }

        return ret;
}
static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
{
        struct amdgpu_bo *pd = vm->root.base.bo;
        struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
        int ret;

        ret = amdgpu_vm_update_directories(adev, vm);
        if (ret)
                return ret;

        return sync_vm_fence(adev, sync, vm->last_update);
}
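
/*
 * Note on ordering: vm_validate_pt_pd_bos() only validates page table and
 * directory BOs; the directory entries themselves are rewritten later by
 * vm_update_pds() (via amdgpu_vm_update_directories()), after the page
 * tables have been updated, to avoid the huge-page invalidation problem
 * described above.
 */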
/* add_bo_to_vm - Add a BO to a VM
 *
 * Everything that needs to be done only once when a BO is first added
 * to a VM. It can later be mapped and unmapped many times without
 * repeating these steps.
 *
 * 1. Allocate and initialize BO VA entry data structure
 * 2. Add BO to the VM
 * 3. Determine ASIC-specific PTE flags
 * 4. Alloc page tables and directories if needed
 * 4a. Validate new page tables and directories
 */
static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
                struct amdgpu_vm *vm, bool is_aql,
                struct kfd_bo_va_list **p_bo_va_entry)
{
        int ret;
        struct kfd_bo_va_list *bo_va_entry;
        struct amdgpu_bo *pd = vm->root.base.bo;
        struct amdgpu_bo *bo = mem->bo;
        uint64_t va = mem->va;
        struct list_head *list_bo_va = &mem->bo_va_list;
        unsigned long bo_size = bo->tbo.mem.size;

        if (!va) {
                pr_err("Invalid VA when adding BO to VM\n");
                return -EINVAL;
        }

        if (is_aql)
                va += bo_size;

        bo_va_entry = kzalloc(sizeof(*bo_va_entry), GFP_KERNEL);
        if (!bo_va_entry)
                return -ENOMEM;

        pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
                        va + bo_size, vm);

        /* Add BO to VM internal data structures*/
        bo_va_entry->bo_va = amdgpu_vm_bo_add(adev, vm, bo);
        if (!bo_va_entry->bo_va) {
                ret = -EINVAL;
                pr_err("Failed to add BO object to VM. ret == %d\n",
                                ret);
                goto err_vmadd;
        }

        bo_va_entry->va = va;
        bo_va_entry->pte_flags = amdgpu_gmc_get_pte_flags(adev,
                                                          mem->mapping_flags);
        bo_va_entry->kgd_dev = (void *)adev;
        list_add(&bo_va_entry->bo_list, list_bo_va);

        if (p_bo_va_entry)
                *p_bo_va_entry = bo_va_entry;

        /* Allocate new page tables if needed and validate
         * them. Clearing of new page tables and validate need to wait
         * on move fences. We don't want that to trigger the eviction
         * fence, so remove it temporarily.
         */
        amdgpu_amdkfd_remove_eviction_fence(pd,
                                        vm->process_info->eviction_fence,
                                        NULL, NULL);

        ret = amdgpu_vm_alloc_pts(adev, vm, va, amdgpu_bo_size(bo));
        if (ret) {
                pr_err("Failed to allocate pts, err=%d\n", ret);
                goto err_alloc_pts;
        }

        ret = vm_validate_pt_pd_bos(vm);
        if (ret) {
                pr_err("validate_pt_pd_bos() failed\n");
                goto err_alloc_pts;
        }

        /* Add the eviction fence back */
        amdgpu_bo_fence(pd, &vm->process_info->eviction_fence->base, true);

        return 0;

err_alloc_pts:
        amdgpu_bo_fence(pd, &vm->process_info->eviction_fence->base, true);
        amdgpu_vm_bo_rmv(adev, bo_va_entry->bo_va);
        list_del(&bo_va_entry->bo_list);
err_vmadd:
        kfree(bo_va_entry);
        return ret;
}
static void remove_bo_from_vm(struct amdgpu_device *adev,
                struct kfd_bo_va_list *entry, unsigned long size)
{
        pr_debug("\t remove VA 0x%llx - 0x%llx in entry %p\n",
                        entry->va,
                        entry->va + size, entry);
        amdgpu_vm_bo_rmv(adev, entry->bo_va);
        list_del(&entry->bo_list);
        kfree(entry);
}
static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
                                struct amdkfd_process_info *process_info,
                                bool userptr)
{
        struct ttm_validate_buffer *entry = &mem->validate_list;
        struct amdgpu_bo *bo = mem->bo;

        INIT_LIST_HEAD(&entry->head);
        entry->shared = true;
        entry->bo = &bo->tbo;
        mutex_lock(&process_info->lock);
        if (userptr)
                list_add_tail(&entry->head, &process_info->userptr_valid_list);
        else
                list_add_tail(&entry->head, &process_info->kfd_bo_list);
        mutex_unlock(&process_info->lock);
}
/* Initializes user pages. It registers the MMU notifier and validates
 * the userptr BO in the GTT domain.
 *
 * The BO must already be on the userptr_valid_list. Otherwise an
 * eviction and restore may happen that leaves the new BO unmapped
 * with the user mode queues running.
 *
 * Takes the process_info->lock to protect against concurrent restore
 * workers.
 *
 * Returns 0 for success, negative errno for errors.
 */
static int init_user_pages(struct kgd_mem *mem, struct mm_struct *mm,
                           uint64_t user_addr)
{
        struct amdkfd_process_info *process_info = mem->process_info;
        struct amdgpu_bo *bo = mem->bo;
        struct ttm_operation_ctx ctx = { true, false };
        int ret = 0;

        mutex_lock(&process_info->lock);

        ret = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, user_addr, 0);
        if (ret) {
                pr_err("%s: Failed to set userptr: %d\n", __func__, ret);
                goto out;
        }

        ret = amdgpu_mn_register(bo, user_addr);
        if (ret) {
                pr_err("%s: Failed to register MMU notifier: %d\n",
                       __func__, ret);
                goto out;
        }

        /* If no restore worker is running concurrently, user_pages
         * should not be allocated
         */
        WARN(mem->user_pages, "Leaking user_pages array");

        mem->user_pages = kvmalloc_array(bo->tbo.ttm->num_pages,
                                           sizeof(struct page *),
                                           GFP_KERNEL | __GFP_ZERO);
        if (!mem->user_pages) {
                pr_err("%s: Failed to allocate pages array\n", __func__);
                ret = -ENOMEM;
                goto unregister_out;
        }

        ret = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm, mem->user_pages);
        if (ret) {
                pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
                goto free_out;
        }

        amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, mem->user_pages);

        ret = amdgpu_bo_reserve(bo, true);
        if (ret) {
                pr_err("%s: Failed to reserve BO\n", __func__);
                goto release_out;
        }
        amdgpu_bo_placement_from_domain(bo, mem->domain);
        ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
        if (ret)
                pr_err("%s: failed to validate BO\n", __func__);
        amdgpu_bo_unreserve(bo);

release_out:
        if (ret)
                release_pages(mem->user_pages, bo->tbo.ttm->num_pages);
free_out:
        kvfree(mem->user_pages);
        mem->user_pages = NULL;
unregister_out:
        if (ret)
                amdgpu_mn_unregister(bo);
out:
        mutex_unlock(&process_info->lock);
        return ret;
}
/* Reserving a BO and its page table BOs must happen atomically to
 * avoid deadlocks. Some operations update multiple VMs at once. Track
 * all the reservation info in a context structure. Optionally a sync
 * object can track VM updates.
 */
struct bo_vm_reservation_context {
        struct amdgpu_bo_list_entry kfd_bo; /* BO list entry for the KFD BO */
        unsigned int n_vms;                 /* Number of VMs reserved       */
        struct amdgpu_bo_list_entry *vm_pd; /* Array of VM BO list entries  */
        struct ww_acquire_ctx ticket;       /* Reservation ticket           */
        struct list_head list, duplicates;  /* BO lists                     */
        struct amdgpu_sync *sync;           /* Pointer to sync object       */
        bool reserved;                      /* Whether BOs are reserved     */
};

enum bo_vm_match {
        BO_VM_NOT_MAPPED = 0,   /* Match VMs where a BO is not mapped */
        BO_VM_MAPPED,           /* Match VMs where a BO is mapped     */
        BO_VM_ALL,              /* Match all VMs a BO was added to    */
};
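
/*
 * Typical usage of the reservation context (a sketch, mirroring the callers
 * further below):
 *
 *      struct bo_vm_reservation_context ctx;
 *
 *      ret = reserve_bo_and_cond_vms(mem, vm, BO_VM_MAPPED, &ctx);
 *      if (!ret) {
 *              ... update mappings, adding fences to ctx.sync ...
 *              unreserve_bo_and_vms(&ctx, false, false);
 *      }
 */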
/**
 * reserve_bo_and_vm - reserve a BO and a VM unconditionally.
 * @mem: KFD BO structure.
 * @vm: the VM to reserve.
 * @ctx: the struct that will be used in unreserve_bo_and_vms().
 */
static int reserve_bo_and_vm(struct kgd_mem *mem,
                              struct amdgpu_vm *vm,
                              struct bo_vm_reservation_context *ctx)
{
        struct amdgpu_bo *bo = mem->bo;
        int ret;

        WARN_ON(!vm);

        ctx->reserved = false;
        ctx->n_vms = 1;
        ctx->sync = &mem->sync;

        INIT_LIST_HEAD(&ctx->list);
        INIT_LIST_HEAD(&ctx->duplicates);

        ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), GFP_KERNEL);
        if (!ctx->vm_pd)
                return -ENOMEM;

        ctx->kfd_bo.robj = bo;
        ctx->kfd_bo.priority = 0;
        ctx->kfd_bo.tv.bo = &bo->tbo;
        ctx->kfd_bo.tv.shared = true;
        ctx->kfd_bo.user_pages = NULL;
        list_add(&ctx->kfd_bo.tv.head, &ctx->list);

        amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);

        ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
                                     false, &ctx->duplicates);
        if (!ret)
                ctx->reserved = true;
        else {
                pr_err("Failed to reserve buffers in ttm\n");
                kfree(ctx->vm_pd);
                ctx->vm_pd = NULL;
        }

        return ret;
}
/**
 * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally
 * @mem: KFD BO structure.
 * @vm: the VM to reserve. If NULL, then all VMs associated with the BO
 * are used. Otherwise, a single VM associated with the BO.
 * @map_type: the mapping status that will be used to filter the VMs.
 * @ctx: the struct that will be used in unreserve_bo_and_vms().
 *
 * Returns 0 for success, negative for failure.
 */
static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
                                struct amdgpu_vm *vm, enum bo_vm_match map_type,
                                struct bo_vm_reservation_context *ctx)
{
        struct amdgpu_bo *bo = mem->bo;
        struct kfd_bo_va_list *entry;
        unsigned int i;
        int ret;

        ctx->reserved = false;
        ctx->n_vms = 0;
        ctx->vm_pd = NULL;
        ctx->sync = &mem->sync;

        INIT_LIST_HEAD(&ctx->list);
        INIT_LIST_HEAD(&ctx->duplicates);

        list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
                if ((vm && vm != entry->bo_va->base.vm) ||
                        (entry->is_mapped != map_type
                        && map_type != BO_VM_ALL))
                        continue;

                ctx->n_vms++;
        }

        if (ctx->n_vms != 0) {
                ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd),
                                     GFP_KERNEL);
                if (!ctx->vm_pd)
                        return -ENOMEM;
        }

        ctx->kfd_bo.robj = bo;
        ctx->kfd_bo.priority = 0;
        ctx->kfd_bo.tv.bo = &bo->tbo;
        ctx->kfd_bo.tv.shared = true;
        ctx->kfd_bo.user_pages = NULL;
        list_add(&ctx->kfd_bo.tv.head, &ctx->list);

        i = 0;
        list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
                if ((vm && vm != entry->bo_va->base.vm) ||
                        (entry->is_mapped != map_type
                        && map_type != BO_VM_ALL))
                        continue;

                amdgpu_vm_get_pd_bo(entry->bo_va->base.vm, &ctx->list,
                                    &ctx->vm_pd[i]);
                i++;
        }

        ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
                                     false, &ctx->duplicates);
        if (!ret)
                ctx->reserved = true;
        else
                pr_err("Failed to reserve buffers in ttm.\n");

        if (ret) {
                kfree(ctx->vm_pd);
                ctx->vm_pd = NULL;
        }

        return ret;
}
/**
 * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context
 * @ctx: Reservation context to unreserve
 * @wait: Optionally wait for a sync object representing pending VM updates
 * @intr: Whether the wait is interruptible
 *
 * Also frees any resources allocated in
 * reserve_bo_and_(cond_)vm(s). Returns the status from
 * amdgpu_sync_wait.
 */
static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
                                 bool wait, bool intr)
{
        int ret = 0;

        if (wait)
                ret = amdgpu_sync_wait(ctx->sync, intr);

        if (ctx->reserved)
                ttm_eu_backoff_reservation(&ctx->ticket, &ctx->list);
        kfree(ctx->vm_pd);

        ctx->sync = NULL;

        ctx->reserved = false;
        ctx->vm_pd = NULL;

        return ret;
}
static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
                                struct kfd_bo_va_list *entry,
                                struct amdgpu_sync *sync)
{
        struct amdgpu_bo_va *bo_va = entry->bo_va;
        struct amdgpu_vm *vm = bo_va->base.vm;
        struct amdgpu_bo *pd = vm->root.base.bo;

        /* Remove eviction fence from PD (and thereby from PTs too as
         * they share the resv. object). Otherwise during PT update
         * job (see amdgpu_vm_bo_update_mapping), eviction fence would
         * get added to job->sync object and job execution would
         * trigger the eviction fence.
         */
        amdgpu_amdkfd_remove_eviction_fence(pd,
                                            vm->process_info->eviction_fence,
                                            NULL, NULL);
        amdgpu_vm_bo_unmap(adev, bo_va, entry->va);

        amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);

        /* Add the eviction fence back */
        amdgpu_bo_fence(pd, &vm->process_info->eviction_fence->base, true);

        sync_vm_fence(adev, sync, bo_va->last_pt_update);

        return 0;
}
static int update_gpuvm_pte(struct amdgpu_device *adev,
                struct kfd_bo_va_list *entry,
                struct amdgpu_sync *sync)
{
        int ret;
        struct amdgpu_vm *vm;
        struct amdgpu_bo_va *bo_va;
        struct amdgpu_bo *bo;

        bo_va = entry->bo_va;
        vm = bo_va->base.vm;
        bo = bo_va->base.bo;

        /* Update the page tables  */
        ret = amdgpu_vm_bo_update(adev, bo_va, false);
        if (ret) {
                pr_err("amdgpu_vm_bo_update failed\n");
                return ret;
        }

        return sync_vm_fence(adev, sync, bo_va->last_pt_update);
}
static int map_bo_to_gpuvm(struct amdgpu_device *adev,
                struct kfd_bo_va_list *entry, struct amdgpu_sync *sync,
                bool no_update_pte)
{
        int ret;

        /* Set virtual address for the allocation */
        ret = amdgpu_vm_bo_map(adev, entry->bo_va, entry->va, 0,
                               amdgpu_bo_size(entry->bo_va->base.bo),
                               entry->pte_flags);
        if (ret) {
                pr_err("Failed to map VA 0x%llx in vm. ret %d\n",
                                entry->va, ret);
                return ret;
        }

        if (no_update_pte)
                return 0;

        ret = update_gpuvm_pte(adev, entry, sync);
        if (ret) {
                pr_err("update_gpuvm_pte() failed\n");
                goto update_gpuvm_pte_failed;
        }

        return 0;

update_gpuvm_pte_failed:
        unmap_bo_from_gpuvm(adev, entry, sync);
        return ret;
}
static int process_validate_vms(struct amdkfd_process_info *process_info)
{
        struct amdgpu_vm *peer_vm;
        int ret;

        list_for_each_entry(peer_vm, &process_info->vm_list_head,
                            vm_list_node) {
                ret = vm_validate_pt_pd_bos(peer_vm);
                if (ret)
                        return ret;
        }

        return 0;
}
static int process_update_pds(struct amdkfd_process_info *process_info,
                              struct amdgpu_sync *sync)
{
        struct amdgpu_vm *peer_vm;
        int ret;

        list_for_each_entry(peer_vm, &process_info->vm_list_head,
                            vm_list_node) {
                ret = vm_update_pds(peer_vm, sync);
                if (ret)
                        return ret;
        }

        return 0;
}
static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
                       struct dma_fence **ef)
{
        struct amdkfd_process_info *info = NULL;
        int ret;

        if (!*process_info) {
                info = kzalloc(sizeof(*info), GFP_KERNEL);
                if (!info)
                        return -ENOMEM;

                mutex_init(&info->lock);
                INIT_LIST_HEAD(&info->vm_list_head);
                INIT_LIST_HEAD(&info->kfd_bo_list);
                INIT_LIST_HEAD(&info->userptr_valid_list);
                INIT_LIST_HEAD(&info->userptr_inval_list);

                info->eviction_fence =
                        amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
                                                   current->mm);
                if (!info->eviction_fence) {
                        pr_err("Failed to create eviction fence\n");
                        ret = -ENOMEM;
                        goto create_evict_fence_fail;
                }

                info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
                atomic_set(&info->evicted_bos, 0);
                INIT_DELAYED_WORK(&info->restore_userptr_work,
                                  amdgpu_amdkfd_restore_userptr_worker);

                *process_info = info;
                *ef = dma_fence_get(&info->eviction_fence->base);
        }

        vm->process_info = *process_info;

        /* Validate page directory and attach eviction fence */
        ret = amdgpu_bo_reserve(vm->root.base.bo, true);
        if (ret)
                goto reserve_pd_fail;
        ret = vm_validate_pt_pd_bos(vm);
        if (ret) {
                pr_err("validate_pt_pd_bos() failed\n");
                goto validate_pd_fail;
        }
        ret = ttm_bo_wait(&vm->root.base.bo->tbo, false, false);
        if (ret)
                goto wait_pd_fail;
        amdgpu_bo_fence(vm->root.base.bo,
                        &vm->process_info->eviction_fence->base, true);
        amdgpu_bo_unreserve(vm->root.base.bo);

        /* Update process info */
        mutex_lock(&vm->process_info->lock);
        list_add_tail(&vm->vm_list_node,
                        &(vm->process_info->vm_list_head));
        vm->process_info->n_vms++;
        mutex_unlock(&vm->process_info->lock);

        return 0;

wait_pd_fail:
validate_pd_fail:
        amdgpu_bo_unreserve(vm->root.base.bo);
reserve_pd_fail:
        vm->process_info = NULL;
        if (info) {
                /* Two fence references: one in info and one in *ef */
                dma_fence_put(&info->eviction_fence->base);
                dma_fence_put(*ef);
                *ef = NULL;
                *process_info = NULL;
                put_pid(info->pid);
create_evict_fence_fail:
                mutex_destroy(&info->lock);
                kfree(info);
        }
        return ret;
}
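
/*
 * One amdkfd_process_info is shared by all compute VMs of a process (one VM
 * per GPU). The first init_kfd_vm() call allocates it together with the
 * eviction fence; later calls only attach the additional VM and bump n_vms.
 * The fence reference returned in *ef is the one the KFD process structure
 * holds on to.
 */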
int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, void **vm,
                                          void **process_info,
                                          struct dma_fence **ef)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        struct amdgpu_vm *new_vm;
        int ret;

        new_vm = kzalloc(sizeof(*new_vm), GFP_KERNEL);
        if (!new_vm)
                return -ENOMEM;

        /* Initialize AMDGPU part of the VM */
        ret = amdgpu_vm_init(adev, new_vm, AMDGPU_VM_CONTEXT_COMPUTE, 0);
        if (ret) {
                pr_err("Failed init vm ret %d\n", ret);
                goto amdgpu_vm_init_fail;
        }

        /* Initialize KFD part of the VM and process info */
        ret = init_kfd_vm(new_vm, process_info, ef);
        if (ret)
                goto init_kfd_vm_fail;

        *vm = (void *) new_vm;

        return 0;

init_kfd_vm_fail:
        amdgpu_vm_fini(adev, new_vm);
amdgpu_vm_init_fail:
        kfree(new_vm);
        return ret;
}
int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
                                           struct file *filp,
                                           void **vm, void **process_info,
                                           struct dma_fence **ef)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        struct drm_file *drm_priv = filp->private_data;
        struct amdgpu_fpriv *drv_priv = drm_priv->driver_priv;
        struct amdgpu_vm *avm = &drv_priv->vm;
        int ret;

        /* Already a compute VM? */
        if (avm->process_info)
                return -EINVAL;

        /* Convert VM into a compute VM */
        ret = amdgpu_vm_make_compute(adev, avm);
        if (ret)
                return ret;

        /* Initialize KFD part of the VM and process info */
        ret = init_kfd_vm(avm, process_info, ef);
        if (ret)
                return ret;

        *vm = (void *)avm;

        return 0;
}
void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
                                    struct amdgpu_vm *vm)
{
        struct amdkfd_process_info *process_info = vm->process_info;
        struct amdgpu_bo *pd = vm->root.base.bo;

        if (!process_info)
                return;

        /* Release eviction fence from PD */
        amdgpu_bo_reserve(pd, false);
        amdgpu_bo_fence(pd, NULL, false);
        amdgpu_bo_unreserve(pd);

        /* Update process info */
        mutex_lock(&process_info->lock);
        process_info->n_vms--;
        list_del(&vm->vm_list_node);
        mutex_unlock(&process_info->lock);

        /* Release per-process resources when last compute VM is destroyed */
        if (!process_info->n_vms) {
                WARN_ON(!list_empty(&process_info->kfd_bo_list));
                WARN_ON(!list_empty(&process_info->userptr_valid_list));
                WARN_ON(!list_empty(&process_info->userptr_inval_list));

                dma_fence_put(&process_info->eviction_fence->base);
                cancel_delayed_work_sync(&process_info->restore_userptr_work);
                put_pid(process_info->pid);
                mutex_destroy(&process_info->lock);
                kfree(process_info);
        }
}
void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;

        if (WARN_ON(!kgd || !vm))
                return;

        pr_debug("Destroying process vm %p\n", vm);

        /* Release the VM context */
        amdgpu_vm_fini(adev, avm);
        kfree(vm);
}
uint32_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm)
{
        struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;

        return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
}
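
/*
 * Note: the value returned above is the page directory base address expressed
 * in GPU page units (AMDGPU_GPU_PAGE_SHIFT, i.e. 4 KB pages), which is the
 * granularity KFD uses when setting up the process VM context.
 */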
int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
                struct kgd_dev *kgd, uint64_t va, uint64_t size,
                void *vm, struct kgd_mem **mem,
                uint64_t *offset, uint32_t flags)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
        uint64_t user_addr = 0;
        struct amdgpu_bo *bo;
        struct amdgpu_bo_param bp;
        int byte_align;
        u32 domain, alloc_domain;
        u64 alloc_flags;
        uint32_t mapping_flags;
        int ret;

        /*
         * Check on which domain to allocate BO
         */
        if (flags & ALLOC_MEM_FLAGS_VRAM) {
                domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
                alloc_flags = AMDGPU_GEM_CREATE_VRAM_CLEARED;
                alloc_flags |= (flags & ALLOC_MEM_FLAGS_PUBLIC) ?
                        AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED :
                        AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
        } else if (flags & ALLOC_MEM_FLAGS_GTT) {
                domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
                alloc_flags = 0;
        } else if (flags & ALLOC_MEM_FLAGS_USERPTR) {
                domain = AMDGPU_GEM_DOMAIN_GTT;
                alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
                alloc_flags = 0;
                if (!offset || !*offset)
                        return -EINVAL;
                user_addr = *offset;
        } else {
                return -EINVAL;
        }

        *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
        if (!*mem)
                return -ENOMEM;
        INIT_LIST_HEAD(&(*mem)->bo_va_list);
        mutex_init(&(*mem)->lock);
        (*mem)->aql_queue = !!(flags & ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);

        /* Workaround for AQL queue wraparound bug. Map the same
         * memory twice. That means we only actually allocate half
         * the size.
         */
        if ((*mem)->aql_queue)
                size = size >> 1;

        /* Workaround for TLB bug on older VI chips */
        byte_align = (adev->family == AMDGPU_FAMILY_VI &&
                        adev->asic_type != CHIP_FIJI &&
                        adev->asic_type != CHIP_POLARIS10 &&
                        adev->asic_type != CHIP_POLARIS11) ?
                        VI_BO_SIZE_ALIGN : 1;

        mapping_flags = AMDGPU_VM_PAGE_READABLE;
        if (flags & ALLOC_MEM_FLAGS_WRITABLE)
                mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
        if (flags & ALLOC_MEM_FLAGS_EXECUTABLE)
                mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
        if (flags & ALLOC_MEM_FLAGS_COHERENT)
                mapping_flags |= AMDGPU_VM_MTYPE_UC;
        else
                mapping_flags |= AMDGPU_VM_MTYPE_NC;
        (*mem)->mapping_flags = mapping_flags;

        amdgpu_sync_create(&(*mem)->sync);

        ret = amdgpu_amdkfd_reserve_system_mem_limit(adev, size, alloc_domain);
        if (ret) {
                pr_debug("Insufficient system memory\n");
                goto err_reserve_system_mem;
        }

        pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
                        va, size, domain_string(alloc_domain));

        memset(&bp, 0, sizeof(bp));
        bp.size = size;
        bp.byte_align = byte_align;
        bp.domain = alloc_domain;
        bp.flags = alloc_flags;
        bp.type = ttm_bo_type_device;
        bp.resv = NULL;
        ret = amdgpu_bo_create(adev, &bp, &bo);
        if (ret) {
                pr_debug("Failed to create BO on domain %s. ret %d\n",
                                domain_string(alloc_domain), ret);
                goto err_bo_create;
        }
        (*mem)->bo = bo;
        if (user_addr)
                bo->flags |= AMDGPU_AMDKFD_USERPTR_BO;

        (*mem)->va = va;
        (*mem)->domain = domain;
        (*mem)->mapped_to_gpu_memory = 0;
        (*mem)->process_info = avm->process_info;
        add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);

        if (user_addr) {
                ret = init_user_pages(*mem, current->mm, user_addr);
                if (ret) {
                        mutex_lock(&avm->process_info->lock);
                        list_del(&(*mem)->validate_list.head);
                        mutex_unlock(&avm->process_info->lock);
                        goto allocate_init_user_pages_failed;
                }
        }

        if (offset)
                *offset = amdgpu_bo_mmap_offset(bo);

        return 0;

allocate_init_user_pages_failed:
        amdgpu_bo_unref(&bo);
        /* Don't unreserve system mem limit twice */
        goto err_reserve_system_mem;
err_bo_create:
        unreserve_system_mem_limit(adev, size, alloc_domain);
err_reserve_system_mem:
        mutex_destroy(&(*mem)->lock);
        kfree(*mem);
        return ret;
}
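
/*
 * AQL wraparound workaround in numbers (based on the code above): for an AQL
 * queue the caller's size is halved before allocation, and add_bo_to_vm() is
 * later called a second time with is_aql = true so the same half-sized BO is
 * mapped again at va + bo_size. The GPU therefore effectively sees one
 * contiguous, double-length mapping backed by a single allocation.
 */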
int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
                struct kgd_dev *kgd, struct kgd_mem *mem)
{
        struct amdkfd_process_info *process_info = mem->process_info;
        unsigned long bo_size = mem->bo->tbo.mem.size;
        struct kfd_bo_va_list *entry, *tmp;
        struct bo_vm_reservation_context ctx;
        struct ttm_validate_buffer *bo_list_entry;
        int ret;

        mutex_lock(&mem->lock);

        if (mem->mapped_to_gpu_memory > 0) {
                pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
                                mem->va, bo_size);
                mutex_unlock(&mem->lock);
                return -EBUSY;
        }

        mutex_unlock(&mem->lock);
        /* lock is not needed after this, since mem is unused and will
         * be freed anyway
         */

        /* No more MMU notifiers */
        amdgpu_mn_unregister(mem->bo);

        /* Make sure restore workers don't access the BO any more */
        bo_list_entry = &mem->validate_list;
        mutex_lock(&process_info->lock);
        list_del(&bo_list_entry->head);
        mutex_unlock(&process_info->lock);

        /* Free user pages if necessary */
        if (mem->user_pages) {
                pr_debug("%s: Freeing user_pages array\n", __func__);
                if (mem->user_pages[0])
                        release_pages(mem->user_pages,
                                        mem->bo->tbo.ttm->num_pages);
                kvfree(mem->user_pages);
        }

        ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
        if (unlikely(ret))
                return ret;

        /* The eviction fence should be removed by the last unmap.
         * TODO: Log an error condition if the bo still has the eviction fence
         * attached
         */
        amdgpu_amdkfd_remove_eviction_fence(mem->bo,
                                        process_info->eviction_fence,
                                        NULL, NULL);
        pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
                mem->va + bo_size * (1 + mem->aql_queue));

        /* Remove from VM internal data structures */
        list_for_each_entry_safe(entry, tmp, &mem->bo_va_list, bo_list)
                remove_bo_from_vm((struct amdgpu_device *)entry->kgd_dev,
                                entry, bo_size);

        ret = unreserve_bo_and_vms(&ctx, false, false);

        /* Free the sync object */
        amdgpu_sync_free(&mem->sync);

        /* Free the BO */
        amdgpu_bo_unref(&mem->bo);
        mutex_destroy(&mem->lock);
        kfree(mem);

        return ret;
}
int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
                struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
        int ret;
        struct amdgpu_bo *bo;
        uint32_t domain;
        struct kfd_bo_va_list *entry;
        struct bo_vm_reservation_context ctx;
        struct kfd_bo_va_list *bo_va_entry = NULL;
        struct kfd_bo_va_list *bo_va_entry_aql = NULL;
        unsigned long bo_size;
        bool is_invalid_userptr = false;

        bo = mem->bo;
        if (!bo) {
                pr_err("Invalid BO when mapping memory to GPU\n");
                return -EINVAL;
        }

        /* Make sure restore is not running concurrently. Since we
         * don't map invalid userptr BOs, we rely on the next restore
         * worker to do the mapping
         */
        mutex_lock(&mem->process_info->lock);

        /* Lock mmap-sem. If we find an invalid userptr BO, we can be
         * sure that the MMU notifier is no longer running
         * concurrently and the queues are actually stopped
         */
        if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
                down_write(&current->mm->mmap_sem);
                is_invalid_userptr = atomic_read(&mem->invalid);
                up_write(&current->mm->mmap_sem);
        }

        mutex_lock(&mem->lock);

        domain = mem->domain;
        bo_size = bo->tbo.mem.size;

        pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
                        mem->va,
                        mem->va + bo_size * (1 + mem->aql_queue),
                        vm, domain_string(domain));

        ret = reserve_bo_and_vm(mem, vm, &ctx);
        if (unlikely(ret))
                goto out;

        /* Userptr can be marked as "not invalid", but not actually be
         * validated yet (still in the system domain). In that case
         * the queues are still stopped and we can leave mapping for
         * the next restore worker
         */
        if (bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
                is_invalid_userptr = true;

        if (check_if_add_bo_to_vm(avm, mem)) {
                ret = add_bo_to_vm(adev, mem, avm, false,
                                &bo_va_entry);
                if (ret)
                        goto add_bo_to_vm_failed;
                if (mem->aql_queue) {
                        ret = add_bo_to_vm(adev, mem, avm,
                                        true, &bo_va_entry_aql);
                        if (ret)
                                goto add_bo_to_vm_failed_aql;
                }
        } else {
                ret = vm_validate_pt_pd_bos(avm);
                if (unlikely(ret))
                        goto add_bo_to_vm_failed;
        }

        if (mem->mapped_to_gpu_memory == 0 &&
            !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
                /* Validate BO only once. The eviction fence gets added to BO
                 * the first time it is mapped. Validate will wait for all
                 * background evictions to complete.
                 */
                ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
                if (ret) {
                        pr_debug("Validate failed\n");
                        goto map_bo_to_gpuvm_failed;
                }
        }

        list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
                if (entry->bo_va->base.vm == vm && !entry->is_mapped) {
                        pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
                                        entry->va, entry->va + bo_size,
                                        entry);

                        ret = map_bo_to_gpuvm(adev, entry, ctx.sync,
                                              is_invalid_userptr);
                        if (ret) {
                                pr_err("Failed to map radeon bo to gpuvm\n");
                                goto map_bo_to_gpuvm_failed;
                        }

                        ret = vm_update_pds(vm, ctx.sync);
                        if (ret) {
                                pr_err("Failed to update page directories\n");
                                goto map_bo_to_gpuvm_failed;
                        }

                        entry->is_mapped = true;
                        mem->mapped_to_gpu_memory++;
                        pr_debug("\t INC mapping count %d\n",
                                 mem->mapped_to_gpu_memory);
                }
        }

        if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->pin_count)
                amdgpu_bo_fence(bo,
                                &avm->process_info->eviction_fence->base,
                                true);
        ret = unreserve_bo_and_vms(&ctx, false, false);

        goto out;

map_bo_to_gpuvm_failed:
        if (bo_va_entry_aql)
                remove_bo_from_vm(adev, bo_va_entry_aql, bo_size);
add_bo_to_vm_failed_aql:
        if (bo_va_entry)
                remove_bo_from_vm(adev, bo_va_entry, bo_size);
add_bo_to_vm_failed:
        unreserve_bo_and_vms(&ctx, false, false);
out:
        mutex_unlock(&mem->process_info->lock);
        mutex_unlock(&mem->lock);
        return ret;
}
int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
                struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        struct amdkfd_process_info *process_info =
                ((struct amdgpu_vm *)vm)->process_info;
        unsigned long bo_size = mem->bo->tbo.mem.size;
        struct kfd_bo_va_list *entry;
        struct bo_vm_reservation_context ctx;
        int ret;

        mutex_lock(&mem->lock);

        ret = reserve_bo_and_cond_vms(mem, vm, BO_VM_MAPPED, &ctx);
        if (unlikely(ret))
                goto out;
        /* If no VMs were reserved, it means the BO wasn't actually mapped */
        if (ctx.n_vms == 0) {
                ret = -EINVAL;
                goto unreserve_out;
        }

        ret = vm_validate_pt_pd_bos((struct amdgpu_vm *)vm);
        if (unlikely(ret))
                goto unreserve_out;

        pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n",
                mem->va,
                mem->va + bo_size * (1 + mem->aql_queue),
                vm);

        list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
                if (entry->bo_va->base.vm == vm && entry->is_mapped) {
                        pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
                                        entry->va,
                                        entry->va + bo_size,
                                        entry);

                        ret = unmap_bo_from_gpuvm(adev, entry, ctx.sync);
                        if (ret == 0) {
                                entry->is_mapped = false;
                        } else {
                                pr_err("failed to unmap VA 0x%llx\n",
                                                mem->va);
                                goto unreserve_out;
                        }

                        mem->mapped_to_gpu_memory--;
                        pr_debug("\t DEC mapping count %d\n",
                                 mem->mapped_to_gpu_memory);
                }
        }

        /* If BO is unmapped from all VMs, unfence it. It can be evicted if
         * required.
         */
        if (mem->mapped_to_gpu_memory == 0 &&
            !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && !mem->bo->pin_count)
                amdgpu_amdkfd_remove_eviction_fence(mem->bo,
                                                process_info->eviction_fence,
                                                NULL, NULL);

unreserve_out:
        unreserve_bo_and_vms(&ctx, false, false);
out:
        mutex_unlock(&mem->lock);
        return ret;
}
int amdgpu_amdkfd_gpuvm_sync_memory(
                struct kgd_dev *kgd, struct kgd_mem *mem, bool intr)
{
        struct amdgpu_sync sync;
        int ret;

        amdgpu_sync_create(&sync);

        mutex_lock(&mem->lock);
        amdgpu_sync_clone(&mem->sync, &sync);
        mutex_unlock(&mem->lock);

        ret = amdgpu_sync_wait(&sync, intr);
        amdgpu_sync_free(&sync);
        return ret;
}
int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
                struct kgd_mem *mem, void **kptr, uint64_t *size)
{
        int ret;
        struct amdgpu_bo *bo = mem->bo;

        if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
                pr_err("userptr can't be mapped to kernel\n");
                return -EINVAL;
        }

        /* delete kgd_mem from kfd_bo_list to avoid re-validating
         * this BO in BO's restoring after eviction.
         */
        mutex_lock(&mem->process_info->lock);

        ret = amdgpu_bo_reserve(bo, true);
        if (ret) {
                pr_err("Failed to reserve bo. ret %d\n", ret);
                goto bo_reserve_failed;
        }

        ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
        if (ret) {
                pr_err("Failed to pin bo. ret %d\n", ret);
                goto pin_failed;
        }

        ret = amdgpu_bo_kmap(bo, kptr);
        if (ret) {
                pr_err("Failed to map bo to kernel. ret %d\n", ret);
                goto kmap_failed;
        }

        amdgpu_amdkfd_remove_eviction_fence(
                bo, mem->process_info->eviction_fence, NULL, NULL);
        list_del_init(&mem->validate_list.head);

        if (size)
                *size = amdgpu_bo_size(bo);

        amdgpu_bo_unreserve(bo);

        mutex_unlock(&mem->process_info->lock);
        return 0;

kmap_failed:
        amdgpu_bo_unpin(bo);
pin_failed:
        amdgpu_bo_unreserve(bo);
bo_reserve_failed:
        mutex_unlock(&mem->process_info->lock);

        return ret;
}
int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
                                          struct kfd_vm_fault_info *mem)
{
        struct amdgpu_device *adev;

        adev = (struct amdgpu_device *)kgd;
        if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
                *mem = *adev->gmc.vm_fault_info;
                mb();
                atomic_set(&adev->gmc.vm_fault_info_updated, 0);
        }
        return 0;
}
/* Evict a userptr BO by stopping the queues if necessary
 *
 * Runs in MMU notifier, may be in RECLAIM_FS context. This means it
 * cannot do any memory allocations, and cannot take any locks that
 * are held elsewhere while allocating memory. Therefore this is as
 * simple as possible, using atomic counters.
 *
 * It doesn't do anything to the BO itself. The real work happens in
 * restore, where we get updated page addresses. This function only
 * ensures that GPU access to the BO is stopped.
 */
int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
                                struct mm_struct *mm)
{
        struct amdkfd_process_info *process_info = mem->process_info;
        int invalid, evicted_bos;
        int r = 0;

        invalid = atomic_inc_return(&mem->invalid);
        evicted_bos = atomic_inc_return(&process_info->evicted_bos);
        if (evicted_bos == 1) {
                /* First eviction, stop the queues */
                r = kgd2kfd->quiesce_mm(mm);
                if (r)
                        pr_err("Failed to quiesce KFD\n");
                schedule_delayed_work(&process_info->restore_userptr_work,
                        msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
        }

        return r;
}
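
/*
 * Eviction/restore hand-shake: the first eviction (evicted_bos 0 -> 1)
 * quiesces the user mode queues and schedules the restore worker; further
 * evictions only bump the counter. The restore worker later tries to reset
 * the counter to 0 with atomic_cmpxchg() and only resumes the queues if no
 * new eviction slipped in meanwhile.
 */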
/* Update invalid userptr BOs
 *
 * Moves invalidated (evicted) userptr BOs from userptr_valid_list to
 * userptr_inval_list and updates user pages for all BOs that have
 * been invalidated since their last update.
 */
static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
                                     struct mm_struct *mm)
{
        struct kgd_mem *mem, *tmp_mem;
        struct amdgpu_bo *bo;
        struct ttm_operation_ctx ctx = { false, false };
        int invalid, ret;

        /* Move all invalidated BOs to the userptr_inval_list and
         * release their user pages by migration to the CPU domain
         */
        list_for_each_entry_safe(mem, tmp_mem,
                                 &process_info->userptr_valid_list,
                                 validate_list.head) {
                if (!atomic_read(&mem->invalid))
                        continue; /* BO is still valid */

                bo = mem->bo;

                if (amdgpu_bo_reserve(bo, true))
                        return -EAGAIN;
                amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
                ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
                amdgpu_bo_unreserve(bo);
                if (ret) {
                        pr_err("%s: Failed to invalidate userptr BO\n",
                               __func__);
                        return -EAGAIN;
                }

                list_move_tail(&mem->validate_list.head,
                               &process_info->userptr_inval_list);
        }

        if (list_empty(&process_info->userptr_inval_list))
                return 0; /* All evicted userptr BOs were freed */

        /* Go through userptr_inval_list and update any invalid user_pages */
        list_for_each_entry(mem, &process_info->userptr_inval_list,
                            validate_list.head) {
                invalid = atomic_read(&mem->invalid);
                if (!invalid)
                        /* BO hasn't been invalidated since the last
                         * revalidation attempt. Keep its BO list.
                         */
                        continue;

                bo = mem->bo;

                if (!mem->user_pages) {
                        mem->user_pages =
                                kvmalloc_array(bo->tbo.ttm->num_pages,
                                                 sizeof(struct page *),
                                                 GFP_KERNEL | __GFP_ZERO);
                        if (!mem->user_pages) {
                                pr_err("%s: Failed to allocate pages array\n",
                                       __func__);
                                return -ENOMEM;
                        }
                } else if (mem->user_pages[0]) {
                        release_pages(mem->user_pages, bo->tbo.ttm->num_pages);
                }

                /* Get updated user pages */
                ret = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
                                                   mem->user_pages);
                if (ret) {
                        mem->user_pages[0] = NULL;
                        pr_info("%s: Failed to get user pages: %d\n",
                                __func__, ret);
                        /* Pretend it succeeded. It will fail later
                         * with a VM fault if the GPU tries to access
                         * it. Better than hanging indefinitely with
                         * stalled user mode queues.
                         */
                }

                /* Mark the BO as valid unless it was invalidated
                 * again concurrently
                 */
                if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid)
                        return -EAGAIN;
        }

        return 0;
}
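
/*
 * Two-list scheme: userptr BOs normally live on userptr_valid_list; an MMU
 * notifier eviction marks them invalid and the function above moves them to
 * userptr_inval_list after migrating them to the CPU domain (which releases
 * the old pages). validate_invalid_user_pages() below moves them back once
 * fresh pages have been pinned and the GPUVM mappings updated.
 */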
/* Validate invalid userptr BOs
 *
 * Validates BOs on the userptr_inval_list, and moves them back to the
 * userptr_valid_list. Also updates GPUVM page tables with new page
 * addresses and waits for the page table updates to complete.
 */
static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
{
        struct amdgpu_bo_list_entry *pd_bo_list_entries;
        struct list_head resv_list, duplicates;
        struct ww_acquire_ctx ticket;
        struct amdgpu_sync sync;

        struct amdgpu_vm *peer_vm;
        struct kgd_mem *mem, *tmp_mem;
        struct amdgpu_bo *bo;
        struct ttm_operation_ctx ctx = { false, false };
        int i, ret;

        pd_bo_list_entries = kcalloc(process_info->n_vms,
                                     sizeof(struct amdgpu_bo_list_entry),
                                     GFP_KERNEL);
        if (!pd_bo_list_entries) {
                pr_err("%s: Failed to allocate PD BO list entries\n", __func__);
                return -ENOMEM;
        }

        INIT_LIST_HEAD(&resv_list);
        INIT_LIST_HEAD(&duplicates);

        /* Get all the page directory BOs that need to be reserved */
        i = 0;
        list_for_each_entry(peer_vm, &process_info->vm_list_head,
                            vm_list_node)
                amdgpu_vm_get_pd_bo(peer_vm, &resv_list,
                                    &pd_bo_list_entries[i++]);
        /* Add the userptr_inval_list entries to resv_list */
        list_for_each_entry(mem, &process_info->userptr_inval_list,
                            validate_list.head) {
                list_add_tail(&mem->resv_list.head, &resv_list);
                mem->resv_list.bo = mem->validate_list.bo;
                mem->resv_list.shared = mem->validate_list.shared;
        }

        /* Reserve all BOs and page tables for validation */
        ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
        WARN(!list_empty(&duplicates), "Duplicates should be empty");
        if (ret)
                goto out;

        amdgpu_sync_create(&sync);

        /* Avoid triggering eviction fences when unmapping invalid
         * userptr BOs (waits for all fences, doesn't use
         * FENCE_OWNER_VM)
         */
        list_for_each_entry(peer_vm, &process_info->vm_list_head,
                            vm_list_node)
                amdgpu_amdkfd_remove_eviction_fence(peer_vm->root.base.bo,
                                                process_info->eviction_fence,
                                                NULL, NULL);

        ret = process_validate_vms(process_info);
        if (ret)
                goto unreserve_out;

        /* Validate BOs and update GPUVM page tables */
        list_for_each_entry_safe(mem, tmp_mem,
                                 &process_info->userptr_inval_list,
                                 validate_list.head) {
                struct kfd_bo_va_list *bo_va_entry;

                bo = mem->bo;

                /* Copy pages array and validate the BO if we got user pages */
                if (mem->user_pages[0]) {
                        amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
                                                     mem->user_pages);
                        amdgpu_bo_placement_from_domain(bo, mem->domain);
                        ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
                        if (ret) {
                                pr_err("%s: failed to validate BO\n", __func__);
                                goto unreserve_out;
                        }
                }

                /* Validate succeeded, now the BO owns the pages, free
                 * our copy of the pointer array. Put this BO back on
                 * the userptr_valid_list. If we need to revalidate
                 * it, we need to start from scratch.
                 */
                kvfree(mem->user_pages);
                mem->user_pages = NULL;
                list_move_tail(&mem->validate_list.head,
                               &process_info->userptr_valid_list);

                /* Update mapping. If the BO was not validated
                 * (because we couldn't get user pages), this will
                 * clear the page table entries, which will result in
                 * VM faults if the GPU tries to access the invalid
                 * memory.
                 */
                list_for_each_entry(bo_va_entry, &mem->bo_va_list, bo_list) {
                        if (!bo_va_entry->is_mapped)
                                continue;

                        ret = update_gpuvm_pte((struct amdgpu_device *)
                                               bo_va_entry->kgd_dev,
                                               bo_va_entry, &sync);
                        if (ret) {
                                pr_err("%s: update PTE failed\n", __func__);
                                /* make sure this gets validated again */
                                atomic_inc(&mem->invalid);
                                goto unreserve_out;
                        }
                }
        }

        /* Update page directories */
        ret = process_update_pds(process_info, &sync);

unreserve_out:
        list_for_each_entry(peer_vm, &process_info->vm_list_head,
                            vm_list_node)
                amdgpu_bo_fence(peer_vm->root.base.bo,
                                &process_info->eviction_fence->base, true);
        ttm_eu_backoff_reservation(&ticket, &resv_list);
        amdgpu_sync_wait(&sync, false);
        amdgpu_sync_free(&sync);
out:
        kfree(pd_bo_list_entries);

        return ret;
}
/* Worker callback to restore evicted userptr BOs
 *
 * Tries to update and validate all userptr BOs. If successful and no
 * concurrent evictions happened, the queues are restarted. Otherwise,
 * reschedule for another attempt later.
 */
static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
{
        struct delayed_work *dwork = to_delayed_work(work);
        struct amdkfd_process_info *process_info =
                container_of(dwork, struct amdkfd_process_info,
                             restore_userptr_work);
        struct task_struct *usertask;
        struct mm_struct *mm;
        int evicted_bos;

        evicted_bos = atomic_read(&process_info->evicted_bos);
        if (!evicted_bos)
                return;

        /* Reference task and mm in case of concurrent process termination */
        usertask = get_pid_task(process_info->pid, PIDTYPE_PID);
        if (!usertask)
                return;
        mm = get_task_mm(usertask);
        if (!mm) {
                put_task_struct(usertask);
                return;
        }

        mutex_lock(&process_info->lock);

        if (update_invalid_user_pages(process_info, mm))
                goto unlock_out;
        /* userptr_inval_list can be empty if all evicted userptr BOs
         * have been freed. In that case there is nothing to validate
         * and we can just restart the queues.
         */
        if (!list_empty(&process_info->userptr_inval_list)) {
                if (atomic_read(&process_info->evicted_bos) != evicted_bos)
                        goto unlock_out; /* Concurrent eviction, try again */

                if (validate_invalid_user_pages(process_info))
                        goto unlock_out;
        }
        /* Final check for concurrent eviction and atomic update. If
         * another eviction happens after successful update, it will
         * be a first eviction that calls quiesce_mm. The eviction
         * reference counting inside KFD will handle this case.
         */
        if (atomic_cmpxchg(&process_info->evicted_bos, evicted_bos, 0) !=
            evicted_bos)
                goto unlock_out;
        evicted_bos = 0;
        if (kgd2kfd->resume_mm(mm)) {
                pr_err("%s: Failed to resume KFD\n", __func__);
                /* No recovery from this failure. Probably the CP is
                 * hanging. No point trying again.
                 */
        }
unlock_out:
        mutex_unlock(&process_info->lock);
        mmput(mm);
        put_task_struct(usertask);

        /* If validation failed, reschedule another attempt */
        if (evicted_bos)
                schedule_delayed_work(&process_info->restore_userptr_work,
                        msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
}
/** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
 *   KFD process identified by process_info
 *
 * @process_info: amdkfd_process_info of the KFD process
 *
 * After memory eviction, restore thread calls this function. The function
 * should be called when the Process is still valid. BO restore involves -
 *
 * 1.  Release old eviction fence and create new one
 * 2.  Get two copies of PD BO list from all the VMs. Keep one copy as pd_list.
 * 3.  Use the second PD list and kfd_bo_list to create a list (ctx.list) of
 *     BOs that need to be reserved.
 * 4.  Reserve all the BOs
 * 5.  Validate PD and PT BOs.
 * 6.  Validate all KFD BOs using kfd_bo_list and Map them and add new fence
 * 7.  Add fence to all PD and PT BOs.
 * 8.  Unreserve all BOs
 */
int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
{
        struct amdgpu_bo_list_entry *pd_bo_list;
        struct amdkfd_process_info *process_info = info;
        struct amdgpu_vm *peer_vm;
        struct kgd_mem *mem;
        struct bo_vm_reservation_context ctx;
        struct amdgpu_amdkfd_fence *new_fence;
        int ret = 0, i;
        struct list_head duplicate_save;
        struct amdgpu_sync sync_obj;

        INIT_LIST_HEAD(&duplicate_save);
        INIT_LIST_HEAD(&ctx.list);
        INIT_LIST_HEAD(&ctx.duplicates);

        pd_bo_list = kcalloc(process_info->n_vms,
                             sizeof(struct amdgpu_bo_list_entry),
                             GFP_KERNEL);
        if (!pd_bo_list)
                return -ENOMEM;

        i = 0;
        mutex_lock(&process_info->lock);
        list_for_each_entry(peer_vm, &process_info->vm_list_head,
                        vm_list_node)
                amdgpu_vm_get_pd_bo(peer_vm, &ctx.list, &pd_bo_list[i++]);

        /* Reserve all BOs and page tables/directory. Add all BOs from
         * kfd_bo_list to ctx.list
         */
        list_for_each_entry(mem, &process_info->kfd_bo_list,
                            validate_list.head) {

                list_add_tail(&mem->resv_list.head, &ctx.list);
                mem->resv_list.bo = mem->validate_list.bo;
                mem->resv_list.shared = mem->validate_list.shared;
        }

        ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
                                     false, &duplicate_save);
        if (ret) {
                pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
                goto ttm_reserve_fail;
        }

        amdgpu_sync_create(&sync_obj);

        /* Validate PDs and PTs */
        ret = process_validate_vms(process_info);
        if (ret)
                goto validate_map_fail;

        /* Wait for PD/PTs validate to finish */
        /* FIXME: I think this isn't needed */
        list_for_each_entry(peer_vm, &process_info->vm_list_head,
                            vm_list_node) {
                struct amdgpu_bo *bo = peer_vm->root.base.bo;

                ttm_bo_wait(&bo->tbo, false, false);
        }

        /* Validate BOs and map them to GPUVM (update VM page tables). */
        list_for_each_entry(mem, &process_info->kfd_bo_list,
                            validate_list.head) {

                struct amdgpu_bo *bo = mem->bo;
                uint32_t domain = mem->domain;
                struct kfd_bo_va_list *bo_va_entry;

                ret = amdgpu_amdkfd_bo_validate(bo, domain, false);
                if (ret) {
                        pr_debug("Memory eviction: Validate BOs failed. Try again\n");
                        goto validate_map_fail;
                }

                list_for_each_entry(bo_va_entry, &mem->bo_va_list,
                                    bo_list) {
                        ret = update_gpuvm_pte((struct amdgpu_device *)
                                              bo_va_entry->kgd_dev,
                                              bo_va_entry,
                                              &sync_obj);
                        if (ret) {
                                pr_debug("Memory eviction: update PTE failed. Try again\n");
                                goto validate_map_fail;
                        }
                }
        }

        /* Update page directories */
        ret = process_update_pds(process_info, &sync_obj);
        if (ret) {
                pr_debug("Memory eviction: update PDs failed. Try again\n");
                goto validate_map_fail;
        }

        amdgpu_sync_wait(&sync_obj, false);

        /* Release old eviction fence and create new one, because fence only
         * goes from unsignaled to signaled, fence cannot be reused.
         * Use context and mm from the old fence.
         */
        new_fence = amdgpu_amdkfd_fence_create(
                                process_info->eviction_fence->base.context,
                                process_info->eviction_fence->mm);
        if (!new_fence) {
                pr_err("Failed to create eviction fence\n");
                ret = -ENOMEM;
                goto validate_map_fail;
        }
        dma_fence_put(&process_info->eviction_fence->base);
        process_info->eviction_fence = new_fence;
        *ef = dma_fence_get(&new_fence->base);

        /* Wait for validate to finish and attach new eviction fence */
        list_for_each_entry(mem, &process_info->kfd_bo_list,
                validate_list.head)
                ttm_bo_wait(&mem->bo->tbo, false, false);
        list_for_each_entry(mem, &process_info->kfd_bo_list,
                validate_list.head)
                amdgpu_bo_fence(mem->bo,
                        &process_info->eviction_fence->base, true);

        /* Attach eviction fence to PD / PT BOs */
        list_for_each_entry(peer_vm, &process_info->vm_list_head,
                            vm_list_node) {
                struct amdgpu_bo *bo = peer_vm->root.base.bo;

                amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true);
        }

validate_map_fail:
        ttm_eu_backoff_reservation(&ctx.ticket, &ctx.list);
        amdgpu_sync_free(&sync_obj);
ttm_reserve_fail:
        mutex_unlock(&process_info->lock);
        kfree(pd_bo_list);
        return ret;
}