/*
 * Copyright 2014-2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/dma-buf.h>
#include <linux/list.h>
#include <linux/pagemap.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>

#include "amdgpu_object.h"
#include "amdgpu_vm.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_dma_buf.h"
/* BO flag to indicate a KFD userptr BO */
#define AMDGPU_AMDKFD_USERPTR_BO (1ULL << 63)

/* Userptr restore delay, just long enough to allow consecutive VM
 * changes to accumulate
 */
#define AMDGPU_USERPTR_RESTORE_DELAY_MS 1

/* Impose limit on how much memory KFD can use */
static struct {
	uint64_t max_system_mem_limit;
	uint64_t max_ttm_mem_limit;
	int64_t system_mem_used;
	int64_t ttm_mem_used;
	spinlock_t mem_limit_lock;
} kfd_mem_limit;

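/* Note (added for orientation, not in the original source): these limits
 * are global across all GPUs, hence a single static struct, while VRAM
 * usage is tracked per device in adev->kfd.vram_used; see
 * amdgpu_amdkfd_reserve_mem_limit() below.
 */
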
/* Struct used for amdgpu_amdkfd_bo_validate */
struct amdgpu_vm_parser {
	uint32_t domain;
	bool wait;
};

static const char * const domain_bit_to_string[] = {
	"CPU",
	"GTT",
	"VRAM",
	"GDS",
	"GWS",
	"OA"
};

#define domain_string(domain) domain_bit_to_string[ffs(domain)-1]

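/* Illustration (domain bit values quoted from amdgpu_drm.h for reference):
 * AMDGPU_GEM_DOMAIN_VRAM is bit 2 (0x4), so ffs(0x4) == 3 and
 * domain_string(AMDGPU_GEM_DOMAIN_VRAM) selects entry 2, "VRAM". This
 * lookup assumes a single-bit domain; a combined mask resolves to its
 * lowest set bit.
 */
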
static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work);


static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
	return (struct amdgpu_device *)kgd;
}

static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm,
		struct kgd_mem *mem)
{
	struct kfd_bo_va_list *entry;

	list_for_each_entry(entry, &mem->bo_va_list, bo_list)
		if (entry->bo_va->base.vm == avm)
			return false;

	return true;
}

/* Set memory usage limits. Currently, the limits are
 * System (TTM + userptr) memory - 15/16th System RAM
 * TTM memory - 3/8th System RAM
 */
void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
{
	struct sysinfo si;
	uint64_t mem;

	si_meminfo(&si);
	mem = si.totalram - si.totalhigh;
	mem *= si.mem_unit;

	spin_lock_init(&kfd_mem_limit.mem_limit_lock);
	kfd_mem_limit.max_system_mem_limit = mem - (mem >> 4);
	kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3);
	pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
		(kfd_mem_limit.max_system_mem_limit >> 20),
		(kfd_mem_limit.max_ttm_mem_limit >> 20));
}

/* Estimate page table size needed to represent a given memory size
 *
 * With 4KB pages, we need one 8 byte PTE for each 4KB of memory
 * (factor 512, >> 9). With 2MB pages, we need one 8 byte PTE for 2MB
 * of memory (factor 256K, >> 18). ROCm user mode tries to optimize
 * for 2MB pages for TLB efficiency. However, small allocations and
 * fragmented system memory still need some 4KB pages. We choose a
 * compromise that should work in most cases without reserving too
 * much memory for page tables unnecessarily (factor 16K, >> 14).
 */
#define ESTIMATE_PT_SIZE(mem_size) ((mem_size) >> 14)

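/* Illustrative arithmetic (not part of the original source): with the
 * >> 14 compromise, 16 GiB of managed memory reserves
 * 16 GiB / 16384 = 1 MiB for page tables, versus 32 MiB under the pure
 * 4KB-page estimate (>> 9) and 64 KiB under the pure 2MB-page estimate
 * (>> 18).
 */
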
static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
		uint64_t size, u32 domain, bool sg)
{
	uint64_t reserved_for_pt =
		ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
	size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed;
	int ret = 0;

	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
				       sizeof(struct amdgpu_bo));

	vram_needed = 0;
	if (domain == AMDGPU_GEM_DOMAIN_GTT) {
		/* TTM GTT memory */
		system_mem_needed = acc_size + size;
		ttm_mem_needed = acc_size + size;
	} else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
		/* Userptr */
		system_mem_needed = acc_size + size;
		ttm_mem_needed = acc_size;
	} else {
		/* VRAM and SG */
		system_mem_needed = acc_size;
		ttm_mem_needed = acc_size;
		if (domain == AMDGPU_GEM_DOMAIN_VRAM)
			vram_needed = size;
	}

	spin_lock(&kfd_mem_limit.mem_limit_lock);

	if ((kfd_mem_limit.system_mem_used + system_mem_needed >
	     kfd_mem_limit.max_system_mem_limit) ||
	    (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
	     kfd_mem_limit.max_ttm_mem_limit) ||
	    (adev->kfd.vram_used + vram_needed >
	     adev->gmc.real_vram_size - reserved_for_pt)) {
		ret = -ENOMEM;
	} else {
		kfd_mem_limit.system_mem_used += system_mem_needed;
		kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
		adev->kfd.vram_used += vram_needed;
	}

	spin_unlock(&kfd_mem_limit.mem_limit_lock);
	return ret;
}

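/* Accounting example with hypothetical sizes: a 1 GiB GTT allocation
 * charges 1 GiB + acc_size against both the system and TTM limits; a
 * 1 GiB userptr allocation charges the system limit in full but only
 * acc_size against the TTM limit, since its pages are user memory; VRAM
 * and SG allocations charge only acc_size against both limits, with the
 * BO size additionally counted against the per-device VRAM budget.
 */
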
static void unreserve_mem_limit(struct amdgpu_device *adev,
		uint64_t size, u32 domain, bool sg)
{
	size_t acc_size;

	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
				       sizeof(struct amdgpu_bo));

	spin_lock(&kfd_mem_limit.mem_limit_lock);
	if (domain == AMDGPU_GEM_DOMAIN_GTT) {
		kfd_mem_limit.system_mem_used -= (acc_size + size);
		kfd_mem_limit.ttm_mem_used -= (acc_size + size);
	} else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
		kfd_mem_limit.system_mem_used -= (acc_size + size);
		kfd_mem_limit.ttm_mem_used -= acc_size;
	} else {
		kfd_mem_limit.system_mem_used -= acc_size;
		kfd_mem_limit.ttm_mem_used -= acc_size;
		if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
			adev->kfd.vram_used -= size;
			WARN_ONCE(adev->kfd.vram_used < 0,
				  "kfd VRAM memory accounting unbalanced");
		}
	}
	WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
		  "kfd system memory accounting unbalanced");
	WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
		  "kfd TTM memory accounting unbalanced");

	spin_unlock(&kfd_mem_limit.mem_limit_lock);
}

void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	u32 domain = bo->preferred_domains;
	bool sg = (bo->preferred_domains == AMDGPU_GEM_DOMAIN_CPU);

	if (bo->flags & AMDGPU_AMDKFD_USERPTR_BO) {
		domain = AMDGPU_GEM_DOMAIN_CPU;
		sg = false;
	}

	unreserve_mem_limit(adev, amdgpu_bo_size(bo), domain, sg);
}

/* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's
 *  reservation object.
 *
 * @bo: [IN] Remove eviction fence(s) from this BO
 * @ef: [IN] This eviction fence is removed if it
 *  is present in the shared list.
 *
 * NOTE: Must be called with BO reserved i.e. bo->tbo.resv->lock held.
 */
static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
					struct amdgpu_amdkfd_fence *ef)
{
	struct dma_resv *resv = bo->tbo.base.resv;
	struct dma_resv_list *old, *new;
	unsigned int i, j, k;

	if (!ef)
		return -EINVAL;

	old = dma_resv_get_list(resv);
	if (!old)
		return 0;

	new = kmalloc(offsetof(typeof(*new), shared[old->shared_max]),
		      GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	/* Go through all the shared fences in the reservation object and sort
	 * the interesting ones to the end of the list.
	 */
	for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) {
		struct dma_fence *f;

		f = rcu_dereference_protected(old->shared[i],
					      dma_resv_held(resv));

		if (f->context == ef->base.context)
			RCU_INIT_POINTER(new->shared[--j], f);
		else
			RCU_INIT_POINTER(new->shared[k++], f);
	}
	new->shared_max = old->shared_max;
	new->shared_count = k;

	/* Install the new fence list, seqcount provides the barriers */
	preempt_disable();
	write_seqcount_begin(&resv->seq);
	RCU_INIT_POINTER(resv->fence, new);
	write_seqcount_end(&resv->seq);
	preempt_enable();

	/* Drop the references to the removed fences */
	for (i = j, k = 0; i < old->shared_count; ++i) {
		struct dma_fence *f;

		f = rcu_dereference_protected(new->shared[i],
					      dma_resv_held(resv));
		dma_fence_put(f);
	}
	kfree_rcu(old, rcu);

	return 0;
}

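/* Partition example with hypothetical fences: given a shared list
 * [A, E1, B, E2] where E1 and E2 match ef->base.context, the first loop
 * above builds [A, B, E2, E1] with shared_count = 2; the entries from
 * index j onward are the removed fences whose references get dropped.
 */
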
static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
				     bool wait)
{
	struct ttm_operation_ctx ctx = { false, false };
	int ret;

	if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),
		 "Called with userptr BO"))
		return -EINVAL;

	amdgpu_bo_placement_from_domain(bo, domain);

	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (ret)
		goto validate_fail;
	if (wait)
		amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);

validate_fail:
	return ret;
}

static int amdgpu_amdkfd_validate(void *param, struct amdgpu_bo *bo)
{
	struct amdgpu_vm_parser *p = param;

	return amdgpu_amdkfd_bo_validate(bo, p->domain, p->wait);
}

/* vm_validate_pt_pd_bos - Validate page table and directory BOs
 *
 * Page directories are not updated here because huge page handling
 * during page table updates can invalidate page directory entries
 * again. Page directories are only updated after updating page
 * tables.
 */
static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
{
	struct amdgpu_bo *pd = vm->root.base.bo;
	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
	struct amdgpu_vm_parser param;
	int ret;

	param.domain = AMDGPU_GEM_DOMAIN_VRAM;
	param.wait = false;

	ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate,
					&param);
	if (ret) {
		pr_err("amdgpu: failed to validate PT BOs\n");
		return ret;
	}

	ret = amdgpu_amdkfd_validate(&param, pd);
	if (ret) {
		pr_err("amdgpu: failed to validate PD\n");
		return ret;
	}

	vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.base.bo);

	if (vm->use_cpu_for_update) {
		ret = amdgpu_bo_kmap(pd, NULL);
		if (ret) {
			pr_err("amdgpu: failed to kmap PD, ret=%d\n", ret);
			return ret;
		}
	}

	return 0;
}

static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
{
	struct amdgpu_bo *pd = vm->root.base.bo;
	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
	int ret;

	ret = amdgpu_vm_update_pdes(adev, vm, false);
	if (ret)
		return ret;

	return amdgpu_sync_fence(sync, vm->last_update, false);
}

static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
{
	struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
	bool coherent = mem->alloc_flags & ALLOC_MEM_FLAGS_COHERENT;
	uint32_t mapping_flags;

	mapping_flags = AMDGPU_VM_PAGE_READABLE;
	if (mem->alloc_flags & ALLOC_MEM_FLAGS_WRITABLE)
		mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
	if (mem->alloc_flags & ALLOC_MEM_FLAGS_EXECUTABLE)
		mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;

	switch (adev->asic_type) {
	case CHIP_ARCTURUS:
		if (mem->alloc_flags & ALLOC_MEM_FLAGS_VRAM) {
			if (bo_adev == adev)
				mapping_flags |= coherent ?
					AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
			else
				mapping_flags |= AMDGPU_VM_MTYPE_UC;
		} else {
			mapping_flags |= coherent ?
				AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
		}
		break;
	default:
		mapping_flags |= coherent ?
			AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
	}

	return amdgpu_gem_va_map_flags(adev, mapping_flags);
}

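/* Reading the switch above: for example, a coherent VRAM allocation
 * mapped on the Arcturus device that owns it gets MTYPE_CC, while the
 * same BO mapped from a peer device gets MTYPE_UC; on all other ASICs
 * the MTYPE depends only on the coherent flag (UC vs. NC).
 */
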
/* add_bo_to_vm - Add a BO to a VM
 *
 * Everything that needs to be done only once when a BO is first added
 * to a VM. It can later be mapped and unmapped many times without
 * repeating these steps.
 *
 * 1. Allocate and initialize BO VA entry data structure
 * 2. Add BO to the VM
 * 3. Determine ASIC-specific PTE flags
 * 4. Alloc page tables and directories if needed
 * 4a. Validate new page tables and directories
 */
static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
		struct amdgpu_vm *vm, bool is_aql,
		struct kfd_bo_va_list **p_bo_va_entry)
{
	int ret;
	struct kfd_bo_va_list *bo_va_entry;
	struct amdgpu_bo *bo = mem->bo;
	uint64_t va = mem->va;
	struct list_head *list_bo_va = &mem->bo_va_list;
	unsigned long bo_size = bo->tbo.mem.size;

	if (!va) {
		pr_err("Invalid VA when adding BO to VM\n");
		return -EINVAL;
	}

	if (is_aql)
		va += bo_size;

	bo_va_entry = kzalloc(sizeof(*bo_va_entry), GFP_KERNEL);
	if (!bo_va_entry)
		return -ENOMEM;

	pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
			va + bo_size, vm);

	/* Add BO to VM internal data structures */
	bo_va_entry->bo_va = amdgpu_vm_bo_add(adev, vm, bo);
	if (!bo_va_entry->bo_va) {
		ret = -EINVAL;
		pr_err("Failed to add BO object to VM. ret == %d\n",
				ret);
		goto err_vmadd;
	}

	bo_va_entry->va = va;
	bo_va_entry->pte_flags = get_pte_flags(adev, mem);
	bo_va_entry->kgd_dev = (void *)adev;
	list_add(&bo_va_entry->bo_list, list_bo_va);

	if (p_bo_va_entry)
		*p_bo_va_entry = bo_va_entry;

	/* Allocate and validate page tables if needed */
	ret = vm_validate_pt_pd_bos(vm);
	if (ret) {
		pr_err("validate_pt_pd_bos() failed\n");
		goto err_alloc_pts;
	}

	return 0;

err_alloc_pts:
	amdgpu_vm_bo_rmv(adev, bo_va_entry->bo_va);
	list_del(&bo_va_entry->bo_list);
err_vmadd:
	kfree(bo_va_entry);
	return ret;
}

static void remove_bo_from_vm(struct amdgpu_device *adev,
		struct kfd_bo_va_list *entry, unsigned long size)
{
	pr_debug("\t remove VA 0x%llx - 0x%llx in entry %p\n",
			entry->va,
			entry->va + size, entry);
	amdgpu_vm_bo_rmv(adev, entry->bo_va);
	list_del(&entry->bo_list);
	kfree(entry);
}

static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
				struct amdkfd_process_info *process_info,
				bool userptr)
{
	struct ttm_validate_buffer *entry = &mem->validate_list;
	struct amdgpu_bo *bo = mem->bo;

	INIT_LIST_HEAD(&entry->head);
	entry->num_shared = 1;
	entry->bo = &bo->tbo;
	mutex_lock(&process_info->lock);
	if (userptr)
		list_add_tail(&entry->head, &process_info->userptr_valid_list);
	else
		list_add_tail(&entry->head, &process_info->kfd_bo_list);
	mutex_unlock(&process_info->lock);
}

static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem,
		struct amdkfd_process_info *process_info)
{
	struct ttm_validate_buffer *bo_list_entry;

	bo_list_entry = &mem->validate_list;
	mutex_lock(&process_info->lock);
	list_del(&bo_list_entry->head);
	mutex_unlock(&process_info->lock);
}

/* Initializes user pages. It registers the MMU notifier and validates
 * the userptr BO in the GTT domain.
 *
 * The BO must already be on the userptr_valid_list. Otherwise an
 * eviction and restore may happen that leaves the new BO unmapped
 * with the user mode queues running.
 *
 * Takes the process_info->lock to protect against concurrent restore
 * workers.
 *
 * Returns 0 for success, negative errno for errors.
 */
static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr)
{
	struct amdkfd_process_info *process_info = mem->process_info;
	struct amdgpu_bo *bo = mem->bo;
	struct ttm_operation_ctx ctx = { true, false };
	int ret = 0;

	mutex_lock(&process_info->lock);

	ret = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, user_addr, 0);
	if (ret) {
		pr_err("%s: Failed to set userptr: %d\n", __func__, ret);
		goto out;
	}

	ret = amdgpu_mn_register(bo, user_addr);
	if (ret) {
		pr_err("%s: Failed to register MMU notifier: %d\n",
		       __func__, ret);
		goto out;
	}

	ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
	if (ret) {
		pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
		goto unregister_out;
	}

	ret = amdgpu_bo_reserve(bo, true);
	if (ret) {
		pr_err("%s: Failed to reserve BO\n", __func__);
		goto release_out;
	}
	amdgpu_bo_placement_from_domain(bo, mem->domain);
	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (ret)
		pr_err("%s: failed to validate BO\n", __func__);
	amdgpu_bo_unreserve(bo);

release_out:
	amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
unregister_out:
	if (ret)
		amdgpu_mn_unregister(bo);
out:
	mutex_unlock(&process_info->lock);
	return ret;
}

/* Reserving a BO and its page table BOs must happen atomically to
 * avoid deadlocks. Some operations update multiple VMs at once. Track
 * all the reservation info in a context structure. Optionally a sync
 * object can track VM updates.
 */
struct bo_vm_reservation_context {
	struct amdgpu_bo_list_entry kfd_bo; /* BO list entry for the KFD BO */
	unsigned int n_vms;		    /* Number of VMs reserved	    */
	struct amdgpu_bo_list_entry *vm_pd; /* Array of VM BO list entries */
	struct ww_acquire_ctx ticket;	    /* Reservation ticket	    */
	struct list_head list, duplicates;  /* BO lists			    */
	struct amdgpu_sync *sync;	    /* Pointer to sync object	    */
	bool reserved;			    /* Whether BOs are reserved	    */
};

enum bo_vm_match {
	BO_VM_NOT_MAPPED = 0,	/* Match VMs where a BO is not mapped */
	BO_VM_MAPPED,		/* Match VMs where a BO is mapped */
	BO_VM_ALL,		/* Match all VMs a BO was added to */
};

/**
 * reserve_bo_and_vm - reserve a BO and a VM unconditionally.
 * @mem: KFD BO structure.
 * @vm: the VM to reserve.
 * @ctx: the struct that will be used in unreserve_bo_and_vms().
 */
static int reserve_bo_and_vm(struct kgd_mem *mem,
			      struct amdgpu_vm *vm,
			      struct bo_vm_reservation_context *ctx)
{
	struct amdgpu_bo *bo = mem->bo;
	int ret;

	WARN_ON(!vm);

	ctx->reserved = false;
	ctx->n_vms = 1;
	ctx->sync = &mem->sync;

	INIT_LIST_HEAD(&ctx->list);
	INIT_LIST_HEAD(&ctx->duplicates);

	ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), GFP_KERNEL);
	if (!ctx->vm_pd)
		return -ENOMEM;

	ctx->kfd_bo.priority = 0;
	ctx->kfd_bo.tv.bo = &bo->tbo;
	ctx->kfd_bo.tv.num_shared = 1;
	list_add(&ctx->kfd_bo.tv.head, &ctx->list);

	amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);

	ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
				     false, &ctx->duplicates);
	if (!ret)
		ctx->reserved = true;
	else {
		pr_err("Failed to reserve buffers in ttm\n");
		kfree(ctx->vm_pd);
		ctx->vm_pd = NULL;
	}

	return ret;
}

/**
 * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally
 * @mem: KFD BO structure.
 * @vm: the VM to reserve. If NULL, then all VMs associated with the BO
 * are reserved. Otherwise, only the single given VM is reserved.
 * @map_type: the mapping status that will be used to filter the VMs.
 * @ctx: the struct that will be used in unreserve_bo_and_vms().
 *
 * Returns 0 for success, negative for failure.
 */
static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
				struct amdgpu_vm *vm, enum bo_vm_match map_type,
				struct bo_vm_reservation_context *ctx)
{
	struct amdgpu_bo *bo = mem->bo;
	struct kfd_bo_va_list *entry;
	unsigned int i;
	int ret;

	ctx->reserved = false;
	ctx->n_vms = 0;
	ctx->vm_pd = NULL;
	ctx->sync = &mem->sync;

	INIT_LIST_HEAD(&ctx->list);
	INIT_LIST_HEAD(&ctx->duplicates);

	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
		if ((vm && vm != entry->bo_va->base.vm) ||
			(entry->is_mapped != map_type
			&& map_type != BO_VM_ALL))
			continue;

		ctx->n_vms++;
	}

	if (ctx->n_vms != 0) {
		ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd),
				     GFP_KERNEL);
		if (!ctx->vm_pd)
			return -ENOMEM;
	}

	ctx->kfd_bo.priority = 0;
	ctx->kfd_bo.tv.bo = &bo->tbo;
	ctx->kfd_bo.tv.num_shared = 1;
	list_add(&ctx->kfd_bo.tv.head, &ctx->list);

	i = 0;
	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
		if ((vm && vm != entry->bo_va->base.vm) ||
			(entry->is_mapped != map_type
			&& map_type != BO_VM_ALL))
			continue;

		amdgpu_vm_get_pd_bo(entry->bo_va->base.vm, &ctx->list,
				&ctx->vm_pd[i]);
		i++;
	}

	ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
				     false, &ctx->duplicates);
	if (!ret)
		ctx->reserved = true;
	else
		pr_err("Failed to reserve buffers in ttm.\n");

	if (ret) {
		kfree(ctx->vm_pd);
		ctx->vm_pd = NULL;
	}

	return ret;
}

/**
 * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context
 * @ctx: Reservation context to unreserve
 * @wait: Optionally wait for a sync object representing pending VM updates
 * @intr: Whether the wait is interruptible
 *
 * Also frees any resources allocated in
 * reserve_bo_and_(cond_)vm(s). Returns the status from
 * amdgpu_sync_wait.
 */
static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
				 bool wait, bool intr)
{
	int ret = 0;

	if (wait)
		ret = amdgpu_sync_wait(ctx->sync, intr);

	if (ctx->reserved)
		ttm_eu_backoff_reservation(&ctx->ticket, &ctx->list);
	kfree(ctx->vm_pd);

	ctx->sync = NULL;

	ctx->reserved = false;
	ctx->vm_pd = NULL;

	return ret;
}

static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
				struct kfd_bo_va_list *entry,
				struct amdgpu_sync *sync)
{
	struct amdgpu_bo_va *bo_va = entry->bo_va;
	struct amdgpu_vm *vm = bo_va->base.vm;

	amdgpu_vm_bo_unmap(adev, bo_va, entry->va);

	amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);

	amdgpu_sync_fence(sync, bo_va->last_pt_update, false);

	return 0;
}

static int update_gpuvm_pte(struct amdgpu_device *adev,
		struct kfd_bo_va_list *entry,
		struct amdgpu_sync *sync)
{
	int ret;
	struct amdgpu_bo_va *bo_va = entry->bo_va;

	/* Update the page tables  */
	ret = amdgpu_vm_bo_update(adev, bo_va, false);
	if (ret) {
		pr_err("amdgpu_vm_bo_update failed\n");
		return ret;
	}

	return amdgpu_sync_fence(sync, bo_va->last_pt_update, false);
}

static int map_bo_to_gpuvm(struct amdgpu_device *adev,
		struct kfd_bo_va_list *entry, struct amdgpu_sync *sync,
		bool no_update_pte)
{
	int ret;

	/* Set virtual address for the allocation */
	ret = amdgpu_vm_bo_map(adev, entry->bo_va, entry->va, 0,
			       amdgpu_bo_size(entry->bo_va->base.bo),
			       entry->pte_flags);
	if (ret) {
		pr_err("Failed to map VA 0x%llx in vm. ret %d\n",
				entry->va, ret);
		return ret;
	}

	if (no_update_pte)
		return 0;

	ret = update_gpuvm_pte(adev, entry, sync);
	if (ret) {
		pr_err("update_gpuvm_pte() failed\n");
		goto update_gpuvm_pte_failed;
	}

	return 0;

update_gpuvm_pte_failed:
	unmap_bo_from_gpuvm(adev, entry, sync);
	return ret;
}

static struct sg_table *create_doorbell_sg(uint64_t addr, uint32_t size)
{
	struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL);

	if (!sg)
		return NULL;
	if (sg_alloc_table(sg, 1, GFP_KERNEL)) {
		kfree(sg);
		return NULL;
	}
	sg->sgl->dma_address = addr;
	sg->sgl->length = size;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
	sg->sgl->dma_length = size;
#endif
	return sg;
}

static int process_validate_vms(struct amdkfd_process_info *process_info)
{
	struct amdgpu_vm *peer_vm;
	int ret;

	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
		ret = vm_validate_pt_pd_bos(peer_vm);
		if (ret)
			return ret;
	}

	return 0;
}

static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
				 struct amdgpu_sync *sync)
{
	struct amdgpu_vm *peer_vm;
	int ret;

	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
		struct amdgpu_bo *pd = peer_vm->root.base.bo;

		ret = amdgpu_sync_resv(NULL,
					sync, pd->tbo.base.resv,
					AMDGPU_FENCE_OWNER_KFD, false);
		if (ret)
			return ret;
	}

	return 0;
}

static int process_update_pds(struct amdkfd_process_info *process_info,
			      struct amdgpu_sync *sync)
{
	struct amdgpu_vm *peer_vm;
	int ret;

	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
		ret = vm_update_pds(peer_vm, sync);
		if (ret)
			return ret;
	}

	return 0;
}

static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
		       struct dma_fence **ef)
{
	struct amdkfd_process_info *info = NULL;
	int ret;

	if (!*process_info) {
		info = kzalloc(sizeof(*info), GFP_KERNEL);
		if (!info)
			return -ENOMEM;

		mutex_init(&info->lock);
		INIT_LIST_HEAD(&info->vm_list_head);
		INIT_LIST_HEAD(&info->kfd_bo_list);
		INIT_LIST_HEAD(&info->userptr_valid_list);
		INIT_LIST_HEAD(&info->userptr_inval_list);

		info->eviction_fence =
			amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
						   current->mm);
		if (!info->eviction_fence) {
			pr_err("Failed to create eviction fence\n");
			ret = -ENOMEM;
			goto create_evict_fence_fail;
		}

		info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
		atomic_set(&info->evicted_bos, 0);
		INIT_DELAYED_WORK(&info->restore_userptr_work,
				  amdgpu_amdkfd_restore_userptr_worker);

		*process_info = info;
		*ef = dma_fence_get(&info->eviction_fence->base);
	}

	vm->process_info = *process_info;

	/* Validate page directory and attach eviction fence */
	ret = amdgpu_bo_reserve(vm->root.base.bo, true);
	if (ret)
		goto reserve_pd_fail;
	ret = vm_validate_pt_pd_bos(vm);
	if (ret) {
		pr_err("validate_pt_pd_bos() failed\n");
		goto validate_pd_fail;
	}
	ret = amdgpu_bo_sync_wait(vm->root.base.bo,
				  AMDGPU_FENCE_OWNER_KFD, false);
	if (ret)
		goto wait_pd_fail;
	ret = dma_resv_reserve_shared(vm->root.base.bo->tbo.base.resv, 1);
	if (ret)
		goto reserve_shared_fail;
	amdgpu_bo_fence(vm->root.base.bo,
			&vm->process_info->eviction_fence->base, true);
	amdgpu_bo_unreserve(vm->root.base.bo);

	/* Update process info */
	mutex_lock(&vm->process_info->lock);
	list_add_tail(&vm->vm_list_node,
			&(vm->process_info->vm_list_head));
	vm->process_info->n_vms++;
	mutex_unlock(&vm->process_info->lock);

	return 0;

reserve_shared_fail:
wait_pd_fail:
validate_pd_fail:
	amdgpu_bo_unreserve(vm->root.base.bo);
reserve_pd_fail:
	vm->process_info = NULL;
	if (info) {
		/* Two fence references: one in info and one in *ef */
		dma_fence_put(&info->eviction_fence->base);
		dma_fence_put(*ef);
		*ef = NULL;
		*process_info = NULL;
		put_pid(info->pid);
create_evict_fence_fail:
		mutex_destroy(&info->lock);
		kfree(info);
	}
	return ret;
}

int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, unsigned int pasid,
					  void **vm, void **process_info,
					  struct dma_fence **ef)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_vm *new_vm;
	int ret;

	new_vm = kzalloc(sizeof(*new_vm), GFP_KERNEL);
	if (!new_vm)
		return -ENOMEM;

	/* Initialize AMDGPU part of the VM */
	ret = amdgpu_vm_init(adev, new_vm, AMDGPU_VM_CONTEXT_COMPUTE, pasid);
	if (ret) {
		pr_err("Failed init vm ret %d\n", ret);
		goto amdgpu_vm_init_fail;
	}

	/* Initialize KFD part of the VM and process info */
	ret = init_kfd_vm(new_vm, process_info, ef);
	if (ret)
		goto init_kfd_vm_fail;

	*vm = (void *) new_vm;

	return 0;

init_kfd_vm_fail:
	amdgpu_vm_fini(adev, new_vm);
amdgpu_vm_init_fail:
	kfree(new_vm);
	return ret;
}

int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
					   struct file *filp, unsigned int pasid,
					   void **vm, void **process_info,
					   struct dma_fence **ef)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct drm_file *drm_priv = filp->private_data;
	struct amdgpu_fpriv *drv_priv = drm_priv->driver_priv;
	struct amdgpu_vm *avm = &drv_priv->vm;
	int ret;

	/* Already a compute VM? */
	if (avm->process_info)
		return -EINVAL;

	/* Convert VM into a compute VM */
	ret = amdgpu_vm_make_compute(adev, avm, pasid);
	if (ret)
		return ret;

	/* Initialize KFD part of the VM and process info */
	ret = init_kfd_vm(avm, process_info, ef);
	if (ret)
		return ret;

	*vm = (void *)avm;

	return 0;
}

void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm)
{
	struct amdkfd_process_info *process_info = vm->process_info;
	struct amdgpu_bo *pd = vm->root.base.bo;

	if (!process_info)
		return;

	/* Release eviction fence from PD */
	amdgpu_bo_reserve(pd, false);
	amdgpu_bo_fence(pd, NULL, false);
	amdgpu_bo_unreserve(pd);

	/* Update process info */
	mutex_lock(&process_info->lock);
	process_info->n_vms--;
	list_del(&vm->vm_list_node);
	mutex_unlock(&process_info->lock);

	/* Release per-process resources when last compute VM is destroyed */
	if (!process_info->n_vms) {
		WARN_ON(!list_empty(&process_info->kfd_bo_list));
		WARN_ON(!list_empty(&process_info->userptr_valid_list));
		WARN_ON(!list_empty(&process_info->userptr_inval_list));

		dma_fence_put(&process_info->eviction_fence->base);
		cancel_delayed_work_sync(&process_info->restore_userptr_work);
		put_pid(process_info->pid);
		mutex_destroy(&process_info->lock);
		kfree(process_info);
	}
}

void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;

	if (WARN_ON(!kgd || !vm))
		return;

	pr_debug("Destroying process vm %p\n", vm);

	/* Release the VM context */
	amdgpu_vm_fini(adev, avm);
	kfree(vm);
}

void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;

	if (WARN_ON(!kgd || !vm))
		return;

	pr_debug("Releasing process vm %p\n", vm);

	/* The original pasid of the amdgpu vm has already been
	 * released during conversion of the amdgpu vm to a compute vm.
	 * The current pasid is managed by kfd and will be
	 * released on kfd process destroy. Set amdgpu pasid
	 * to 0 to avoid a duplicate release.
	 */
	amdgpu_vm_release_compute(adev, avm);
}

uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm)
{
	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
	struct amdgpu_bo *pd = avm->root.base.bo;
	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);

	if (adev->asic_type < CHIP_VEGA10)
		return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
	return avm->pd_phys_addr;
}

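/* For illustration (AMDGPU_GPU_PAGE_SHIFT is 12, i.e. 4 KiB GPU pages):
 * ASICs before Vega10 receive the page directory base as a page frame
 * number, while Vega10 and later consume the full physical address.
 */
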
int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
		struct kgd_dev *kgd, uint64_t va, uint64_t size,
		void *vm, struct kgd_mem **mem,
		uint64_t *offset, uint32_t flags)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
	enum ttm_bo_type bo_type = ttm_bo_type_device;
	struct sg_table *sg = NULL;
	uint64_t user_addr = 0;
	struct amdgpu_bo *bo;
	struct amdgpu_bo_param bp;
	u32 domain, alloc_domain;
	u64 alloc_flags;
	int ret;

	/*
	 * Check on which domain to allocate BO
	 */
	if (flags & ALLOC_MEM_FLAGS_VRAM) {
		domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
		alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
		alloc_flags |= (flags & ALLOC_MEM_FLAGS_PUBLIC) ?
			AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED :
			AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	} else if (flags & ALLOC_MEM_FLAGS_GTT) {
		domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
		alloc_flags = 0;
	} else if (flags & ALLOC_MEM_FLAGS_USERPTR) {
		domain = AMDGPU_GEM_DOMAIN_GTT;
		alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
		alloc_flags = 0;
		if (!offset || !*offset)
			return -EINVAL;
		user_addr = untagged_addr(*offset);
	} else if (flags & (ALLOC_MEM_FLAGS_DOORBELL |
			ALLOC_MEM_FLAGS_MMIO_REMAP)) {
		domain = AMDGPU_GEM_DOMAIN_GTT;
		alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
		bo_type = ttm_bo_type_sg;
		alloc_flags = 0;
		if (size > UINT_MAX)
			return -EINVAL;
		sg = create_doorbell_sg(*offset, size);
		if (!sg)
			return -ENOMEM;
	} else {
		return -EINVAL;
	}

	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
	if (!*mem) {
		ret = -ENOMEM;
		goto err;
	}
	INIT_LIST_HEAD(&(*mem)->bo_va_list);
	mutex_init(&(*mem)->lock);
	(*mem)->aql_queue = !!(flags & ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);

	/* Workaround for AQL queue wraparound bug. Map the same
	 * memory twice. That means we only actually allocate half
	 * the memory.
	 */
	if ((*mem)->aql_queue)
		size = size >> 1;
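	/* Illustration (not in the original source): a 4 MiB AQL queue
	 * request therefore creates a 2 MiB BO, which is later mapped at
	 * both va and va + 2 MiB (see the is_aql handling in
	 * add_bo_to_vm()).
	 */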

	(*mem)->alloc_flags = flags;

	amdgpu_sync_create(&(*mem)->sync);

	ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, alloc_domain, !!sg);
	if (ret) {
		pr_debug("Insufficient system memory\n");
		goto err_reserve_limit;
	}

	pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
			va, size, domain_string(alloc_domain));

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = 1;
	bp.domain = alloc_domain;
	bp.flags = alloc_flags;
	bp.type = bo_type;
	bp.resv = NULL;
	ret = amdgpu_bo_create(adev, &bp, &bo);
	if (ret) {
		pr_debug("Failed to create BO on domain %s. ret %d\n",
				domain_string(alloc_domain), ret);
		goto err_bo_create;
	}
	if (bo_type == ttm_bo_type_sg) {
		bo->tbo.sg = sg;
		bo->tbo.ttm->sg = sg;
	}
	bo->kfd_bo = *mem;
	(*mem)->bo = bo;
	if (user_addr)
		bo->flags |= AMDGPU_AMDKFD_USERPTR_BO;

	(*mem)->va = va;
	(*mem)->domain = domain;
	(*mem)->mapped_to_gpu_memory = 0;
	(*mem)->process_info = avm->process_info;
	add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);

	if (user_addr) {
		ret = init_user_pages(*mem, user_addr);
		if (ret)
			goto allocate_init_user_pages_failed;
	}

	if (offset)
		*offset = amdgpu_bo_mmap_offset(bo);

	return 0;

allocate_init_user_pages_failed:
	remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
	amdgpu_bo_unref(&bo);
	/* Don't unreserve system mem limit twice */
	goto err_reserve_limit;
err_bo_create:
	unreserve_mem_limit(adev, size, alloc_domain, !!sg);
err_reserve_limit:
	mutex_destroy(&(*mem)->lock);
	kfree(*mem);
err:
	if (sg) {
		sg_free_table(sg);
		kfree(sg);
	}
	return ret;
}

int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
		struct kgd_dev *kgd, struct kgd_mem *mem)
{
	struct amdkfd_process_info *process_info = mem->process_info;
	unsigned long bo_size = mem->bo->tbo.mem.size;
	struct kfd_bo_va_list *entry, *tmp;
	struct bo_vm_reservation_context ctx;
	struct ttm_validate_buffer *bo_list_entry;
	int ret;

	mutex_lock(&mem->lock);

	if (mem->mapped_to_gpu_memory > 0) {
		pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
				mem->va, bo_size);
		mutex_unlock(&mem->lock);
		return -EBUSY;
	}

	mutex_unlock(&mem->lock);
	/* lock is not needed after this, since mem is unused and will
	 * be freed anyway
	 */

	/* No more MMU notifiers */
	amdgpu_mn_unregister(mem->bo);

	/* Make sure restore workers don't access the BO any more */
	bo_list_entry = &mem->validate_list;
	mutex_lock(&process_info->lock);
	list_del(&bo_list_entry->head);
	mutex_unlock(&process_info->lock);

	ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
	if (unlikely(ret))
		return ret;

	/* The eviction fence should be removed by the last unmap.
	 * TODO: Log an error condition if the bo still has the eviction fence
	 * attached
	 */
	amdgpu_amdkfd_remove_eviction_fence(mem->bo,
					process_info->eviction_fence);
	pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
		mem->va + bo_size * (1 + mem->aql_queue));

	/* Remove from VM internal data structures */
	list_for_each_entry_safe(entry, tmp, &mem->bo_va_list, bo_list)
		remove_bo_from_vm((struct amdgpu_device *)entry->kgd_dev,
				entry, bo_size);

	ret = unreserve_bo_and_vms(&ctx, false, false);

	/* Free the sync object */
	amdgpu_sync_free(&mem->sync);

	/* If the SG is not NULL, it's one we created for a doorbell or mmio
	 * remap BO. We need to free it.
	 */
	if (mem->bo->tbo.sg) {
		sg_free_table(mem->bo->tbo.sg);
		kfree(mem->bo->tbo.sg);
	}

	/* Free the BO */
	amdgpu_bo_unref(&mem->bo);
	mutex_destroy(&mem->lock);
	kfree(mem);

	return ret;
}

int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
		struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
	int ret;
	struct amdgpu_bo *bo;
	uint32_t domain;
	struct kfd_bo_va_list *entry;
	struct bo_vm_reservation_context ctx;
	struct kfd_bo_va_list *bo_va_entry = NULL;
	struct kfd_bo_va_list *bo_va_entry_aql = NULL;
	unsigned long bo_size;
	bool is_invalid_userptr = false;

	bo = mem->bo;
	if (!bo) {
		pr_err("Invalid BO when mapping memory to GPU\n");
		return -EINVAL;
	}

	/* Make sure restore is not running concurrently. Since we
	 * don't map invalid userptr BOs, we rely on the next restore
	 * worker to do the mapping
	 */
	mutex_lock(&mem->process_info->lock);

	/* Lock mmap-sem. If we find an invalid userptr BO, we can be
	 * sure that the MMU notifier is no longer running
	 * concurrently and the queues are actually stopped
	 */
	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
		down_write(&current->mm->mmap_sem);
		is_invalid_userptr = atomic_read(&mem->invalid);
		up_write(&current->mm->mmap_sem);
	}

	mutex_lock(&mem->lock);

	domain = mem->domain;
	bo_size = bo->tbo.mem.size;

	pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
			mem->va,
			mem->va + bo_size * (1 + mem->aql_queue),
			vm, domain_string(domain));

	ret = reserve_bo_and_vm(mem, vm, &ctx);
	if (unlikely(ret))
		goto out;

	/* Userptr can be marked as "not invalid", but not actually be
	 * validated yet (still in the system domain). In that case
	 * the queues are still stopped and we can leave mapping for
	 * the next restore worker
	 */
	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
	    bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
		is_invalid_userptr = true;

	if (check_if_add_bo_to_vm(avm, mem)) {
		ret = add_bo_to_vm(adev, mem, avm, false,
				&bo_va_entry);
		if (ret)
			goto add_bo_to_vm_failed;
		if (mem->aql_queue) {
			ret = add_bo_to_vm(adev, mem, avm,
					true, &bo_va_entry_aql);
			if (ret)
				goto add_bo_to_vm_failed_aql;
		}
	} else {
		ret = vm_validate_pt_pd_bos(avm);
		if (unlikely(ret))
			goto add_bo_to_vm_failed;
	}

	if (mem->mapped_to_gpu_memory == 0 &&
	    !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
		/* Validate BO only once. The eviction fence gets added to BO
		 * the first time it is mapped. Validate will wait for all
		 * background evictions to complete.
		 */
		ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
		if (ret) {
			pr_debug("Validate failed\n");
			goto map_bo_to_gpuvm_failed;
		}
	}

	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
		if (entry->bo_va->base.vm == vm && !entry->is_mapped) {
			pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
					entry->va, entry->va + bo_size,
					entry);

			ret = map_bo_to_gpuvm(adev, entry, ctx.sync,
					      is_invalid_userptr);
			if (ret) {
				pr_err("Failed to map bo to gpuvm\n");
				goto map_bo_to_gpuvm_failed;
			}

			ret = vm_update_pds(vm, ctx.sync);
			if (ret) {
				pr_err("Failed to update page directories\n");
				goto map_bo_to_gpuvm_failed;
			}

			entry->is_mapped = true;
			mem->mapped_to_gpu_memory++;
			pr_debug("\t INC mapping count %d\n",
					mem->mapped_to_gpu_memory);
		}
	}

	if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->pin_count)
		amdgpu_bo_fence(bo,
				&avm->process_info->eviction_fence->base,
				true);
	ret = unreserve_bo_and_vms(&ctx, false, false);

	goto out;

map_bo_to_gpuvm_failed:
	if (bo_va_entry_aql)
		remove_bo_from_vm(adev, bo_va_entry_aql, bo_size);
add_bo_to_vm_failed_aql:
	if (bo_va_entry)
		remove_bo_from_vm(adev, bo_va_entry, bo_size);
add_bo_to_vm_failed:
	unreserve_bo_and_vms(&ctx, false, false);
out:
	mutex_unlock(&mem->process_info->lock);
	mutex_unlock(&mem->lock);
	return ret;
}

int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
		struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdkfd_process_info *process_info =
		((struct amdgpu_vm *)vm)->process_info;
	unsigned long bo_size = mem->bo->tbo.mem.size;
	struct kfd_bo_va_list *entry;
	struct bo_vm_reservation_context ctx;
	int ret;

	mutex_lock(&mem->lock);

	ret = reserve_bo_and_cond_vms(mem, vm, BO_VM_MAPPED, &ctx);
	if (unlikely(ret))
		goto out;
	/* If no VMs were reserved, it means the BO wasn't actually mapped */
	if (ctx.n_vms == 0) {
		ret = -EINVAL;
		goto unreserve_out;
	}

	ret = vm_validate_pt_pd_bos((struct amdgpu_vm *)vm);
	if (unlikely(ret))
		goto unreserve_out;

	pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n",
		mem->va,
		mem->va + bo_size * (1 + mem->aql_queue),
		vm);

	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
		if (entry->bo_va->base.vm == vm && entry->is_mapped) {
			pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
					entry->va,
					entry->va + bo_size,
					entry);

			ret = unmap_bo_from_gpuvm(adev, entry, ctx.sync);
			if (ret == 0) {
				entry->is_mapped = false;
			} else {
				pr_err("failed to unmap VA 0x%llx\n",
						mem->va);
				goto unreserve_out;
			}

			mem->mapped_to_gpu_memory--;
			pr_debug("\t DEC mapping count %d\n",
					mem->mapped_to_gpu_memory);
		}
	}

	/* If BO is unmapped from all VMs, unfence it. It can be evicted if
	 * required.
	 */
	if (mem->mapped_to_gpu_memory == 0 &&
	    !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && !mem->bo->pin_count)
		amdgpu_amdkfd_remove_eviction_fence(mem->bo,
						process_info->eviction_fence);

unreserve_out:
	unreserve_bo_and_vms(&ctx, false, false);
out:
	mutex_unlock(&mem->lock);
	return ret;
}

int amdgpu_amdkfd_gpuvm_sync_memory(
		struct kgd_dev *kgd, struct kgd_mem *mem, bool intr)
{
	struct amdgpu_sync sync;
	int ret;

	amdgpu_sync_create(&sync);

	mutex_lock(&mem->lock);
	amdgpu_sync_clone(&mem->sync, &sync);
	mutex_unlock(&mem->lock);

	ret = amdgpu_sync_wait(&sync, intr);
	amdgpu_sync_free(&sync);
	return ret;
}

int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
		struct kgd_mem *mem, void **kptr, uint64_t *size)
{
	int ret;
	struct amdgpu_bo *bo = mem->bo;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
		pr_err("userptr can't be mapped to kernel\n");
		return -EINVAL;
	}

	/* delete kgd_mem from kfd_bo_list to avoid re-validating
	 * this BO in BO's restoring after eviction.
	 */
	mutex_lock(&mem->process_info->lock);

	ret = amdgpu_bo_reserve(bo, true);
	if (ret) {
		pr_err("Failed to reserve bo. ret %d\n", ret);
		goto bo_reserve_failed;
	}

	ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
	if (ret) {
		pr_err("Failed to pin bo. ret %d\n", ret);
		goto pin_failed;
	}

	ret = amdgpu_bo_kmap(bo, kptr);
	if (ret) {
		pr_err("Failed to map bo to kernel. ret %d\n", ret);
		goto kmap_failed;
	}

	amdgpu_amdkfd_remove_eviction_fence(
		bo, mem->process_info->eviction_fence);
	list_del_init(&mem->validate_list.head);

	if (size)
		*size = amdgpu_bo_size(bo);

	amdgpu_bo_unreserve(bo);

	mutex_unlock(&mem->process_info->lock);
	return 0;

kmap_failed:
	amdgpu_bo_unpin(bo);
pin_failed:
	amdgpu_bo_unreserve(bo);
bo_reserve_failed:
	mutex_unlock(&mem->process_info->lock);

	return ret;
}

int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
					      struct kfd_vm_fault_info *mem)
{
	struct amdgpu_device *adev;

	adev = (struct amdgpu_device *)kgd;
	if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
		*mem = *adev->gmc.vm_fault_info;
		mb();
		atomic_set(&adev->gmc.vm_fault_info_updated, 0);
	}
	return 0;
}

int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
				      struct dma_buf *dma_buf,
				      uint64_t va, void *vm,
				      struct kgd_mem **mem, uint64_t *size,
				      uint64_t *mmap_offset)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct drm_gem_object *obj;
	struct amdgpu_bo *bo;
	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;

	if (dma_buf->ops != &amdgpu_dmabuf_ops)
		/* Can't handle non-graphics buffers */
		return -EINVAL;

	obj = dma_buf->priv;
	if (obj->dev->dev_private != adev)
		/* Can't handle buffers from other devices */
		return -EINVAL;

	bo = gem_to_amdgpu_bo(obj);
	if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
				    AMDGPU_GEM_DOMAIN_GTT)))
		/* Only VRAM and GTT BOs are supported */
		return -EINVAL;

	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
	if (!*mem)
		return -ENOMEM;

	if (size)
		*size = amdgpu_bo_size(bo);

	if (mmap_offset)
		*mmap_offset = amdgpu_bo_mmap_offset(bo);

	INIT_LIST_HEAD(&(*mem)->bo_va_list);
	mutex_init(&(*mem)->lock);
	(*mem)->alloc_flags =
		((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
		ALLOC_MEM_FLAGS_VRAM : ALLOC_MEM_FLAGS_GTT) |
		ALLOC_MEM_FLAGS_WRITABLE | ALLOC_MEM_FLAGS_EXECUTABLE;

	(*mem)->bo = amdgpu_bo_ref(bo);
	(*mem)->va = va;
	(*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
		AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
	(*mem)->mapped_to_gpu_memory = 0;
	(*mem)->process_info = avm->process_info;
	add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
	amdgpu_sync_create(&(*mem)->sync);

	return 0;
}

/* Evict a userptr BO by stopping the queues if necessary
 *
 * Runs in MMU notifier, may be in RECLAIM_FS context. This means it
 * cannot do any memory allocations, and cannot take any locks that
 * are held elsewhere while allocating memory. Therefore this is as
 * simple as possible, using atomic counters.
 *
 * It doesn't do anything to the BO itself. The real work happens in
 * restore, where we get updated page addresses. This function only
 * ensures that GPU access to the BO is stopped.
 */
int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
				struct mm_struct *mm)
{
	struct amdkfd_process_info *process_info = mem->process_info;
	int evicted_bos;
	int r = 0;

	atomic_inc(&mem->invalid);
	evicted_bos = atomic_inc_return(&process_info->evicted_bos);
	if (evicted_bos == 1) {
		/* First eviction, stop the queues */
		r = kgd2kfd_quiesce_mm(mm);
		if (r)
			pr_err("Failed to quiesce KFD\n");
		schedule_delayed_work(&process_info->restore_userptr_work,
			msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
	}

	return r;
}

/* Update invalid userptr BOs
 *
 * Moves invalidated (evicted) userptr BOs from userptr_valid_list to
 * userptr_inval_list and updates user pages for all BOs that have
 * been invalidated since their last update.
 */
static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
				     struct mm_struct *mm)
{
	struct kgd_mem *mem, *tmp_mem;
	struct amdgpu_bo *bo;
	struct ttm_operation_ctx ctx = { false, false };
	int invalid, ret;

	/* Move all invalidated BOs to the userptr_inval_list and
	 * release their user pages by migration to the CPU domain
	 */
	list_for_each_entry_safe(mem, tmp_mem,
				 &process_info->userptr_valid_list,
				 validate_list.head) {
		if (!atomic_read(&mem->invalid))
			continue; /* BO is still valid */

		bo = mem->bo;

		if (amdgpu_bo_reserve(bo, true))
			return -EAGAIN;
		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		amdgpu_bo_unreserve(bo);
		if (ret) {
			pr_err("%s: Failed to invalidate userptr BO\n",
			       __func__);
			return -EAGAIN;
		}

		list_move_tail(&mem->validate_list.head,
			       &process_info->userptr_inval_list);
	}

	if (list_empty(&process_info->userptr_inval_list))
		return 0; /* All evicted userptr BOs were freed */

	/* Go through userptr_inval_list and update any invalid user_pages */
	list_for_each_entry(mem, &process_info->userptr_inval_list,
			    validate_list.head) {
		invalid = atomic_read(&mem->invalid);
		if (!invalid)
			/* BO hasn't been invalidated since the last
			 * revalidation attempt. Keep its BO list.
			 */
			continue;

		bo = mem->bo;

		/* Get updated user pages */
		ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
		if (ret) {
			pr_debug("%s: Failed to get user pages: %d\n",
				__func__, ret);

			/* Return error -EBUSY or -ENOMEM, retry restore */
			return ret;
		}

		/*
		 * FIXME: Cannot ignore the return code, must hold
		 * notifier_lock
		 */
		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);

		/* Mark the BO as valid unless it was invalidated
		 * again concurrently.
		 */
		if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid)
			return -EAGAIN;
	}

	return 0;
}

/* Validate invalid userptr BOs
 *
 * Validates BOs on the userptr_inval_list, and moves them back to the
 * userptr_valid_list. Also updates GPUVM page tables with new page
 * addresses and waits for the page table updates to complete.
 */
static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
{
	struct amdgpu_bo_list_entry *pd_bo_list_entries;
	struct list_head resv_list, duplicates;
	struct ww_acquire_ctx ticket;
	struct amdgpu_sync sync;

	struct amdgpu_vm *peer_vm;
	struct kgd_mem *mem, *tmp_mem;
	struct amdgpu_bo *bo;
	struct ttm_operation_ctx ctx = { false, false };
	int i, ret;

	pd_bo_list_entries = kcalloc(process_info->n_vms,
				     sizeof(struct amdgpu_bo_list_entry),
				     GFP_KERNEL);
	if (!pd_bo_list_entries) {
		pr_err("%s: Failed to allocate PD BO list entries\n", __func__);
		ret = -ENOMEM;
		goto out_no_mem;
	}

	INIT_LIST_HEAD(&resv_list);
	INIT_LIST_HEAD(&duplicates);

	/* Get all the page directory BOs that need to be reserved */
	i = 0;
	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node)
		amdgpu_vm_get_pd_bo(peer_vm, &resv_list,
				    &pd_bo_list_entries[i++]);
	/* Add the userptr_inval_list entries to resv_list */
	list_for_each_entry(mem, &process_info->userptr_inval_list,
			    validate_list.head) {
		list_add_tail(&mem->resv_list.head, &resv_list);
		mem->resv_list.bo = mem->validate_list.bo;
		mem->resv_list.num_shared = mem->validate_list.num_shared;
	}

	/* Reserve all BOs and page tables for validation */
	ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
	WARN(!list_empty(&duplicates), "Duplicates should be empty");
	if (ret)
		goto out_free;

	amdgpu_sync_create(&sync);

	ret = process_validate_vms(process_info);
	if (ret)
		goto unreserve_out;

	/* Validate BOs and update GPUVM page tables */
	list_for_each_entry_safe(mem, tmp_mem,
				 &process_info->userptr_inval_list,
				 validate_list.head) {
		struct kfd_bo_va_list *bo_va_entry;

		bo = mem->bo;

		/* Validate the BO if we got user pages */
		if (bo->tbo.ttm->pages[0]) {
			amdgpu_bo_placement_from_domain(bo, mem->domain);
			ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
			if (ret) {
				pr_err("%s: failed to validate BO\n", __func__);
				goto unreserve_out;
			}
		}

		list_move_tail(&mem->validate_list.head,
			       &process_info->userptr_valid_list);

		/* Update mapping. If the BO was not validated
		 * (because we couldn't get user pages), this will
		 * clear the page table entries, which will result in
		 * VM faults if the GPU tries to access the invalid
		 * memory.
		 */
		list_for_each_entry(bo_va_entry, &mem->bo_va_list, bo_list) {
			if (!bo_va_entry->is_mapped)
				continue;

			ret = update_gpuvm_pte((struct amdgpu_device *)
					       bo_va_entry->kgd_dev,
					       bo_va_entry, &sync);
			if (ret) {
				pr_err("%s: update PTE failed\n", __func__);
				/* make sure this gets validated again */
				atomic_inc(&mem->invalid);
				goto unreserve_out;
			}
		}
	}

	/* Update page directories */
	ret = process_update_pds(process_info, &sync);

unreserve_out:
	ttm_eu_backoff_reservation(&ticket, &resv_list);
	amdgpu_sync_wait(&sync, false);
	amdgpu_sync_free(&sync);
out_free:
	kfree(pd_bo_list_entries);
out_no_mem:

	return ret;
}

/* Worker callback to restore evicted userptr BOs
 *
 * Tries to update and validate all userptr BOs. If successful and no
 * concurrent evictions happened, the queues are restarted. Otherwise,
 * reschedule for another attempt later.
 */
static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct amdkfd_process_info *process_info =
		container_of(dwork, struct amdkfd_process_info,
			     restore_userptr_work);
	struct task_struct *usertask;
	struct mm_struct *mm;
	int evicted_bos;

	evicted_bos = atomic_read(&process_info->evicted_bos);
	if (!evicted_bos)
		return;

	/* Reference task and mm in case of concurrent process termination */
	usertask = get_pid_task(process_info->pid, PIDTYPE_PID);
	if (!usertask)
		return;
	mm = get_task_mm(usertask);
	if (!mm) {
		put_task_struct(usertask);
		return;
	}

	mutex_lock(&process_info->lock);

	if (update_invalid_user_pages(process_info, mm))
		goto unlock_out;
	/* userptr_inval_list can be empty if all evicted userptr BOs
	 * have been freed. In that case there is nothing to validate
	 * and we can just restart the queues.
	 */
	if (!list_empty(&process_info->userptr_inval_list)) {
		if (atomic_read(&process_info->evicted_bos) != evicted_bos)
			goto unlock_out; /* Concurrent eviction, try again */

		if (validate_invalid_user_pages(process_info))
			goto unlock_out;
	}
	/* Final check for concurrent eviction and atomic update. If
	 * another eviction happens after successful update, it will
	 * be a first eviction that calls quiesce_mm. The eviction
	 * reference counting inside KFD will handle this case.
	 */
	if (atomic_cmpxchg(&process_info->evicted_bos, evicted_bos, 0) !=
	    evicted_bos)
		goto unlock_out;
	evicted_bos = 0;
	if (kgd2kfd_resume_mm(mm)) {
		pr_err("%s: Failed to resume KFD\n", __func__);
		/* No recovery from this failure. Probably the CP is
		 * hanging. No point trying again.
		 */
	}

unlock_out:
	mutex_unlock(&process_info->lock);
	mmput(mm);
	put_task_struct(usertask);

	/* If validation failed, reschedule another attempt */
	if (evicted_bos)
		schedule_delayed_work(&process_info->restore_userptr_work,
			msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
}

/** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
 *   KFD process identified by process_info
 *
 * @process_info: amdkfd_process_info of the KFD process
 *
 * After memory eviction, restore thread calls this function. The function
 * should be called when the Process is still valid. BO restore involves -
 *
 * 1.  Release old eviction fence and create new one
 * 2.  Get two copies of PD BO list from all the VMs. Keep one copy as pd_list.
 * 3.  Use the second PD list and kfd_bo_list to create a list (ctx.list) of
 *     BOs that need to be reserved.
 * 4.  Reserve all the BOs
 * 5.  Validate PD and PT BOs.
 * 6.  Validate all KFD BOs using kfd_bo_list, map them and add new fence
 * 7.  Add fence to all PD and PT BOs.
 * 8.  Unreserve all BOs
 */
int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
{
	struct amdgpu_bo_list_entry *pd_bo_list;
	struct amdkfd_process_info *process_info = info;
	struct amdgpu_vm *peer_vm;
	struct kgd_mem *mem;
	struct bo_vm_reservation_context ctx;
	struct amdgpu_amdkfd_fence *new_fence;
	int ret = 0, i;
	struct list_head duplicate_save;
	struct amdgpu_sync sync_obj;

	INIT_LIST_HEAD(&duplicate_save);
	INIT_LIST_HEAD(&ctx.list);
	INIT_LIST_HEAD(&ctx.duplicates);

	pd_bo_list = kcalloc(process_info->n_vms,
			     sizeof(struct amdgpu_bo_list_entry),
			     GFP_KERNEL);
	if (!pd_bo_list)
		return -ENOMEM;

	i = 0;
	mutex_lock(&process_info->lock);
	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			vm_list_node)
		amdgpu_vm_get_pd_bo(peer_vm, &ctx.list, &pd_bo_list[i++]);

	/* Reserve all BOs and page tables/directory. Add all BOs from
	 * kfd_bo_list to ctx.list
	 */
	list_for_each_entry(mem, &process_info->kfd_bo_list,
			    validate_list.head) {

		list_add_tail(&mem->resv_list.head, &ctx.list);
		mem->resv_list.bo = mem->validate_list.bo;
		mem->resv_list.num_shared = mem->validate_list.num_shared;
	}

	ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
				     false, &duplicate_save);
	if (ret) {
		pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
		goto ttm_reserve_fail;
	}

	amdgpu_sync_create(&sync_obj);

	/* Validate PDs and PTs */
	ret = process_validate_vms(process_info);
	if (ret)
		goto validate_map_fail;

	ret = process_sync_pds_resv(process_info, &sync_obj);
	if (ret) {
		pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n");
		goto validate_map_fail;
	}

	/* Validate BOs and map them to GPUVM (update VM page tables). */
	list_for_each_entry(mem, &process_info->kfd_bo_list,
			    validate_list.head) {

		struct amdgpu_bo *bo = mem->bo;
		uint32_t domain = mem->domain;
		struct kfd_bo_va_list *bo_va_entry;

		ret = amdgpu_amdkfd_bo_validate(bo, domain, false);
		if (ret) {
			pr_debug("Memory eviction: Validate BOs failed. Try again\n");
			goto validate_map_fail;
		}
		ret = amdgpu_sync_fence(&sync_obj, bo->tbo.moving, false);
		if (ret) {
			pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
			goto validate_map_fail;
		}
		list_for_each_entry(bo_va_entry, &mem->bo_va_list,
				    bo_list) {
			ret = update_gpuvm_pte((struct amdgpu_device *)
					      bo_va_entry->kgd_dev,
					      bo_va_entry,
					      &sync_obj);
			if (ret) {
				pr_debug("Memory eviction: update PTE failed. Try again\n");
				goto validate_map_fail;
			}
		}
	}

	/* Update page directories */
	ret = process_update_pds(process_info, &sync_obj);
	if (ret) {
		pr_debug("Memory eviction: update PDs failed. Try again\n");
		goto validate_map_fail;
	}

	/* Wait for validate and PT updates to finish */
	amdgpu_sync_wait(&sync_obj, false);

	/* Release old eviction fence and create new one, because fence only
	 * goes from unsignaled to signaled, fence cannot be reused.
	 * Use context and mm from the old fence.
	 */
	new_fence = amdgpu_amdkfd_fence_create(
				process_info->eviction_fence->base.context,
				process_info->eviction_fence->mm);
	if (!new_fence) {
		pr_err("Failed to create eviction fence\n");
		ret = -ENOMEM;
		goto validate_map_fail;
	}
	dma_fence_put(&process_info->eviction_fence->base);
	process_info->eviction_fence = new_fence;
	*ef = dma_fence_get(&new_fence->base);

	/* Attach new eviction fence to all BOs */
	list_for_each_entry(mem, &process_info->kfd_bo_list,
		validate_list.head)
		amdgpu_bo_fence(mem->bo,
			&process_info->eviction_fence->base, true);

	/* Attach eviction fence to PD / PT BOs */
	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
		struct amdgpu_bo *bo = peer_vm->root.base.bo;

		amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true);
	}

validate_map_fail:
	ttm_eu_backoff_reservation(&ctx.ticket, &ctx.list);
	amdgpu_sync_free(&sync_obj);
ttm_reserve_fail:
	mutex_unlock(&process_info->lock);
	kfree(pd_bo_list);
	return ret;
}

int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem)
{
	struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
	struct amdgpu_bo *gws_bo = (struct amdgpu_bo *)gws;
	int ret;

	if (!info || !gws)
		return -EINVAL;

	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
	if (!*mem)
		return -ENOMEM;

	mutex_init(&(*mem)->lock);
	(*mem)->bo = amdgpu_bo_ref(gws_bo);
	(*mem)->domain = AMDGPU_GEM_DOMAIN_GWS;
	(*mem)->process_info = process_info;
	add_kgd_mem_to_kfd_bo_list(*mem, process_info, false);
	amdgpu_sync_create(&(*mem)->sync);


	/* Validate gws bo the first time it is added to process */
	mutex_lock(&(*mem)->process_info->lock);
	ret = amdgpu_bo_reserve(gws_bo, false);
	if (unlikely(ret)) {
		pr_err("Reserve gws bo failed %d\n", ret);
		goto bo_reservation_failure;
	}

	ret = amdgpu_amdkfd_bo_validate(gws_bo, AMDGPU_GEM_DOMAIN_GWS, true);
	if (ret) {
		pr_err("GWS BO validate failed %d\n", ret);
		goto bo_validation_failure;
	}
	/* GWS resource is shared b/t amdgpu and amdkfd
	 * Add process eviction fence to bo so they can
	 * evict each other.
	 */
	ret = dma_resv_reserve_shared(gws_bo->tbo.base.resv, 1);
	if (ret)
		goto reserve_shared_fail;
	amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true);
	amdgpu_bo_unreserve(gws_bo);
	mutex_unlock(&(*mem)->process_info->lock);

	return ret;

reserve_shared_fail:
bo_validation_failure:
	amdgpu_bo_unreserve(gws_bo);
bo_reservation_failure:
	mutex_unlock(&(*mem)->process_info->lock);
	amdgpu_sync_free(&(*mem)->sync);
	remove_kgd_mem_from_kfd_bo_list(*mem, process_info);
	amdgpu_bo_unref(&gws_bo);
	mutex_destroy(&(*mem)->lock);
	kfree(*mem);
	*mem = NULL;
	return ret;
}

int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem)
{
	int ret;
	struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
	struct kgd_mem *kgd_mem = (struct kgd_mem *)mem;
	struct amdgpu_bo *gws_bo = kgd_mem->bo;

	/* Remove BO from process's validate list so restore worker won't touch
	 * it anymore
	 */
	remove_kgd_mem_from_kfd_bo_list(kgd_mem, process_info);

	ret = amdgpu_bo_reserve(gws_bo, false);
	if (unlikely(ret)) {
		pr_err("Reserve gws bo failed %d\n", ret);
		//TODO add BO back to validate_list?
		return ret;
	}
	amdgpu_amdkfd_remove_eviction_fence(gws_bo,
			process_info->eviction_fence);
	amdgpu_bo_unreserve(gws_bo);
	amdgpu_sync_free(&kgd_mem->sync);
	amdgpu_bo_unref(&gws_bo);
	mutex_destroy(&kgd_mem->lock);
	kfree(mem);
	return 0;
}