treewide: remove redundant IS_ERR() before error code check
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "amdgpu_vm.h"
#include "amdgpu_job.h"
#include "amdgpu_object.h"
#include "amdgpu_trace.h"

#define AMDGPU_VM_SDMA_MIN_NUM_DW	256u
#define AMDGPU_VM_SDMA_MAX_NUM_DW	(16u * 1024u)
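
/*
 * Added sizing note: one dword is 4 bytes, so the minimum IB budget of
 * 256 dwords above is 1 KiB of command space per job and the maximum of
 * 16384 dwords is 64 KiB per submission.
 */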

/**
 * amdgpu_vm_sdma_map_table - make sure new PDs/PTs are GTT mapped
 *
 * @table: newly allocated or validated PD/PT
 */
static int amdgpu_vm_sdma_map_table(struct amdgpu_bo *table)
{
	int r;

	r = amdgpu_ttm_alloc_gart(&table->tbo);
	if (r)
		return r;

	if (table->shadow)
		r = amdgpu_ttm_alloc_gart(&table->shadow->tbo);

	return r;
}
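
/*
 * Added note: the SDMA engine addresses PDs/PTs through their GART mapping,
 * so both the table BO and its optional shadow copy must be mapped before
 * any update commands reference them.
 */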

/**
 * amdgpu_vm_sdma_prepare - prepare SDMA command submission
 *
 * @p: see amdgpu_vm_update_params definition
 * @owner: owner we need to sync to
 * @exclusive: exclusive move fence we need to sync to
 *
 * Returns:
 * Negative errno, 0 for success.
 */
static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
				  void *owner, struct dma_fence *exclusive)
{
	struct amdgpu_bo *root = p->vm->root.base.bo;
	unsigned int ndw = AMDGPU_VM_SDMA_MIN_NUM_DW;
	int r;

	r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, &p->job);
	if (r)
		return r;

	p->num_dw_left = ndw;

	/* Wait for moves to be completed */
	r = amdgpu_sync_fence(&p->job->sync, exclusive, false);
	if (r)
		return r;

	/* Don't wait for any submissions during page fault handling */
	if (p->direct)
		return 0;

	return amdgpu_sync_resv(p->adev, &p->job->sync, root->tbo.base.resv,
				owner, false);
}
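
/*
 * Added note: direct updates run from the page fault handler and only sync
 * to the exclusive move fence above; delayed updates additionally sync to
 * the root PD's reservation object so they order behind prior submissions.
 */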

/**
 * amdgpu_vm_sdma_commit - commit SDMA command submission
 *
 * @p: see amdgpu_vm_update_params definition
 * @fence: resulting fence
 *
 * Returns:
 * Negative errno, 0 for success.
 */
static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
				 struct dma_fence **fence)
{
	struct amdgpu_ib *ib = p->job->ibs;
	struct drm_sched_entity *entity;
	struct dma_fence *f, *tmp;
	struct amdgpu_ring *ring;
	int r;

	entity = p->direct ? &p->vm->direct : &p->vm->delayed;
	ring = container_of(entity->rq->sched, struct amdgpu_ring, sched);

	WARN_ON(ib->length_dw == 0);
	amdgpu_ring_pad_ib(ring, ib);
	WARN_ON(ib->length_dw > p->num_dw_left);
	r = amdgpu_job_submit(p->job, entity, AMDGPU_FENCE_OWNER_VM, &f);
	if (r)
		goto error;

	tmp = dma_fence_get(f);
	if (p->direct)
		swap(p->vm->last_direct, tmp);
	else
		swap(p->vm->last_delayed, tmp);
	dma_fence_put(tmp);

	if (fence && !p->direct)
		swap(*fence, f);
	dma_fence_put(f);
	return 0;

error:
	amdgpu_job_free(p->job);
	return r;
}
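
/*
 * Added note: the VM keeps a reference to the last direct and last delayed
 * submission fence (swapped in above) so later operations can wait for
 * outstanding page table updates; the @fence out parameter is only filled
 * for delayed updates.
 */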

/**
 * amdgpu_vm_sdma_copy_ptes - copy the PTEs from mapping
 *
 * @p: see amdgpu_vm_update_params definition
 * @bo: PD/PT to update
 * @pe: addr of the page entry
 * @count: number of page entries to copy
 *
 * Traces the parameters and calls the DMA function to copy the PTEs.
 */
static void amdgpu_vm_sdma_copy_ptes(struct amdgpu_vm_update_params *p,
				     struct amdgpu_bo *bo, uint64_t pe,
				     unsigned count)
{
	struct amdgpu_ib *ib = p->job->ibs;
	uint64_t src = ib->gpu_addr;

	src += p->num_dw_left * 4;

	pe += amdgpu_bo_gpu_offset(bo);
	trace_amdgpu_vm_copy_ptes(pe, src, count, p->direct);

	amdgpu_vm_copy_pte(p->adev, ib, pe, src, count);
}
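
/*
 * Added layout note: amdgpu_vm_sdma_update() stashes the PTE values at the
 * tail of the IB, starting at dword index p->num_dw_left, so the copy source
 * is simply the IB's own GPU address plus that offset times 4 bytes.
 */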

/**
 * amdgpu_vm_sdma_set_ptes - helper to call the right asic function
 *
 * @p: see amdgpu_vm_update_params definition
 * @bo: PD/PT to update
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Traces the parameters and calls the right asic functions
 * to setup the page table using the DMA.
 */
static void amdgpu_vm_sdma_set_ptes(struct amdgpu_vm_update_params *p,
				    struct amdgpu_bo *bo, uint64_t pe,
				    uint64_t addr, unsigned count,
				    uint32_t incr, uint64_t flags)
{
	struct amdgpu_ib *ib = p->job->ibs;

	pe += amdgpu_bo_gpu_offset(bo);
	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->direct);
	if (count < 3) {
		amdgpu_vm_write_pte(p->adev, ib, pe, addr | flags,
				    count, incr);
	} else {
		amdgpu_vm_set_pte_pde(p->adev, ib, pe, addr,
				      count, incr, flags);
	}
}
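
/*
 * Added note on the count < 3 heuristic above: for runs of one or two
 * entries the PTE values are written inline into the command stream, while
 * longer runs use the generate-style packet that produces the entries on
 * the fly (addr, addr + incr, addr + 2 * incr, ...), which is more compact
 * for large counts.
 */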

/**
 * amdgpu_vm_sdma_update - execute VM update
 *
 * @p: see amdgpu_vm_update_params definition
 * @bo: PD/PT to update
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Reserve space in the IB, setup mapping buffer on demand and write commands to
 * the IB.
 */
static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
				 struct amdgpu_bo *bo, uint64_t pe,
				 uint64_t addr, unsigned count, uint32_t incr,
				 uint64_t flags)
{
	unsigned int i, ndw, nptes;
	uint64_t *pte;
	int r;

	do {
		ndw = p->num_dw_left;
		ndw -= p->job->ibs->length_dw;

		if (ndw < 32) {
			r = amdgpu_vm_sdma_commit(p, NULL);
			if (r)
				return r;

			/* estimate how many dw we need */
			ndw = 32;
			if (p->pages_addr)
				ndw += count * 2;
			ndw = max(ndw, AMDGPU_VM_SDMA_MIN_NUM_DW);
			ndw = min(ndw, AMDGPU_VM_SDMA_MAX_NUM_DW);

			r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, &p->job);
			if (r)
				return r;

			p->num_dw_left = ndw;
		}

		if (!p->pages_addr) {
			/* set page commands needed */
			if (bo->shadow)
				amdgpu_vm_sdma_set_ptes(p, bo->shadow, pe, addr,
							count, incr, flags);
			amdgpu_vm_sdma_set_ptes(p, bo, pe, addr, count,
						incr, flags);
			return 0;
		}

		/* copy commands needed */
		ndw -= p->adev->vm_manager.vm_pte_funcs->copy_pte_num_dw *
			(bo->shadow ? 2 : 1);

		/* for padding */
		ndw -= 7;

		nptes = min(count, ndw / 2);

		/* Put the PTEs at the end of the IB. */
		p->num_dw_left -= nptes * 2;
		pte = (uint64_t *)&(p->job->ibs->ptr[p->num_dw_left]);
		for (i = 0; i < nptes; ++i, addr += incr) {
			pte[i] = amdgpu_vm_map_gart(p->pages_addr, addr);
			pte[i] |= flags;
		}

		if (bo->shadow)
			amdgpu_vm_sdma_copy_ptes(p, bo->shadow, pe, nptes);
		amdgpu_vm_sdma_copy_ptes(p, bo, pe, nptes);

		pe += nptes * 8;
		count -= nptes;
	} while (count);

	return 0;
}
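
/*
 * Added arithmetic note: each PTE is 8 bytes, i.e. two dwords, which is why
 * the size estimate above adds count * 2 dwords for the copy path, the tail
 * reservation takes nptes * 2 dwords, at most ndw / 2 entries fit in the
 * remaining space, and pe advances by nptes * 8 bytes per iteration.
 */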

const struct amdgpu_vm_update_funcs amdgpu_vm_sdma_funcs = {
	.map_table = amdgpu_vm_sdma_map_table,
	.prepare = amdgpu_vm_sdma_prepare,
	.update = amdgpu_vm_sdma_update,
	.commit = amdgpu_vm_sdma_commit
};
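
/*
 * Illustrative usage sketch, added for clarity and not part of the original
 * file: the expected calling sequence for the hooks above, assuming the
 * caller has filled in struct amdgpu_vm_update_params and that
 * vm->update_funcs points at this table.  The function name and the owner
 * and fence arguments are hypothetical; the block is compiled out on
 * purpose.
 */
#if 0
static int example_update_one_range(struct amdgpu_vm_update_params *p,
				    struct amdgpu_bo *pt, uint64_t pe,
				    uint64_t addr, unsigned count,
				    uint32_t incr, uint64_t flags,
				    struct dma_fence **fence)
{
	const struct amdgpu_vm_update_funcs *funcs = p->vm->update_funcs;
	int r;

	/* allocate the job and sync to moves / the root PD as needed */
	r = funcs->prepare(p, AMDGPU_FENCE_OWNER_VM, NULL);
	if (r)
		return r;

	/* queue the PTE writes; may commit and reallocate the job internally */
	r = funcs->update(p, pt, pe, addr, count, incr, flags);
	if (r)
		return r;

	/* pad, submit and hand back the resulting fence */
	return funcs->commit(p, fence);
}
#endif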