/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atom.h"
/*
 * IBs (Indirect Buffers) are areas of GPU accessible memory where
 * commands are stored.  You can put a pointer to the IB in the
 * command ring and the hw will fetch the commands from the IB
 * and execute them.  Generally userspace acceleration drivers
 * produce command buffers which are sent to the kernel and
 * put in IBs for execution by the requested ring.  A minimal
 * lifecycle sketch follows amdgpu_ib_get() below.
 */
static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev);
/**
 * amdgpu_ib_get - request an IB (Indirect Buffer)
 *
 * @ring: ring the IB is associated with
 * @vm: VM the IB is associated with, may be NULL
 * @size: requested IB size
 * @ib: IB object returned
 *
 * Request an IB (all asics).  IBs are allocated using the
 * suballocator.
 * Returns 0 on success, error on failure.
 */
int amdgpu_ib_get(struct amdgpu_ring *ring, struct amdgpu_vm *vm,
		  unsigned size, struct amdgpu_ib *ib)
{
	struct amdgpu_device *adev = ring->adev;
	int r;

	if (size) {
		r = amdgpu_sa_bo_new(&adev->ring_tmp_bo,
				     &ib->sa_bo, size, 256);
		if (r) {
			dev_err(adev->dev, "failed to get a new IB (%d)\n", r);
			return r;
		}

		/* CPU pointer used to fill in command packets */
		ib->ptr = amdgpu_sa_bo_cpu_addr(ib->sa_bo);

		/* without a VM the suballocator GPU address is used directly */
		if (!vm)
			ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
	}

	amdgpu_sync_create(&ib->sync);

	ib->ring = ring;
	ib->fence = NULL;
	ib->user = NULL;
	ib->vm = vm;

	return 0;
}
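/*
 * A minimal lifecycle sketch (illustrative only, not driver code): the
 * caller names (ring, adev, owner) and the packet contents are assumed
 * placeholders, not values this file defines.
 *
 *	struct amdgpu_ib ib;
 *	int r;
 *
 *	r = amdgpu_ib_get(ring, NULL, 256, &ib);	// no VM: kernel IB
 *	if (r)
 *		return r;
 *	ib.ptr[0] = ...;	// CPU fills in command packets via ib.ptr
 *	ib.length_dw = ...;	// number of dwords actually written
 *	r = amdgpu_ib_schedule(adev, 1, &ib, owner);	// emit on ib.ring
 *	amdgpu_ib_free(adev, &ib);
 */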
/**
 * amdgpu_ib_free - free an IB (Indirect Buffer)
 *
 * @adev: amdgpu_device pointer
 * @ib: IB object to free
 *
 * Free an IB (all asics).
 */
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib)
{
	amdgpu_sync_free(adev, &ib->sync, &ib->fence->base);
	amdgpu_sa_bo_free(adev, &ib->sa_bo, &ib->fence->base);
	if (ib->fence)
		fence_put(&ib->fence->base);
}
/**
 * amdgpu_ib_schedule - schedule an IB (Indirect Buffer) on the ring
 *
 * @adev: amdgpu_device pointer
 * @num_ibs: number of IBs to schedule
 * @ibs: IB objects to schedule
 * @owner: owner for creating the fences
 *
 * Schedule an IB on the associated ring (all asics).
 * Returns 0 on success, error on failure.
 *
 * On SI, there are two parallel engines fed from the primary ring,
 * the CE (Constant Engine) and the DE (Drawing Engine).  Since
 * resource descriptors have moved to memory, the CE allows you to
 * prime the caches while the DE is updating register state so that
 * the resource descriptors will be already in cache when the draw is
 * processed.  To accomplish this, the userspace driver submits two
 * IBs, one for the CE and one for the DE.  If there is a CE IB (called
 * a CONST_IB), it will be put on the ring prior to the DE IB.  Prior
 * to SI there was just a DE IB.  A sketch of such a two-IB submission
 * follows this function.
 */
int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
		       struct amdgpu_ib *ibs, void *owner)
{
	struct amdgpu_ib *ib = &ibs[0];
	struct amdgpu_ring *ring;
	struct amdgpu_ctx *ctx, *old_ctx;
	struct amdgpu_vm *vm;
	unsigned i;
	int r = 0;

	if (num_ibs == 0)
		return -EINVAL;

	/* all IBs in a submission share the first IB's ring, ctx and vm */
	ring = ibs->ring;
	ctx = ibs->ctx;
	vm = ibs->vm;

	if (!ring->ready) {
		dev_err(adev->dev, "couldn't schedule ib\n");
		return -EINVAL;
	}
	r = amdgpu_sync_wait(&ibs->sync);
	if (r) {
		dev_err(adev->dev, "IB sync failed (%d).\n", r);
		return r;
	}

	r = amdgpu_ring_lock(ring, (256 + AMDGPU_NUM_SYNCS * 8) * num_ibs);
	if (r) {
		dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
		return r;
	}
	if (vm) {
		/* grab a vm id if necessary */
		r = amdgpu_vm_grab_id(ibs->vm, ibs->ring, &ibs->sync);
		if (r) {
			amdgpu_ring_unlock_undo(ring);
			return r;
		}
	}
	r = amdgpu_sync_rings(&ibs->sync, ring);
	if (r) {
		amdgpu_ring_unlock_undo(ring);
		dev_err(adev->dev, "failed to sync rings (%d)\n", r);
		return r;
	}
	if (vm) {
		/* do context switch */
		amdgpu_vm_flush(ring, vm, ib->sync.last_vm_update);

		if (ring->funcs->emit_gds_switch)
			amdgpu_ring_emit_gds_switch(ring, ib->vm->ids[ring->idx].id,
						    ib->gds_base, ib->gds_size,
						    ib->gws_base, ib->gws_size,
						    ib->oa_base, ib->oa_size);

		if (ring->funcs->emit_hdp_flush)
			amdgpu_ring_emit_hdp_flush(ring);
	}
	old_ctx = ring->current_ctx;
	for (i = 0; i < num_ibs; ++i) {
		ib = &ibs[i];

		/* all IBs in the submission must target the same context */
		if (ib->ring != ring || ib->ctx != ctx || ib->vm != vm) {
			ring->current_ctx = old_ctx;
			amdgpu_ring_unlock_undo(ring);
			return -EINVAL;
		}
		amdgpu_ring_emit_ib(ring, ib);
		ring->current_ctx = ctx;
	}
	r = amdgpu_fence_emit(ring, owner, &ib->fence);
	if (r) {
		dev_err(adev->dev, "failed to emit fence (%d)\n", r);
		ring->current_ctx = old_ctx;
		amdgpu_ring_unlock_undo(ring);
		return r;
	}

	if (!amdgpu_enable_scheduler && ib->ctx)
		ib->sequence = amdgpu_ctx_add_fence(ib->ctx, ring,
						    &ib->fence->base);
	/* wrap the last IB with fence */
	if (ib->user) {
		uint64_t addr = amdgpu_bo_gpu_offset(ib->user->bo);
		addr += ib->user->offset;
		amdgpu_ring_emit_fence(ring, addr, ib->sequence,
				       AMDGPU_FENCE_FLAG_64BIT);
	}
	if (ib->vm)
		amdgpu_vm_fence(adev, ib->vm, &ib->fence->base);

	amdgpu_ring_unlock_commit(ring);
	return 0;
}
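/*
 * Sketch of the SI CE/DE case described above (illustrative only): the
 * CONST_IB simply comes first in the array, so it is emitted on the ring
 * ahead of the DE IB; both must share the same ring, ctx and vm.  Filling
 * in the two IBs is assumed to have happened elsewhere.
 *
 *	struct amdgpu_ib ibs[2];	// ibs[0] = CE CONST_IB, ibs[1] = DE IB
 *	r = amdgpu_ib_schedule(adev, 2, ibs, owner);
 */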
/**
 * amdgpu_ib_pool_init - Init the IB (Indirect Buffer) pool
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the suballocator to manage a pool of memory
 * for use as IBs (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_ib_pool_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->ib_pool_ready) {
		return 0;
	}
	r = amdgpu_sa_bo_manager_init(adev, &adev->ring_tmp_bo,
				      AMDGPU_IB_POOL_SIZE*64*1024,
				      AMDGPU_GPU_PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_GTT);
	if (r) {
		return r;
	}

	r = amdgpu_sa_bo_manager_start(adev, &adev->ring_tmp_bo);
	if (r) {
		return r;
	}

	adev->ib_pool_ready = true;
	if (amdgpu_debugfs_sa_init(adev)) {
		dev_err(adev->dev, "failed to register debugfs file for SA\n");
	}

	return 0;
}
/**
 * amdgpu_ib_pool_fini - Free the IB (Indirect Buffer) pool
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the suballocator managing the pool of memory
 * for use as IBs (all asics).
 */
void amdgpu_ib_pool_fini(struct amdgpu_device *adev)
{
	if (adev->ib_pool_ready) {
		amdgpu_sa_bo_manager_suspend(adev, &adev->ring_tmp_bo);
		amdgpu_sa_bo_manager_fini(adev, &adev->ring_tmp_bo);
		adev->ib_pool_ready = false;
	}
}
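/*
 * Expected pairing (a sketch, assuming the usual device bring-up flow):
 * the pool is created once during device init and torn down on fini;
 * amdgpu_ib_get() is only valid in between.
 *
 *	r = amdgpu_ib_pool_init(adev);
 *	if (r)
 *		return r;
 *	// ... rings run, IBs are suballocated from adev->ring_tmp_bo ...
 *	amdgpu_ib_pool_fini(adev);
 */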
/**
 * amdgpu_ib_ring_tests - test IBs on the rings
 *
 * @adev: amdgpu_device pointer
 *
 * Test an IB (Indirect Buffer) on each ring.
 * If the test fails, disable the ring.
 * Returns 0 on success, error if the primary GFX ring
 * IB test fails.
 */
int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
{
	unsigned i;
	int r;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->ready)
			continue;

		r = amdgpu_ring_test_ib(ring);
		if (r) {
			ring->ready = false;

			if (ring == &adev->gfx.gfx_ring[0]) {
				/* oh, oh, that's really bad */
				DRM_ERROR("amdgpu: failed testing IB on GFX ring (%d).\n", r);
				adev->accel_working = false;
				return r;

			} else {
				/* still not good, but we can live with it */
				DRM_ERROR("amdgpu: failed testing IB on ring %d (%d).\n", i, r);
			}
		}
	}
	return 0;
}
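/*
 * Typical call site (a sketch; exact placement varies by asic): the IB
 * tests run once the rings are up, e.g. late in device init or on resume:
 *
 *	r = amdgpu_ib_ring_tests(adev);
 *	if (r)
 *		DRM_ERROR("ib ring test failed (%d).\n", r);
 */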
/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_sa_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;

	amdgpu_sa_bo_dump_debug_info(&adev->ring_tmp_bo, m);

	return 0;
}

static struct drm_info_list amdgpu_debugfs_sa_list[] = {
	{"amdgpu_sa_info", &amdgpu_debugfs_sa_info, 0, NULL},
};

#endif
static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_sa_list, 1);
#else
	return 0;
#endif
}