drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c (Linux 4.19.133)
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: monk liu <monk.liu@amd.com>
 */
#include <drm/drmP.h>
#include <drm/drm_auth.h>
#include "amdgpu.h"
#include "amdgpu_sched.h"
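
/*
 * amdgpu_ctx_priority_permit - check whether @filp may request @priority.
 *
 * NORMAL and lower priorities are open to everyone; higher priorities
 * require CAP_SYS_NICE or DRM master status.
 */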
static int amdgpu_ctx_priority_permit(struct drm_file *filp,
				      enum drm_sched_priority priority)
{
	/* NORMAL and below are accessible by everyone */
	if (priority <= DRM_SCHED_PRIORITY_NORMAL)
		return 0;

	if (capable(CAP_SYS_NICE))
		return 0;

	if (drm_is_current_master(filp))
		return 0;

	return -EACCES;
}
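
/*
 * amdgpu_ctx_init - initialize a context: allocate the per-ring fence
 * slots and create one scheduler entity per ring (the KIQ ring is
 * skipped), each placed in the run queue matching the requested priority.
 */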
static int amdgpu_ctx_init(struct amdgpu_device *adev,
			   enum drm_sched_priority priority,
			   struct drm_file *filp,
			   struct amdgpu_ctx *ctx)
{
	unsigned i, j;
	int r;

	if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
		return -EINVAL;

	r = amdgpu_ctx_priority_permit(filp, priority);
	if (r)
		return r;

	memset(ctx, 0, sizeof(*ctx));
	ctx->adev = adev;
	kref_init(&ctx->refcount);
	spin_lock_init(&ctx->ring_lock);
	ctx->fences = kcalloc(amdgpu_sched_jobs * AMDGPU_MAX_RINGS,
			      sizeof(struct dma_fence*), GFP_KERNEL);
	if (!ctx->fences)
		return -ENOMEM;

	mutex_init(&ctx->lock);

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		ctx->rings[i].sequence = 1;
		ctx->rings[i].fences = &ctx->fences[amdgpu_sched_jobs * i];
	}

	ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
	ctx->reset_counter_query = ctx->reset_counter;
	ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
	ctx->init_priority = priority;
	ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;

	/* create context entity for each ring */
	for (i = 0; i < adev->num_rings; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		struct drm_sched_rq *rq;

		rq = &ring->sched.sched_rq[priority];

		if (ring == &adev->gfx.kiq.ring)
			continue;

		r = drm_sched_entity_init(&ctx->rings[i].entity,
					  &rq, 1, &ctx->guilty);
		if (r)
			goto failed;
	}

	r = amdgpu_queue_mgr_init(adev, &ctx->queue_mgr);
	if (r)
		goto failed;

	return 0;

failed:
	for (j = 0; j < i; j++)
		drm_sched_entity_destroy(&ctx->rings[j].entity);
	kfree(ctx->fences);
	ctx->fences = NULL;
	return r;
}
static void amdgpu_ctx_fini(struct kref *ref)
{
	struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount);
	struct amdgpu_device *adev = ctx->adev;
	unsigned i, j;

	if (!adev)
		return;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		for (j = 0; j < amdgpu_sched_jobs; ++j)
			dma_fence_put(ctx->rings[i].fences[j]);
	kfree(ctx->fences);
	ctx->fences = NULL;

	amdgpu_queue_mgr_fini(adev, &ctx->queue_mgr);

	mutex_destroy(&ctx->lock);

	kfree(ctx);
}
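
/*
 * amdgpu_ctx_alloc - allocate and initialize a context, register it in
 * the per-file handle IDR under @mgr->lock and return its handle in @id.
 */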
static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv,
			    struct drm_file *filp,
			    enum drm_sched_priority priority,
			    uint32_t *id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;
	int r;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_lock(&mgr->lock);
	r = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
	if (r < 0) {
		mutex_unlock(&mgr->lock);
		kfree(ctx);
		return r;
	}

	*id = (uint32_t)r;
	r = amdgpu_ctx_init(adev, priority, filp, ctx);
	if (r) {
		idr_remove(&mgr->ctx_handles, *id);
		*id = 0;
		kfree(ctx);
	}

	mutex_unlock(&mgr->lock);
	return r;
}
static void amdgpu_ctx_do_release(struct kref *ref)
{
	struct amdgpu_ctx *ctx;
	u32 i;

	ctx = container_of(ref, struct amdgpu_ctx, refcount);

	for (i = 0; i < ctx->adev->num_rings; i++) {

		if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
			continue;

		drm_sched_entity_destroy(&ctx->rings[i].entity);
	}

	amdgpu_ctx_fini(ref);
}
static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;

	mutex_lock(&mgr->lock);
	ctx = idr_remove(&mgr->ctx_handles, id);
	if (ctx)
		kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	mutex_unlock(&mgr->lock);
	return ctx ? 0 : -EINVAL;
}
static int amdgpu_ctx_query(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv, uint32_t id,
			    union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	unsigned reset_counter;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	/* TODO: these two are always zero */
	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	/* determine if a GPU reset has occurred since the last call */
	reset_counter = atomic_read(&adev->gpu_reset_counter);
	/* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
	if (ctx->reset_counter_query == reset_counter)
		out->state.reset_status = AMDGPU_CTX_NO_RESET;
	else
		out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
	ctx->reset_counter_query = reset_counter;

	mutex_unlock(&mgr->lock);
	return 0;
}
static int amdgpu_ctx_query2(struct amdgpu_device *adev,
			     struct amdgpu_fpriv *fpriv, uint32_t id,
			     union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	if (ctx->reset_counter != atomic_read(&adev->gpu_reset_counter))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RESET;

	if (ctx->vram_lost_counter != atomic_read(&adev->vram_lost_counter))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST;

	if (atomic_read(&ctx->guilty))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;

	mutex_unlock(&mgr->lock);
	return 0;
}
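
/*
 * amdgpu_ctx_ioctl - DRM_AMDGPU_CTX entry point, dispatching context
 * allocation, free and the two query variants. Invalid priority values
 * are accepted and mapped to NORMAL for backwards compatibility.
 */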
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp)
{
	int r;
	uint32_t id;
	enum drm_sched_priority priority;

	union drm_amdgpu_ctx *args = data;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;

	r = 0;
	id = args->in.ctx_id;
	priority = amdgpu_to_sched_priority(args->in.priority);

	/* For backwards compatibility reasons, we need to accept
	 * ioctls with garbage in the priority field */
	if (priority == DRM_SCHED_PRIORITY_INVALID)
		priority = DRM_SCHED_PRIORITY_NORMAL;

	switch (args->in.op) {
	case AMDGPU_CTX_OP_ALLOC_CTX:
		r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id);
		args->out.alloc.ctx_id = id;
		break;
	case AMDGPU_CTX_OP_FREE_CTX:
		r = amdgpu_ctx_free(fpriv, id);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE:
		r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE2:
		r = amdgpu_ctx_query2(adev, fpriv, id, &args->out);
		break;
	default:
		return -EINVAL;
	}

	return r;
}
struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return NULL;

	mgr = &fpriv->ctx_mgr;

	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (ctx)
		kref_get(&ctx->refcount);
	mutex_unlock(&mgr->lock);
	return ctx;
}
int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
{
	if (ctx == NULL)
		return -EINVAL;

	kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	return 0;
}
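
/*
 * amdgpu_ctx_add_fence - store @fence in the ring's fence slot array,
 * indexed by the sequence number modulo amdgpu_sched_jobs. The reference
 * to the fence being replaced is dropped, and the sequence number of the
 * new fence is returned via @handler.
 */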
int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
			 struct dma_fence *fence, uint64_t *handler)
{
	struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
	uint64_t seq = cring->sequence;
	unsigned idx = 0;
	struct dma_fence *other = NULL;

	idx = seq & (amdgpu_sched_jobs - 1);
	other = cring->fences[idx];
	if (other)
		BUG_ON(!dma_fence_is_signaled(other));

	dma_fence_get(fence);

	spin_lock(&ctx->ring_lock);
	cring->fences[idx] = fence;
	cring->sequence++;
	spin_unlock(&ctx->ring_lock);

	dma_fence_put(other);
	if (handler)
		*handler = seq;

	return 0;
}
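
/*
 * amdgpu_ctx_get_fence - look up the fence for sequence @seq on @ring.
 * A sequence of ~0ull means the most recent submission. Sequences newer
 * than the ring's counter are invalid; sequences that have already
 * fallen out of the fence window return NULL, since their slot could
 * only have been reused after the fence signaled.
 */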
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				       struct amdgpu_ring *ring, uint64_t seq)
{
	struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
	struct dma_fence *fence;

	spin_lock(&ctx->ring_lock);

	if (seq == ~0ull)
		seq = ctx->rings[ring->idx].sequence - 1;

	if (seq >= cring->sequence) {
		spin_unlock(&ctx->ring_lock);
		return ERR_PTR(-EINVAL);
	}

	if (seq + amdgpu_sched_jobs < cring->sequence) {
		spin_unlock(&ctx->ring_lock);
		return NULL;
	}

	fence = dma_fence_get(cring->fences[seq & (amdgpu_sched_jobs - 1)]);
	spin_unlock(&ctx->ring_lock);

	return fence;
}
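
/*
 * amdgpu_ctx_priority_override - move every ring entity of @ctx to the
 * run queue of the effective priority: the override if one is set,
 * otherwise the priority given at creation. The KIQ ring is skipped.
 */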
void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
				  enum drm_sched_priority priority)
{
	int i;
	struct amdgpu_device *adev = ctx->adev;
	struct drm_sched_rq *rq;
	struct drm_sched_entity *entity;
	struct amdgpu_ring *ring;
	enum drm_sched_priority ctx_prio;

	ctx->override_priority = priority;

	ctx_prio = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
			ctx->init_priority : ctx->override_priority;

	for (i = 0; i < adev->num_rings; i++) {
		ring = adev->rings[i];
		entity = &ctx->rings[i].entity;
		rq = &ring->sched.sched_rq[ctx_prio];

		if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
			continue;

		drm_sched_entity_set_rq(entity, rq);
	}
}
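
/*
 * amdgpu_ctx_wait_prev_fence - interruptibly wait for the fence that
 * currently occupies the slot the next submission on @ring_id will reuse.
 */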
int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id)
{
	struct amdgpu_ctx_ring *cring = &ctx->rings[ring_id];
	unsigned idx = cring->sequence & (amdgpu_sched_jobs - 1);
	struct dma_fence *other = cring->fences[idx];

	if (other) {
		signed long r;
		r = dma_fence_wait(other, true);
		if (r < 0) {
			if (r != -ERESTARTSYS)
				DRM_ERROR("Error (%ld) waiting for fence!\n", r);

			return r;
		}
	}

	return 0;
}
void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
{
	mutex_init(&mgr->lock);
	idr_init(&mgr->ctx_handles);
}
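
/*
 * amdgpu_ctx_mgr_entity_flush - flush the scheduler entity of every ring
 * of every context managed by @mgr, sharing one timeout budget across
 * all of them.
 */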
void amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i;
	long max_wait = MAX_WAIT_SCHED_ENTITY_Q_EMPTY;

	idp = &mgr->ctx_handles;

	mutex_lock(&mgr->lock);
	idr_for_each_entry(idp, ctx, id) {

		if (!ctx->adev) {
			mutex_unlock(&mgr->lock);
			return;
		}

		for (i = 0; i < ctx->adev->num_rings; i++) {

			if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
				continue;

			max_wait = drm_sched_entity_flush(&ctx->rings[i].entity,
							  max_wait);
		}
	}
	mutex_unlock(&mgr->lock);
}
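
/*
 * amdgpu_ctx_mgr_entity_fini - tear down the scheduler entities of the
 * remaining contexts, complaining about contexts that still hold extra
 * references.
 */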
void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i;

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {

		if (!ctx->adev)
			return;

		for (i = 0; i < ctx->adev->num_rings; i++) {

			if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
				continue;

			if (kref_read(&ctx->refcount) == 1)
				drm_sched_entity_fini(&ctx->rings[i].entity);
			else
				DRM_ERROR("ctx %p is still alive\n", ctx);
		}
	}
}
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id;

	amdgpu_ctx_mgr_entity_fini(mgr);

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_put(&ctx->refcount, amdgpu_ctx_fini) != 1)
			DRM_ERROR("ctx %p is still alive\n", ctx);
	}

	idr_destroy(&mgr->ctx_handles);
	mutex_destroy(&mgr->lock);
}