treewide: remove redundant IS_ERR() before error code check
drivers/gpu/drm/msm/msm_gem_submit.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/file.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>

#include <drm/drm_file.h>

#include "msm_drv.h"
#include "msm_gpu.h"
#include "msm_gem.h"
#include "msm_gpu_trace.h"
/*
 * Cmdstream submission:
 */

/* make sure these don't conflict w/ MSM_SUBMIT_BO_x */
#define BO_VALID	0x8000	/* is current addr in cmdstream correct/valid? */
#define BO_LOCKED	0x4000
#define BO_PINNED	0x2000
static struct msm_gem_submit *submit_create(struct drm_device *dev,
		struct msm_gpu *gpu, struct msm_gem_address_space *aspace,
		struct msm_gpu_submitqueue *queue, uint32_t nr_bos,
		uint32_t nr_cmds)
{
	struct msm_gem_submit *submit;
	uint64_t sz = struct_size(submit, bos, nr_bos) +
		((u64)nr_cmds * sizeof(submit->cmd[0]));

	if (sz > SIZE_MAX)
		return NULL;

	submit = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
	if (!submit)
		return NULL;

	submit->dev = dev;
	submit->aspace = aspace;
	submit->gpu = gpu;
	submit->fence = NULL;
	submit->cmd = (void *)&submit->bos[nr_bos];
	submit->queue = queue;
	submit->ring = gpu->rb[queue->prio];

	/* initially, until copy_from_user() and bo lookup succeeds: */
	submit->nr_bos = 0;
	submit->nr_cmds = 0;

	INIT_LIST_HEAD(&submit->node);
	INIT_LIST_HEAD(&submit->bo_list);

	return submit;
}
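/* Tear down a submit: drop the fence, pid and submitqueue references, unlink
 * it from whatever list it is on, and free the submit_create() allocation:
 */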
void msm_gem_submit_free(struct msm_gem_submit *submit)
{
	dma_fence_put(submit->fence);
	list_del(&submit->node);
	put_pid(submit->pid);
	msm_submitqueue_put(submit->queue);

	kfree(submit);
}
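/* Copy the bo table from userspace and resolve each handle to a GEM object
 * under file->table_lock, taking a reference and queueing each object on the
 * submit's bo_list.  On failure, submit->nr_bos records how many entries hold
 * references so cleanup only touches those:
 */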
static int submit_lookup_objects(struct msm_gem_submit *submit,
		struct drm_msm_gem_submit *args, struct drm_file *file)
{
	unsigned i;
	int ret = 0;

	for (i = 0; i < args->nr_bos; i++) {
		struct drm_msm_gem_submit_bo submit_bo;
		void __user *userptr =
			u64_to_user_ptr(args->bos + (i * sizeof(submit_bo)));

		/* make sure we don't have garbage flags, in case we hit
		 * error path before flags is initialized:
		 */
		submit->bos[i].flags = 0;

		if (copy_from_user(&submit_bo, userptr, sizeof(submit_bo))) {
			ret = -EFAULT;
			i = 0;
			goto out;
		}

/* at least one of READ and/or WRITE flags should be set: */
#define MANDATORY_FLAGS (MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE)

		if ((submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) ||
			!(submit_bo.flags & MANDATORY_FLAGS)) {
			DRM_ERROR("invalid flags: %x\n", submit_bo.flags);
			ret = -EINVAL;
			i = 0;
			goto out;
		}

		submit->bos[i].handle = submit_bo.handle;
		submit->bos[i].flags = submit_bo.flags;
		/* in validate_objects() we figure out if this is true: */
		submit->bos[i].iova = submit_bo.presumed;
	}

	spin_lock(&file->table_lock);

	for (i = 0; i < args->nr_bos; i++) {
		struct drm_gem_object *obj;
		struct msm_gem_object *msm_obj;

		/* normally use drm_gem_object_lookup(), but for bulk lookup
		 * all under single table_lock just hit object_idr directly:
		 */
		obj = idr_find(&file->object_idr, submit->bos[i].handle);
		if (!obj) {
			DRM_ERROR("invalid handle %u at index %u\n", submit->bos[i].handle, i);
			ret = -EINVAL;
			goto out_unlock;
		}

		msm_obj = to_msm_bo(obj);

		if (!list_empty(&msm_obj->submit_entry)) {
			DRM_ERROR("handle %u at index %u already on submit list\n",
					submit->bos[i].handle, i);
			ret = -EINVAL;
			goto out_unlock;
		}

		drm_gem_object_get(obj);

		submit->bos[i].obj = msm_obj;

		list_add_tail(&msm_obj->submit_entry, &submit->bo_list);
	}

out_unlock:
	spin_unlock(&file->table_lock);

out:
	submit->nr_bos = i;

	return ret;
}
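/* Undo per-bo state: unpin the iova if pinned, drop the reservation lock if
 * held, and (when backing off for a ww-mutex retry) forget a presumed iova
 * that was never validated:
 */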
static void submit_unlock_unpin_bo(struct msm_gem_submit *submit,
		int i, bool backoff)
{
	struct msm_gem_object *msm_obj = submit->bos[i].obj;

	if (submit->bos[i].flags & BO_PINNED)
		msm_gem_unpin_iova(&msm_obj->base, submit->aspace);

	if (submit->bos[i].flags & BO_LOCKED)
		dma_resv_unlock(msm_obj->base.resv);

	if (backoff && !(submit->bos[i].flags & BO_VALID))
		submit->bos[i].iova = 0;

	submit->bos[i].flags &= ~(BO_LOCKED | BO_PINNED);
}
/* This is where we make sure all the bo's are reserved and pin'd: */
static int submit_lock_objects(struct msm_gem_submit *submit)
{
	int contended, slow_locked = -1, i, ret = 0;

retry:
	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;

		if (slow_locked == i)
			slow_locked = -1;

		contended = i;

		if (!(submit->bos[i].flags & BO_LOCKED)) {
			ret = dma_resv_lock_interruptible(msm_obj->base.resv,
							  &submit->ticket);
			if (ret)
				goto fail;
			submit->bos[i].flags |= BO_LOCKED;
		}
	}

	ww_acquire_done(&submit->ticket);

	return 0;

fail:
	for (; i >= 0; i--)
		submit_unlock_unpin_bo(submit, i, true);

	if (slow_locked > 0)
		submit_unlock_unpin_bo(submit, slow_locked, true);

	if (ret == -EDEADLK) {
		struct msm_gem_object *msm_obj = submit->bos[contended].obj;
		/* we lost out in a seqno race, lock and retry.. */
		ret = dma_resv_lock_slow_interruptible(msm_obj->base.resv,
						       &submit->ticket);
		if (!ret) {
			submit->bos[contended].flags |= BO_LOCKED;
			slow_locked = contended;
			goto retry;
		}
	}

	return ret;
}
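/* Fence bookkeeping: reserve a shared-fence slot for buffers we only read,
 * and (unless userspace opted out with MSM_SUBMIT_NO_IMPLICIT) sync against
 * fences from other contexts via msm_gem_sync_object():
 */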
static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
{
	int i, ret = 0;

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE;

		if (!write) {
			/* NOTE: _reserve_shared() must happen before
			 * _add_shared_fence(), which makes this a slightly
			 * strange place to call it.  OTOH this is a
			 * convenient can-fail point to hook it in.
			 */
			ret = dma_resv_reserve_shared(msm_obj->base.resv,
						      1);
			if (ret)
				return ret;
		}

		if (no_implicit)
			continue;

		ret = msm_gem_sync_object(&msm_obj->base, submit->ring->fctx,
			write);
		if (ret)
			break;
	}

	return ret;
}
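/* Pin each bo's iova.  If it still matches the address userspace presumed,
 * mark it BO_VALID; otherwise record the new iova and clear submit->valid so
 * the cmdstream relocs get patched:
 */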
static int submit_pin_objects(struct msm_gem_submit *submit)
{
	int i, ret = 0;

	submit->valid = true;

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		uint64_t iova;

		/* if locking succeeded, pin bo: */
		ret = msm_gem_get_and_pin_iova(&msm_obj->base,
				submit->aspace, &iova);

		if (ret)
			break;

		submit->bos[i].flags |= BO_PINNED;

		if (iova == submit->bos[i].iova) {
			submit->bos[i].flags |= BO_VALID;
		} else {
			submit->bos[i].iova = iova;
			/* iova changed, so address in cmdstream is not valid: */
			submit->bos[i].flags &= ~BO_VALID;
			submit->valid = false;
		}
	}

	return ret;
}
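/* Bounds-checked lookup of a bo table entry, optionally returning the object,
 * its iova, and whether the presumed address is still valid:
 */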
static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
		struct msm_gem_object **obj, uint64_t *iova, bool *valid)
{
	if (idx >= submit->nr_bos) {
		DRM_ERROR("invalid buffer index: %u (out of %u)\n",
				idx, submit->nr_bos);
		return -EINVAL;
	}

	if (obj)
		*obj = submit->bos[idx].obj;
	if (iova)
		*iova = submit->bos[idx].iova;
	if (valid)
		*valid = !!(submit->bos[idx].flags & BO_VALID);

	return 0;
}
/* process the reloc's and patch up the cmdstream as needed: */
static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *obj,
		uint32_t offset, uint32_t nr_relocs, uint64_t relocs)
{
	uint32_t i, last_offset = 0;
	uint32_t *ptr;
	int ret = 0;

	if (!nr_relocs)
		return 0;

	if (offset % 4) {
		DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset);
		return -EINVAL;
	}

	/* For now, just map the entire thing.  Eventually we probably want
	 * to do it page-by-page, w/ kmap() if not vmap()d..
	 */
	ptr = msm_gem_get_vaddr(&obj->base);

	if (IS_ERR(ptr)) {
		ret = PTR_ERR(ptr);
		DBG("failed to map: %d", ret);
		return ret;
	}

	for (i = 0; i < nr_relocs; i++) {
		struct drm_msm_gem_submit_reloc submit_reloc;
		void __user *userptr =
			u64_to_user_ptr(relocs + (i * sizeof(submit_reloc)));
		uint32_t off;
		uint64_t iova;
		bool valid;

		if (copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc))) {
			ret = -EFAULT;
			goto out;
		}

		if (submit_reloc.submit_offset % 4) {
			DRM_ERROR("non-aligned reloc offset: %u\n",
					submit_reloc.submit_offset);
			ret = -EINVAL;
			goto out;
		}

		/* offset in dwords: */
		off = submit_reloc.submit_offset / 4;

		if ((off >= (obj->base.size / 4)) ||
				(off < last_offset)) {
			DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
			ret = -EINVAL;
			goto out;
		}

		ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova, &valid);
		if (ret)
			goto out;

		if (valid)
			continue;

		iova += submit_reloc.reloc_offset;

		if (submit_reloc.shift < 0)
			iova >>= -submit_reloc.shift;
		else
			iova <<= submit_reloc.shift;

		ptr[off] = iova | submit_reloc.or;

		last_offset = off;
	}

out:
	msm_gem_put_vaddr(&obj->base);

	return ret;
}
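/* Per-bo teardown after a submit attempt: unlock/unpin each bo, take it off
 * the submit list, and drop the reference taken in submit_lookup_objects():
 */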
static void submit_cleanup(struct msm_gem_submit *submit)
{
	unsigned i;

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		submit_unlock_unpin_bo(submit, i, false);
		list_del_init(&msm_obj->submit_entry);
		drm_gem_object_put(&msm_obj->base);
	}
}
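/* Top-level SUBMIT ioctl: validate flags, look up / lock / pin the bos, copy
 * in and validate each cmd, patch relocs if any presumed iova was stale, then
 * hand the submit to msm_gpu_submit() and return the fence (and optional
 * fence fd) to userspace:
 */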
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	static atomic_t ident = ATOMIC_INIT(0);
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_gem_submit *args = data;
	struct msm_file_private *ctx = file->driver_priv;
	struct msm_gem_submit *submit;
	struct msm_gpu *gpu = priv->gpu;
	struct sync_file *sync_file = NULL;
	struct msm_gpu_submitqueue *queue;
	struct msm_ringbuffer *ring;
	int out_fence_fd = -1;
	struct pid *pid = get_pid(task_pid(current));
	bool has_ww_ticket = false;
	unsigned i;
	int ret, submitid;

	if (!gpu)
		return -ENXIO;

	/* for now, we just have 3d pipe.. eventually this would need to
	 * be more clever to dispatch to appropriate gpu module:
	 */
	if (MSM_PIPE_ID(args->flags) != MSM_PIPE_3D0)
		return -EINVAL;

	if (MSM_PIPE_FLAGS(args->flags) & ~MSM_SUBMIT_FLAGS)
		return -EINVAL;

	if (args->flags & MSM_SUBMIT_SUDO) {
		if (!IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) ||
		    !capable(CAP_SYS_RAWIO))
			return -EINVAL;
	}

	queue = msm_submitqueue_get(ctx, args->queueid);
	if (!queue)
		return -ENOENT;

	/* Get a unique identifier for the submission for logging purposes */
	submitid = atomic_inc_return(&ident) - 1;

	ring = gpu->rb[queue->prio];
	trace_msm_gpu_submit(pid_nr(pid), ring->id, submitid,
		args->nr_bos, args->nr_cmds);

	if (args->flags & MSM_SUBMIT_FENCE_FD_IN) {
		struct dma_fence *in_fence;

		in_fence = sync_file_get_fence(args->fence_fd);

		if (!in_fence)
			return -EINVAL;

		/*
		 * Wait if the fence is from a foreign context, or if the fence
		 * array contains any fence from a foreign context.
		 */
		ret = 0;
		if (!dma_fence_match_context(in_fence, ring->fctx->context))
			ret = dma_fence_wait(in_fence, true);

		dma_fence_put(in_fence);
		if (ret)
			return ret;
	}

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0) {
			ret = out_fence_fd;
			goto out_unlock;
		}
	}

	submit = submit_create(dev, gpu, ctx->aspace, queue, args->nr_bos,
		args->nr_cmds);
	if (!submit) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	submit->pid = pid;
	submit->ident = submitid;

	if (args->flags & MSM_SUBMIT_SUDO)
		submit->in_rb = true;

	ret = submit_lookup_objects(submit, args, file);
	if (ret)
		goto out;

	/* copy_*_user while holding a ww ticket upsets lockdep */
	ww_acquire_init(&submit->ticket, &reservation_ww_class);
	has_ww_ticket = true;
	ret = submit_lock_objects(submit);
	if (ret)
		goto out;

	ret = submit_fence_sync(submit, !!(args->flags & MSM_SUBMIT_NO_IMPLICIT));
	if (ret)
		goto out;

	ret = submit_pin_objects(submit);
	if (ret)
		goto out;
	for (i = 0; i < args->nr_cmds; i++) {
		struct drm_msm_gem_submit_cmd submit_cmd;
		void __user *userptr =
			u64_to_user_ptr(args->cmds + (i * sizeof(submit_cmd)));
		struct msm_gem_object *msm_obj;
		uint64_t iova;

		ret = copy_from_user(&submit_cmd, userptr, sizeof(submit_cmd));
		if (ret) {
			ret = -EFAULT;
			goto out;
		}

		/* validate input from userspace: */
		switch (submit_cmd.type) {
		case MSM_SUBMIT_CMD_BUF:
		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
			break;
		default:
			DRM_ERROR("invalid type: %08x\n", submit_cmd.type);
			ret = -EINVAL;
			goto out;
		}

		ret = submit_bo(submit, submit_cmd.submit_idx,
				&msm_obj, &iova, NULL);
		if (ret)
			goto out;

		if (submit_cmd.size % 4) {
			DRM_ERROR("non-aligned cmdstream buffer size: %u\n",
					submit_cmd.size);
			ret = -EINVAL;
			goto out;
		}

		if (!submit_cmd.size ||
			((submit_cmd.size + submit_cmd.submit_offset) >
				msm_obj->base.size)) {
			DRM_ERROR("invalid cmdstream size: %u\n", submit_cmd.size);
			ret = -EINVAL;
			goto out;
		}

		submit->cmd[i].type = submit_cmd.type;
		submit->cmd[i].size = submit_cmd.size / 4;
		submit->cmd[i].iova = iova + submit_cmd.submit_offset;
		submit->cmd[i].idx  = submit_cmd.submit_idx;

		if (submit->valid)
			continue;

		ret = submit_reloc(submit, msm_obj, submit_cmd.submit_offset,
				submit_cmd.nr_relocs, submit_cmd.relocs);
		if (ret)
			goto out;
	}

	submit->nr_cmds = i;

	submit->fence = msm_fence_alloc(ring->fctx);
	if (IS_ERR(submit->fence)) {
		ret = PTR_ERR(submit->fence);
		submit->fence = NULL;
		goto out;
	}

	if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
		sync_file = sync_file_create(submit->fence);
		if (!sync_file) {
			ret = -ENOMEM;
			goto out;
		}
	}

	msm_gpu_submit(gpu, submit, ctx);

	args->fence = submit->fence->seqno;

	if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
		fd_install(out_fence_fd, sync_file->file);
		args->fence_fd = out_fence_fd;
	}

out:
	submit_cleanup(submit);
	if (has_ww_ticket)
		ww_acquire_fini(&submit->ticket);
	if (ret)
		msm_gem_submit_free(submit);
out_unlock:
	if (ret && (out_fence_fd >= 0))
		put_unused_fd(out_fence_fd);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}