2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
23 * Cmdstream submission:
26 /* make sure these don't conflict w/ MSM_SUBMIT_BO_x */
/* Kernel-internal per-bo state bits, kept in submit->bos[i].flags in the
 * high bits alongside the userspace-visible MSM_SUBMIT_BO_x flags:
 */
27 #define BO_VALID 0x8000 /* userspace's presumed iova matched the real one */
28 #define BO_LOCKED 0x4000 /* we hold the bo's reservation ww_mutex */
29 #define BO_PINNED 0x2000 /* bo is pinned (iova reference held via msm_gem_get_iova_locked) */
/* Convert a u64 address handed in from userspace (e.g. args->bos,
 * args->cmds, cmd->relocs) into a __user pointer.  The cast goes through
 * uintptr_t so the narrowing is well-formed on 32-bit kernels.
 * NOTE(review): the function's braces (orig lines 32/34-35) are missing
 * from this extract.
 */
31 static inline void __user
*to_user_ptr(u64 address
)
33 return (void __user
*)(uintptr_t)address
;
/* Allocate and minimally initialize a msm_gem_submit sized for 'nr'
 * trailing bos[] entries (struct header plus flexible array tail).
 *
 * __GFP_NOWARN | __GFP_NORETRY keep a huge userspace-supplied 'nr' from
 * spamming the log or invoking heavy reclaim -- the allocation simply
 * fails and the ioctl errors out.
 *
 * NOTE(review): the kmalloc NULL check and the remaining field
 * initialization (orig lines 43-46, 48-50) are missing from this
 * extract -- confirm against the full file.
 */
36 static struct msm_gem_submit
*submit_create(struct drm_device
*dev
,
37 struct msm_gpu
*gpu
, int nr
)
39 struct msm_gem_submit
*submit
;
/* header size plus variable-length bos[] tail: */
40 int sz
= sizeof(*submit
) + (nr
* sizeof(submit
->bos
[0]));
42 submit
= kmalloc(sz
, GFP_TEMPORARY
| __GFP_NOWARN
| __GFP_NORETRY
);
47 /* initially, until copy_from_user() and bo lookup succeeds: */
51 INIT_LIST_HEAD(&submit
->bo_list
);
/* begin a ww_mutex acquire context; finished in submit_cleanup() via
 * ww_acquire_fini():
 */
52 ww_acquire_init(&submit
->ticket
, &reservation_ww_class
);
/* Copy the array of drm_msm_gem_submit_bo descriptors from userspace and
 * resolve each handle to a GEM object.  For each entry: validate the
 * userspace flags, record flags and the presumed iova in submit->bos[i],
 * look the handle up directly in file->object_idr (bulk lookup under one
 * table_lock instead of per-handle drm_gem_object_lookup()), take a
 * reference, and chain the object onto submit->bo_list.
 *
 * NOTE(review): the copy_from_user error check, the invalid-flags /
 * invalid-handle / duplicate-handle error-path bodies, and the
 * out/unlock-and-return tail (orig lines 60-63, 65, 74-78, 81-84, 88,
 * 93, 95-98, 100, 104-107, 109, 111-116, 118-121) are missing from this
 * extract.
 */
58 static int submit_lookup_objects(struct msm_gem_submit
*submit
,
59 struct drm_msm_gem_submit
*args
, struct drm_file
*file
)
/* single lock held across the whole bulk lookup: */
64 spin_lock(&file
->table_lock
);
66 for (i
= 0; i
< args
->nr_bos
; i
++) {
67 struct drm_msm_gem_submit_bo submit_bo
;
68 struct drm_gem_object
*obj
;
69 struct msm_gem_object
*msm_obj
;
/* userspace address of the i'th bo descriptor: */
70 void __user
*userptr
=
71 to_user_ptr(args
->bos
+ (i
* sizeof(submit_bo
)));
73 ret
= copy_from_user(&submit_bo
, userptr
, sizeof(submit_bo
));
/* reject any flag bits outside the userspace-visible set (also keeps
 * the kernel-internal BO_* bits from being set by userspace):
 */
79 if (submit_bo
.flags
& ~MSM_SUBMIT_BO_FLAGS
) {
80 DRM_ERROR("invalid flags: %x\n", submit_bo
.flags
);
85 submit
->bos
[i
].flags
= submit_bo
.flags
;
86 /* in validate_objects() we figure out if this is true: */
87 submit
->bos
[i
].iova
= submit_bo
.presumed
;
89 /* normally use drm_gem_object_lookup(), but for bulk lookup
90 * all under single table_lock just hit object_idr directly:
92 obj
= idr_find(&file
->object_idr
, submit_bo
.handle
);
94 DRM_ERROR("invalid handle %u at index %u\n", submit_bo
.handle
, i
);
99 msm_obj
= to_msm_bo(obj
);
/* an object already on another (or this) submit's list would corrupt
 * list bookkeeping -- reject duplicates:
 */
101 if (!list_empty(&msm_obj
->submit_entry
)) {
102 DRM_ERROR("handle %u at index %u already on submit list\n",
103 submit_bo
.handle
, i
);
/* hold a reference for the lifetime of the submit; dropped in
 * submit_cleanup():
 */
108 drm_gem_object_reference(obj
);
110 submit
->bos
[i
].obj
= msm_obj
;
112 list_add_tail(&msm_obj
->submit_entry
, &submit
->bo_list
);
117 spin_unlock(&file
->table_lock
);
/* Undo whatever per-bo state submit_validate_objects() established for
 * entry 'i': drop the iova pin if BO_PINNED, release the reservation
 * ww_mutex if BO_LOCKED, zero the recorded iova if it was never
 * validated (no BO_VALID), then clear the LOCKED|PINNED bits so the
 * function is safe to call again on the same entry.
 * NOTE(review): function braces (orig lines 123/136) are missing from
 * this extract.
 */
122 static void submit_unlock_unpin_bo(struct msm_gem_submit
*submit
, int i
)
124 struct msm_gem_object
*msm_obj
= submit
->bos
[i
].obj
;
126 if (submit
->bos
[i
].flags
& BO_PINNED
)
127 msm_gem_put_iova(&msm_obj
->base
, submit
->gpu
->id
);
129 if (submit
->bos
[i
].flags
& BO_LOCKED
)
130 ww_mutex_unlock(&msm_obj
->resv
->lock
);
/* a never-validated iova is stale -- don't let it leak back out: */
132 if (!(submit
->bos
[i
].flags
& BO_VALID
))
133 submit
->bos
[i
].iova
= 0;
135 submit
->bos
[i
].flags
&= ~(BO_LOCKED
| BO_PINNED
);
138 /* This is where we make sure all the bo's are reserved and pin'd: */
/* For every bo in the submit: take its reservation ww_mutex (under the
 * acquire ticket started in submit_create()) and pin it to get a real
 * iova.  If the pinned iova matches the presumed iova userspace gave us,
 * mark the entry BO_VALID; otherwise record the real iova, clear
 * BO_VALID, and set submit->valid = false so the cmdstream relocs get
 * patched.  On ww_mutex deadlock (-EDEADLK) the fail path backs off all
 * locks, slow-locks the contended bo, and retries -- the classic
 * wound/wait pattern.
 *
 * NOTE(review): large parts of this function are missing from this
 * extract (loop braces, the 'retry:'/'fail:' labels and the gotos into
 * them, the lock-failure branch at orig lines 157-159, the error
 * unwinding loop bounds at 189-198, and the retry jump at 207-211).
 * The control flow below must be read against the full file.
 */
139 static int submit_validate_objects(struct msm_gem_submit
*submit
)
141 int contended
, slow_locked
= -1, i
, ret
= 0;
/* optimistic: cleared as soon as any presumed iova turns out wrong: */
144 submit
->valid
= true;
146 for (i
= 0; i
< submit
->nr_bos
; i
++) {
147 struct msm_gem_object
*msm_obj
= submit
->bos
[i
].obj
;
/* this entry was already slow-locked during deadlock recovery: */
150 if (slow_locked
== i
)
155 if (!(submit
->bos
[i
].flags
& BO_LOCKED
)) {
156 ret
= ww_mutex_lock_interruptible(&msm_obj
->resv
->lock
,
160 submit
->bos
[i
].flags
|= BO_LOCKED
;
164 /* if locking succeeded, pin bo: */
165 ret
= msm_gem_get_iova_locked(&msm_obj
->base
,
166 submit
->gpu
->id
, &iova
);
168 /* this would break the logic in the fail path.. there is no
169 * reason for this to happen, but just to be on the safe side
170 * let's notice if this starts happening in the future:
172 WARN_ON(ret
== -EDEADLK
);
177 submit
->bos
[i
].flags
|= BO_PINNED
;
/* compare real iova against userspace's presumed value: */
179 if (iova
== submit
->bos
[i
].iova
) {
180 submit
->bos
[i
].flags
|= BO_VALID
;
182 submit
->bos
[i
].iova
= iova
;
183 submit
->bos
[i
].flags
&= ~BO_VALID
;
/* at least one reloc pass will be needed: */
184 submit
->valid
= false;
/* all locks acquired -- no further ww_mutex_lock on this ticket: */
188 ww_acquire_done(&submit
->ticket
);
194 submit_unlock_unpin_bo(submit
, i
);
197 submit_unlock_unpin_bo(submit
, slow_locked
);
199 if (ret
== -EDEADLK
) {
200 struct msm_gem_object
*msm_obj
= submit
->bos
[contended
].obj
;
201 /* we lost out in a seqno race, lock and retry.. */
202 ret
= ww_mutex_lock_slow_interruptible(&msm_obj
->resv
->lock
,
205 submit
->bos
[contended
].flags
|= BO_LOCKED
;
/* remember which entry is pre-locked so the retry loop skips it: */
206 slow_locked
= contended
;
/* Fetch the object, current iova, and BO_VALID status for bo index
 * 'idx', bounds-checking the index against submit->nr_bos first.
 * Callers pass NULL for out-params they don't need (submit_reloc()
 * passes obj=NULL; msm_ioctl_gem_submit() passes valid=NULL).
 * NOTE(review): the per-out-param NULL guards and the return statements
 * (orig lines 220-223, 225, 227, 229-230) appear to be missing from
 * this extract -- as written the stores look unconditional; confirm
 * against the full file.
 */
214 static int submit_bo(struct msm_gem_submit
*submit
, uint32_t idx
,
215 struct msm_gem_object
**obj
, uint32_t *iova
, bool *valid
)
217 if (idx
>= submit
->nr_bos
) {
218 DRM_ERROR("invalid buffer index: %u (out of %u)\n",
219 idx
, submit
->nr_bos
);
224 *obj
= submit
->bos
[idx
].obj
;
226 *iova
= submit
->bos
[idx
].iova
;
228 *valid
= !!(submit
->bos
[idx
].flags
& BO_VALID
);
233 /* process the reloc's and patch up the cmdstream as needed: */
/* For each drm_msm_gem_submit_reloc copied from the userspace array at
 * 'relocs': validate the 4-byte alignment and monotonically-increasing
 * dword offset, look up the target bo's iova via submit_bo(), apply
 * reloc_offset / shift / or-mask, and write the result into the mapped
 * cmdstream at ptr[off].
 *
 * NOTE(review): the offset-alignment check condition (orig line 241),
 * the map-failure return (250-256), the BO_VALID short-circuit and
 * last_offset update (284-289, 298-300), and the loop/function closing
 * braces are missing from this extract.
 */
234 static int submit_reloc(struct msm_gem_submit
*submit
, struct msm_gem_object
*obj
,
235 uint32_t offset
, uint32_t nr_relocs
, uint64_t relocs
)
/* last_offset enforces relocs sorted by ascending submit_offset: */
237 uint32_t i
, last_offset
= 0;
242 DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset
);
246 /* For now, just map the entire thing. Eventually we probably
247 * to do it page-by-page, w/ kmap() if not vmap()d..
249 ptr
= msm_gem_vaddr_locked(&obj
->base
);
253 DBG("failed to map: %d", ret
);
257 for (i
= 0; i
< nr_relocs
; i
++) {
258 struct drm_msm_gem_submit_reloc submit_reloc
;
/* userspace address of the i'th reloc descriptor: */
259 void __user
*userptr
=
260 to_user_ptr(relocs
+ (i
* sizeof(submit_reloc
)));
264 ret
= copy_from_user(&submit_reloc
, userptr
, sizeof(submit_reloc
));
268 if (submit_reloc
.submit_offset
% 4) {
269 DRM_ERROR("non-aligned reloc offset: %u\n",
270 submit_reloc
.submit_offset
);
274 /* offset in dwords: */
275 off
= submit_reloc
.submit_offset
/ 4;
/* must land inside the bo and not walk backwards: */
277 if ((off
>= (obj
->base
.size
/ 4)) ||
278 (off
< last_offset
)) {
279 DRM_ERROR("invalid offset %u at reloc %u\n", off
, i
);
283 ret
= submit_bo(submit
, submit_reloc
.reloc_idx
, NULL
, &iova
, &valid
);
290 iova
+= submit_reloc
.reloc_offset
;
/* negative shift means right-shift by its magnitude: */
292 if (submit_reloc
.shift
< 0)
293 iova
>>= -submit_reloc
.shift
;
295 iova
<<= submit_reloc
.shift
;
/* patch the dword in place, OR-ing in the caller-supplied bits: */
297 ptr
[off
] = iova
| submit_reloc
.or;
/* Tear down a submit: for each bo, unlock/unpin it, unhook it from
 * submit->bo_list, and drop the reference taken in
 * submit_lookup_objects(); then finish the ww acquire context started
 * in submit_create().
 * NOTE(review): how the 'fail' parameter is used (and the kfree of
 * 'submit' itself, orig lines 306-308, 314-315, 317-318) is missing
 * from this extract.
 */
305 static void submit_cleanup(struct msm_gem_submit
*submit
, bool fail
)
309 for (i
= 0; i
< submit
->nr_bos
; i
++) {
310 struct msm_gem_object
*msm_obj
= submit
->bos
[i
].obj
;
311 submit_unlock_unpin_bo(submit
, i
);
/* list_del_init so submit_lookup_objects()'s list_empty() duplicate
 * check works for future submits of this bo:
 */
312 list_del_init(&msm_obj
->submit_entry
);
313 drm_gem_object_unreference(&msm_obj
->base
);
316 ww_acquire_fini(&submit
->ticket
);
/* DRM_MSM_GEM_SUBMIT ioctl entry point.  Flow: validate pipe and
 * nr_cmds, create the submit under dev->struct_mutex, look up and
 * validate/pin all bos, copy + sanity-check each cmd descriptor
 * (type, 4-byte size alignment, bounds vs the target bo), record it in
 * submit->cmd[], run relocs where needed, hand the whole submit to
 * msm_gpu_submit(), and return the fence to userspace in args->fence.
 * Cleanup always runs via submit_cleanup() before dropping the mutex.
 *
 * NOTE(review): many lines are missing from this extract: the 'gpu',
 * 'ret', 'i', 'iova' declarations, the early-return error values for
 * bad pipe/nr_cmds, the NULL check of submit_create(), the 'goto out'
 * error handling after each step, the switch's valid-case break and
 * default error return, and the submit->valid short-circuit before
 * submit_reloc().  Confirm control flow against the full file.
 *
 * NOTE(review): the bounds check below computes
 * (submit_cmd.size + submit_cmd.submit_offset) in 32 bits -- for large
 * userspace values this can wrap and pass the check; upstream later
 * hardened this with explicit overflow checks.  Verify the full file's
 * version.
 */
320 int msm_ioctl_gem_submit(struct drm_device
*dev
, void *data
,
321 struct drm_file
*file
)
323 struct msm_drm_private
*priv
= dev
->dev_private
;
324 struct drm_msm_gem_submit
*args
= data
;
325 struct msm_file_private
*ctx
= file
->driver_priv
;
326 struct msm_gem_submit
*submit
;
331 /* for now, we just have 3d pipe.. eventually this would need to
332 * be more clever to dispatch to appropriate gpu module:
334 if (args
->pipe
!= MSM_PIPE_3D0
)
/* bound the cmd array before allocating anything for it: */
339 if (args
->nr_cmds
> MAX_CMDS
)
/* struct_mutex serializes the whole submit path: */
342 mutex_lock(&dev
->struct_mutex
);
344 submit
= submit_create(dev
, gpu
, args
->nr_bos
);
350 ret
= submit_lookup_objects(submit
, args
, file
);
354 ret
= submit_validate_objects(submit
);
358 for (i
= 0; i
< args
->nr_cmds
; i
++) {
359 struct drm_msm_gem_submit_cmd submit_cmd
;
/* userspace address of the i'th cmd descriptor: */
360 void __user
*userptr
=
361 to_user_ptr(args
->cmds
+ (i
* sizeof(submit_cmd
)));
362 struct msm_gem_object
*msm_obj
;
365 ret
= copy_from_user(&submit_cmd
, userptr
, sizeof(submit_cmd
));
371 /* validate input from userspace: */
372 switch (submit_cmd
.type
) {
373 case MSM_SUBMIT_CMD_BUF
:
374 case MSM_SUBMIT_CMD_IB_TARGET_BUF
:
375 case MSM_SUBMIT_CMD_CTX_RESTORE_BUF
:
378 DRM_ERROR("invalid type: %08x\n", submit_cmd
.type
);
/* resolve the target bo and its pinned iova: */
383 ret
= submit_bo(submit
, submit_cmd
.submit_idx
,
384 &msm_obj
, &iova
, NULL
);
388 if (submit_cmd
.size
% 4) {
389 DRM_ERROR("non-aligned cmdstream buffer size: %u\n",
/* cmdstream must fit entirely inside the target bo: */
395 if ((submit_cmd
.size
+ submit_cmd
.submit_offset
) >=
396 msm_obj
->base
.size
) {
397 DRM_ERROR("invalid cmdstream size: %u\n", submit_cmd
.size
);
402 submit
->cmd
[i
].type
= submit_cmd
.type
;
/* size recorded in dwords: */
403 submit
->cmd
[i
].size
= submit_cmd
.size
/ 4;
404 submit
->cmd
[i
].iova
= iova
+ submit_cmd
.submit_offset
;
405 submit
->cmd
[i
].idx
= submit_cmd
.submit_idx
;
410 ret
= submit_reloc(submit
, msm_obj
, submit_cmd
.submit_offset
,
411 submit_cmd
.nr_relocs
, submit_cmd
.relocs
);
418 ret
= msm_gpu_submit(gpu
, submit
, ctx
);
/* hand the fence seqno back to userspace: */
420 args
->fence
= submit
->fence
;
424 submit_cleanup(submit
, !!ret
);
425 mutex_unlock(&dev
->struct_mutex
);