/*
 * Copyright (C) 2008 Ben Skeggs.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
#include "nouveau_abi16.h"

#include "nouveau_ttm.h"
#include "nouveau_gem.h"

void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	struct device *dev = drm->dev->dev;
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (WARN_ON(ret < 0 && ret != -EACCES))
		return;

	if (gem->import_attach)
		drm_prime_gem_destroy(gem, nvbo->bo.sg);

	drm_gem_object_release(gem);

	/* reset filp so nouveau_bo_del_ttm() can test for it */
	gem->filp = NULL;
	ttm_bo_unref(&bo);

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}

int
nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct nvkm_vma *vma;
	struct device *dev = drm->dev->dev;
	int ret;

	if (!cli->vm)
		return 0;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
	if (ret)
		return ret;

	vma = nouveau_bo_vma_find(nvbo, cli->vm);
	if (!vma) {
		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
		if (!vma) {
			ret = -ENOMEM;
			goto out;
		}

		ret = pm_runtime_get_sync(dev);
		if (ret < 0 && ret != -EACCES) {
			kfree(vma);
			goto out;
		}

		ret = nouveau_bo_vma_add(nvbo, cli->vm, vma);
		if (ret)
			kfree(vma);

		pm_runtime_mark_last_busy(dev);
		pm_runtime_put_autosuspend(dev);
	} else {
		vma->refcount++;
	}

out:
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

static void
nouveau_gem_object_delete(void *data)
{
	struct nvkm_vma *vma = data;

	nvkm_vm_unmap(vma);
	nvkm_vm_put(vma);
	kfree(vma);
}

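/*
 * Tear down a per-client VMA for a buffer being closed.  If the buffer is
 * still mapped and has outstanding fences, the actual unmap is deferred to
 * nouveau_gem_object_delete() via nouveau_fence_work(); otherwise the VMA
 * is torn down immediately.
 */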
static void
nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nvkm_vma *vma)
{
	const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM;
	struct reservation_object *resv = nvbo->bo.resv;
	struct reservation_object_list *fobj;
	struct fence *fence = NULL;

	fobj = reservation_object_get_list(resv);

	list_del(&vma->head);

	if (fobj && fobj->shared_count > 1)
		ttm_bo_wait(&nvbo->bo, true, false, false);
	else if (fobj && fobj->shared_count == 1)
		fence = rcu_dereference_protected(fobj->shared[0],
						  reservation_object_held(resv));
	else
		fence = reservation_object_get_excl(nvbo->bo.resv);

	if (fence && mapped) {
		nouveau_fence_work(fence, nouveau_gem_object_delete, vma);
	} else {
		if (mapped)
			nvkm_vm_unmap(vma);
		nvkm_vm_put(vma);
		kfree(vma);
	}
}

void
nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct device *dev = drm->dev->dev;
	struct nvkm_vma *vma;
	int ret;

	if (!cli->vm)
		return;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
	if (ret)
		return;

	vma = nouveau_bo_vma_find(nvbo, cli->vm);
	if (vma) {
		if (--vma->refcount == 0) {
			ret = pm_runtime_get_sync(dev);
			if (!WARN_ON(ret < 0 && ret != -EACCES)) {
				nouveau_gem_object_unmap(nvbo, vma);
				pm_runtime_mark_last_busy(dev);
				pm_runtime_put_autosuspend(dev);
			}
		}
	}
	ttm_bo_unreserve(&nvbo->bo);
}

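/*
 * Allocate a new buffer object together with its embedded GEM object.  The
 * requested NOUVEAU_GEM_DOMAIN_* flags are translated into TTM placement
 * flags before the backing nouveau_bo is created.
 */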
int
nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
		uint32_t tile_mode, uint32_t tile_flags,
		struct nouveau_bo **pnvbo)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_bo *nvbo;
	u32 flags = 0;
	int ret;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;
	if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
		flags |= TTM_PL_FLAG_SYSTEM;

	if (domain & NOUVEAU_GEM_DOMAIN_COHERENT)
		flags |= TTM_PL_FLAG_UNCACHED;

	ret = nouveau_bo_new(dev, size, align, flags, tile_mode,
			     tile_flags, NULL, NULL, pnvbo);
	if (ret)
		return ret;
	nvbo = *pnvbo;

	/* we restrict allowed domains on nv50+ to only the types
	 * that were requested at creation time.  not possible on
	 * earlier chips without busting the ABI.
	 */
	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
			      NOUVEAU_GEM_DOMAIN_GART;
	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
		nvbo->valid_domains &= domain;

	/* Initialize the embedded gem-object. We return a single gem-reference
	 * to the caller, instead of a normal nouveau_bo ttm reference. */
	ret = drm_gem_object_init(dev, &nvbo->gem, nvbo->bo.mem.size);
	if (ret) {
		nouveau_bo_ref(NULL, pnvbo);
		return ret;
	}

	nvbo->bo.persistent_swap_storage = nvbo->gem.filp;
	return 0;
}

static int
nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
		 struct drm_nouveau_gem_info *rep)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nvkm_vma *vma;

	if (nvbo->bo.mem.mem_type == TTM_PL_TT)
		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
	else
		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;

	rep->offset = nvbo->bo.offset;
	if (cli->vm) {
		vma = nouveau_bo_vma_find(nvbo, cli->vm);
		if (!vma)
			return -EINVAL;

		rep->offset = vma->offset;
	}

	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.vma_node);
	rep->tile_mode = nvbo->tile_mode;
	rep->tile_flags = nvbo->tile_flags;
	return 0;
}

int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nvkm_fb *pfb = nvxx_fb(&drm->device);
	struct drm_nouveau_gem_new *req = data;
	struct nouveau_bo *nvbo = NULL;
	int ret = 0;

	if (!pfb->memtype_valid(pfb, req->info.tile_flags)) {
		NV_PRINTK(error, cli, "bad page flags: 0x%08x\n", req->info.tile_flags);
		return -EINVAL;
	}

	ret = nouveau_gem_new(dev, req->info.size, req->align,
			      req->info.domain, req->info.tile_mode,
			      req->info.tile_flags, &nvbo);
	if (ret)
		return ret;

	ret = drm_gem_handle_create(file_priv, &nvbo->gem, &req->info.handle);
	if (ret == 0) {
		ret = nouveau_gem_info(file_priv, &nvbo->gem, &req->info);
		if (ret)
			drm_gem_handle_delete(file_priv, req->info.handle);
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&nvbo->gem);
	return ret;
}

static int
nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
		       uint32_t write_domains, uint32_t valid_domains)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct ttm_buffer_object *bo = &nvbo->bo;
	uint32_t domains = valid_domains & nvbo->valid_domains &
		(write_domains ? write_domains : read_domains);
	uint32_t pref_flags = 0, valid_flags = 0;

	if (!domains)
		return -EINVAL;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
		valid_flags |= TTM_PL_FLAG_VRAM;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_GART)
		valid_flags |= TTM_PL_FLAG_TT;

	if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
	    bo->mem.mem_type == TTM_PL_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
		 bo->mem.mem_type == TTM_PL_TT)
		pref_flags |= TTM_PL_FLAG_TT;

	else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else
		pref_flags |= TTM_PL_FLAG_TT;

	nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);

	return 0;
}

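/*
 * Book-keeping for one pushbuf validation pass: the list of reserved
 * buffers and the ww_acquire ticket used to reserve them.
 */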
struct validate_op {
	struct list_head list;
	struct ww_acquire_ctx ticket;
};

static void
validate_fini_no_ticket(struct validate_op *op, struct nouveau_fence *fence,
			struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
	struct nouveau_bo *nvbo;
	struct drm_nouveau_gem_pushbuf_bo *b;

	while (!list_empty(&op->list)) {
		nvbo = list_entry(op->list.next, struct nouveau_bo, entry);
		b = &pbbo[nvbo->pbbo_index];

		if (likely(fence))
			nouveau_bo_fence(nvbo, fence, !!b->write_domains);

		if (unlikely(nvbo->validate_mapped)) {
			ttm_bo_kunmap(&nvbo->kmap);
			nvbo->validate_mapped = false;
		}

		list_del(&nvbo->entry);
		nvbo->reserved_by = NULL;
		ttm_bo_unreserve_ticket(&nvbo->bo, &op->ticket);
		drm_gem_object_unreference_unlocked(&nvbo->gem);
	}
}

static void
validate_fini(struct validate_op *op, struct nouveau_fence *fence,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
	validate_fini_no_ticket(op, fence, pbbo);
	ww_acquire_fini(&op->ticket);
}

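/*
 * Look up and reserve every buffer on the pushbuf list under a single
 * ww_acquire ticket.  A failed reservation drops everything reserved so
 * far and, on -EDEADLK, retries through the reservation slowpath so that
 * competing clients cannot deadlock against each other.
 */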
static int
validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo,
	      int nr_buffers, struct validate_op *op)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct drm_device *dev = chan->drm->dev;
	int trycnt = 0;
	int ret = -EINVAL, i;
	struct nouveau_bo *res_bo = NULL;
	LIST_HEAD(gart_list);
	LIST_HEAD(vram_list);
	LIST_HEAD(both_list);

	ww_acquire_init(&op->ticket, &reservation_ww_class);
retry:
	if (++trycnt > 100000) {
		NV_PRINTK(error, cli, "%s failed and gave up.\n", __func__);
		return -EINVAL;
	}

	for (i = 0; i < nr_buffers; i++) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
		struct drm_gem_object *gem;
		struct nouveau_bo *nvbo;

		gem = drm_gem_object_lookup(dev, file_priv, b->handle);
		if (!gem) {
			NV_PRINTK(error, cli, "Unknown handle 0x%08x\n", b->handle);
			ret = -ENOENT;
			break;
		}
		nvbo = nouveau_gem_object(gem);
		if (nvbo == res_bo) {
			res_bo = NULL;
			drm_gem_object_unreference_unlocked(gem);
			continue;
		}

		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
			NV_PRINTK(error, cli, "multiple instances of buffer %d on "
				      "validation list\n", b->handle);
			drm_gem_object_unreference_unlocked(gem);
			ret = -EINVAL;
			break;
		}

		ret = ttm_bo_reserve(&nvbo->bo, true, false, true, &op->ticket);
		if (ret) {
			list_splice_tail_init(&vram_list, &op->list);
			list_splice_tail_init(&gart_list, &op->list);
			list_splice_tail_init(&both_list, &op->list);
			validate_fini_no_ticket(op, NULL, NULL);
			if (unlikely(ret == -EDEADLK)) {
				ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
							      &op->ticket);
				if (!ret)
					res_bo = nvbo;
			}
			if (unlikely(ret)) {
				if (ret != -ERESTARTSYS)
					NV_PRINTK(error, cli, "fail reserve\n");
				break;
			}
		}

		b->user_priv = (uint64_t)(unsigned long)nvbo;
		nvbo->reserved_by = file_priv;
		nvbo->pbbo_index = i;
		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
		    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
			list_add_tail(&nvbo->entry, &both_list);
		else if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
			list_add_tail(&nvbo->entry, &vram_list);
		else if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
			list_add_tail(&nvbo->entry, &gart_list);
		else {
			NV_PRINTK(error, cli, "invalid valid domains: 0x%08x\n",
				  b->valid_domains);
			list_add_tail(&nvbo->entry, &both_list);
			ret = -EINVAL;
			break;
		}
		if (nvbo == res_bo)
			goto retry;
	}

	ww_acquire_done(&op->ticket);
	list_splice_tail(&vram_list, &op->list);
	list_splice_tail(&gart_list, &op->list);
	list_splice_tail(&both_list, &op->list);
	if (ret)
		validate_fini(op, NULL, NULL);
	return ret;
}

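/*
 * Move every reserved buffer to a placement that satisfies its requested
 * domains, synchronise against outstanding fences on the target channel,
 * and, on pre-Tesla chips, copy updated presumed offsets back to userspace
 * so relocations can be skipped on the next submission.
 */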
static int
validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
	      struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo,
	      uint64_t user_pbbo_ptr)
{
	struct nouveau_drm *drm = chan->drm;
	struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
				(void __force __user *)(uintptr_t)user_pbbo_ptr;
	struct nouveau_bo *nvbo;
	int ret, relocs = 0;

	list_for_each_entry(nvbo, list, entry) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];

		ret = nouveau_gem_set_domain(&nvbo->gem, b->read_domains,
					     b->write_domains,
					     b->valid_domains);
		if (unlikely(ret)) {
			NV_PRINTK(error, cli, "fail set_domain\n");
			return ret;
		}

		ret = nouveau_bo_validate(nvbo, true, false);
		if (unlikely(ret)) {
			if (ret != -ERESTARTSYS)
				NV_PRINTK(error, cli, "fail ttm_validate\n");
			return ret;
		}

		ret = nouveau_fence_sync(nvbo, chan, !!b->write_domains, true);
		if (unlikely(ret)) {
			if (ret != -ERESTARTSYS)
				NV_PRINTK(error, cli, "fail post-validate sync\n");
			return ret;
		}

		if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
			if (nvbo->bo.offset == b->presumed.offset &&
			    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
			     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
				continue;

			if (nvbo->bo.mem.mem_type == TTM_PL_TT)
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
			else
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
			b->presumed.offset = nvbo->bo.offset;
			b->presumed.valid = 0;
			relocs++;

			if (copy_to_user(&upbbo[nvbo->pbbo_index].presumed,
					 &b->presumed, sizeof(b->presumed)))
				return -EFAULT;
		}
	}

	return relocs;
}

static int
nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
			     struct drm_file *file_priv,
			     struct drm_nouveau_gem_pushbuf_bo *pbbo,
			     uint64_t user_buffers, int nr_buffers,
			     struct validate_op *op, int *apply_relocs)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	int ret;

	INIT_LIST_HEAD(&op->list);

	if (nr_buffers == 0)
		return 0;

	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
	if (unlikely(ret)) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(error, cli, "validate_init\n");
		return ret;
	}

	ret = validate_list(chan, cli, &op->list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(error, cli, "validating bo list\n");
		validate_fini(op, NULL, NULL);
		return ret;
	}
	*apply_relocs = ret;
	return 0;
}

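/*
 * Copy a userspace array of nmemb elements of the given size into a
 * kernel allocation, returning an ERR_PTR() on failure.
 */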
static inline void *
u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
{
	void *mem;
	void __user *userptr = (void __force __user *)(uintptr_t)user;

	size *= nmemb;

	mem = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (!mem)
		mem = vmalloc(size);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(mem, userptr, size)) {
		kvfree(mem);
		return ERR_PTR(-EFAULT);
	}

	return mem;
}

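/*
 * Apply pushbuf relocations: for every reloc whose target buffer moved
 * since its presumed offset was recorded, rewrite the affected dword
 * through a CPU mapping of the buffer that contains it.
 */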
static int
nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
				struct drm_nouveau_gem_pushbuf *req,
				struct drm_nouveau_gem_pushbuf_bo *bo)
{
	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
	int ret = 0;
	unsigned i;

	reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
	if (IS_ERR(reloc))
		return PTR_ERR(reloc);

	for (i = 0; i < req->nr_relocs; i++) {
		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
		struct drm_nouveau_gem_pushbuf_bo *b;
		struct nouveau_bo *nvbo;
		uint32_t data;

		if (unlikely(r->bo_index > req->nr_buffers)) {
			NV_PRINTK(error, cli, "reloc bo index invalid\n");
			ret = -EINVAL;
			break;
		}

		b = &bo[r->bo_index];
		if (b->presumed.valid)
			continue;

		if (unlikely(r->reloc_bo_index > req->nr_buffers)) {
			NV_PRINTK(error, cli, "reloc container bo index invalid\n");
			ret = -EINVAL;
			break;
		}
		nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;

		if (unlikely(r->reloc_bo_offset + 4 >
			     nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
			NV_PRINTK(error, cli, "reloc outside of bo\n");
			ret = -EINVAL;
			break;
		}

		if (!nvbo->kmap.virtual) {
			ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
					  &nvbo->kmap);
			if (ret) {
				NV_PRINTK(error, cli, "failed kmap for reloc\n");
				break;
			}
			nvbo->validate_mapped = true;
		}

		if (r->flags & NOUVEAU_GEM_RELOC_LOW)
			data = b->presumed.offset + r->data;
		else if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
			data = (b->presumed.offset + r->data) >> 32;
		else
			data = r->data;

		if (r->flags & NOUVEAU_GEM_RELOC_OR) {
			if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
				data |= r->tor;
			else
				data |= r->vor;
		}

		ret = ttm_bo_wait(&nvbo->bo, true, false, false);
		if (ret) {
			NV_PRINTK(error, cli, "reloc wait_idle failed: %d\n", ret);
			break;
		}

		nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
	}

	kvfree(reloc);
	return ret;
}

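/*
 * Main submission ioctl: copy the push, buffer and reloc arrays in from
 * userspace, validate and fence every buffer, apply relocations if needed,
 * then hand the push buffers to the channel and fence the submission.
 */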
int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_abi16_chan *temp;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct drm_nouveau_gem_pushbuf *req = data;
	struct drm_nouveau_gem_pushbuf_push *push;
	struct drm_nouveau_gem_pushbuf_bo *bo;
	struct nouveau_channel *chan = NULL;
	struct validate_op op;
	struct nouveau_fence *fence = NULL;
	int i, j, ret = 0, do_reloc = 0;

	if (unlikely(!abi16))
		return -ENOMEM;

	list_for_each_entry(temp, &abi16->channels, head) {
		if (temp->chan->object->handle == (NVDRM_CHAN | req->channel)) {
			chan = temp->chan;
			break;
		}
	}

	if (!chan)
		return nouveau_abi16_put(abi16, -ENOENT);

	req->vram_available = drm->gem.vram_available;
	req->gart_available = drm->gem.gart_available;
	if (unlikely(req->nr_push == 0))
		goto out_next;

	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
		NV_PRINTK(error, cli, "pushbuf push count exceeds limit: %d max %d\n",
			  req->nr_push, NOUVEAU_GEM_MAX_PUSH);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
		NV_PRINTK(error, cli, "pushbuf bo count exceeds limit: %d max %d\n",
			  req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
		NV_PRINTK(error, cli, "pushbuf reloc count exceeds limit: %d max %d\n",
			  req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
	if (IS_ERR(push))
		return nouveau_abi16_put(abi16, PTR_ERR(push));

	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
	if (IS_ERR(bo)) {
		kvfree(push);
		return nouveau_abi16_put(abi16, PTR_ERR(bo));
	}

	/* Ensure all push buffers are on validate list */
	for (i = 0; i < req->nr_push; i++) {
		if (push[i].bo_index >= req->nr_buffers) {
			NV_PRINTK(error, cli, "push %d buffer not in list\n", i);
			ret = -EINVAL;
			goto out_prevalid;
		}
	}

	/* Validate buffer list */
	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
					   req->nr_buffers, &op, &do_reloc);
	if (ret) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(error, cli, "validate: %d\n", ret);
		goto out_prevalid;
	}

	/* Apply any relocations that are required */
	if (do_reloc) {
		ret = nouveau_gem_pushbuf_reloc_apply(cli, req, bo);
		if (ret) {
			NV_PRINTK(error, cli, "reloc apply: %d\n", ret);
			goto out;
		}
	}

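	/* Submit via one of three mechanisms, depending on the channel: an
	 * indirect buffer (ib) ring if available, a call into the pushbuf on
	 * chipsets >= 0x25, or a plain jump into it on older parts.
	 */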
	if (chan->dma.ib_max) {
		ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
		if (ret) {
			NV_PRINTK(error, cli, "nv50cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			nv50_dma_push(chan, nvbo, push[i].offset,
				      push[i].length);
		}
	} else
	if (drm->device.info.chipset >= 0x25) {
		ret = RING_SPACE(chan, req->nr_push * 2);
		if (ret) {
			NV_PRINTK(error, cli, "cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			OUT_RING(chan, (nvbo->bo.offset + push[i].offset) | 2);
			OUT_RING(chan, 0);
		}
	} else {
		ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
		if (ret) {
			NV_PRINTK(error, cli, "jmp_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;
			uint32_t cmd;

			cmd = chan->push.vma.offset + ((chan->dma.cur + 2) << 2);
			cmd |= 0x20000000;
			if (unlikely(cmd != req->suffix0)) {
				if (!nvbo->kmap.virtual) {
					ret = ttm_bo_kmap(&nvbo->bo, 0,
							  nvbo->bo.mem.num_pages,
							  &nvbo->kmap);
					if (ret) {
						WIND_RING(chan);
						goto out;
					}
					nvbo->validate_mapped = true;
				}

				nouveau_bo_wr32(nvbo, (push[i].offset +
						push[i].length - 8) / 4, cmd);
			}

			OUT_RING(chan, 0x20000000 |
				      (nvbo->bo.offset + push[i].offset));
			OUT_RING(chan, 0);
			for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
				OUT_RING(chan, 0);
		}
	}

	ret = nouveau_fence_new(chan, false, &fence);
	if (ret) {
		NV_PRINTK(error, cli, "error fencing pushbuf: %d\n", ret);
		WIND_RING(chan);
		goto out;
	}

out:
	validate_fini(&op, fence, bo);
	nouveau_fence_unref(&fence);

out_prevalid:
	kvfree(bo);
	kvfree(push);

out_next:
	if (chan->dma.ib_max) {
		req->suffix0 = 0x00000000;
		req->suffix1 = 0x00000000;
	} else
	if (drm->device.info.chipset >= 0x25) {
		req->suffix0 = 0x00020000;
		req->suffix1 = 0x00000000;
	} else {
		req->suffix0 = 0x20000000 |
			       (chan->push.vma.offset + ((chan->dma.cur + 2) << 2));
		req->suffix1 = 0x00000000;
	}

	return nouveau_abi16_put(abi16, ret);
}

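/*
 * Prepare a buffer for CPU access: wait for (or, with NOWAIT, just test)
 * the fences attached to the buffer's reservation object, then sync the
 * backing pages for CPU access.
 */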
int
nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_gem_cpu_prep *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;
	bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
	bool write = !!(req->flags & NOUVEAU_GEM_CPU_PREP_WRITE);
	int ret;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -ENOENT;
	nvbo = nouveau_gem_object(gem);

	if (no_wait)
		ret = reservation_object_test_signaled_rcu(nvbo->bo.resv, write) ? 0 : -EBUSY;
	else {
		long lret;

		lret = reservation_object_wait_timeout_rcu(nvbo->bo.resv, write, true, 30 * HZ);
		if (!lret)
			ret = -EBUSY;
		else if (lret > 0)
			ret = 0;
		else
			ret = lret;
	}
	nouveau_bo_sync_for_cpu(nvbo);
	drm_gem_object_unreference_unlocked(gem);

	return ret;
}

int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_gem_cpu_fini *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -ENOENT;
	nvbo = nouveau_gem_object(gem);

	nouveau_bo_sync_for_device(nvbo);
	drm_gem_object_unreference_unlocked(gem);
	return 0;
}

int
nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_nouveau_gem_info *req = data;
	struct drm_gem_object *gem;
	int ret;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -ENOENT;

	ret = nouveau_gem_info(file_priv, gem, req);
	drm_gem_object_unreference_unlocked(gem);
	return ret;
}