/*
 * Copyright (C) 2008 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
#include "nouveau_abi16.h"

#include "nouveau_ttm.h"
#include "nouveau_gem.h"
#include "nouveau_mem.h"
#include "nouveau_vmm.h"

#include <nvif/class.h>
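
/*
 * GEM object destructor.  The device is woken before the final TTM
 * reference is dropped, since tearing down the backing object may touch
 * the hardware.  -EACCES from pm_runtime_get_sync() only means runtime
 * PM is disabled for this device, so it is safe to proceed regardless.
 */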
void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct device *dev = drm->dev->dev;
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (WARN_ON(ret < 0 && ret != -EACCES))
		return;

	if (gem->import_attach)
		drm_prime_gem_destroy(gem, nvbo->bo.sg);

	ttm_bo_put(&nvbo->bo);

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}
int
nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct device *dev = drm->dev->dev;
	struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
	struct nouveau_vma *vma;
	int ret;

	if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50)
		return 0;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0 && ret != -EACCES)
		goto out;

	ret = nouveau_vma_new(nvbo, vmm, &vma);
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
out:
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}
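
/*
 * Tearing down a mapping must wait for any fence that still references
 * it.  Instead of blocking, the delete is queued onto the client
 * workqueue and runs once the fence signals; the synchronous 2s wait
 * below is only the fallback when the work item cannot be allocated.
 */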
struct nouveau_gem_object_unmap {
	struct nouveau_cli_work work;
	struct nouveau_vma *vma;
};
static void
nouveau_gem_object_delete(struct nouveau_vma *vma)
{
	nouveau_fence_unref(&vma->fence);
	nouveau_vma_del(&vma);
}
static void
nouveau_gem_object_delete_work(struct nouveau_cli_work *w)
{
	struct nouveau_gem_object_unmap *work =
		container_of(w, typeof(*work), work);
	nouveau_gem_object_delete(work->vma);
	kfree(work);
}
static void
nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
	struct dma_fence *fence = vma->fence ? &vma->fence->base : NULL;
	struct nouveau_gem_object_unmap *work;

	list_del_init(&vma->head);

	if (!fence) {
		nouveau_gem_object_delete(vma);
		return;
	}

	if (!(work = kmalloc(sizeof(*work), GFP_KERNEL))) {
		WARN_ON(dma_fence_wait_timeout(fence, false, 2 * HZ) <= 0);
		nouveau_gem_object_delete(vma);
		return;
	}

	work->work.func = nouveau_gem_object_delete_work;
	work->vma = vma;
	nouveau_cli_work_queue(vma->vmm->cli, fence, &work->work);
}
void
nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct device *dev = drm->dev->dev;
	struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
	struct nouveau_vma *vma;
	int ret;

	if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50)
		return;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
	if (ret)
		return;

	vma = nouveau_vma_find(nvbo, vmm);
	if (vma) {
		if (--vma->refs == 0) {
			ret = pm_runtime_get_sync(dev);
			if (!WARN_ON(ret < 0 && ret != -EACCES)) {
				nouveau_gem_object_unmap(nvbo, vma);
				pm_runtime_mark_last_busy(dev);
				pm_runtime_put_autosuspend(dev);
			}
		}
	}
	ttm_bo_unreserve(&nvbo->bo);
}
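
/*
 * Allocate a new buffer object along with its embedded GEM object.
 * The NOUVEAU_GEM_DOMAIN_* flags requested by userspace are translated
 * into TTM placement flags (VRAM, TT, with SYSTEM as the fallback);
 * COHERENT additionally asks for an uncached placement.
 */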
int
nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
		uint32_t tile_mode, uint32_t tile_flags,
		struct nouveau_bo **pnvbo)
{
	struct nouveau_drm *drm = cli->drm;
	struct nouveau_bo *nvbo;
	u32 flags = 0;
	int ret;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;
	if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
		flags |= TTM_PL_FLAG_SYSTEM;

	if (domain & NOUVEAU_GEM_DOMAIN_COHERENT)
		flags |= TTM_PL_FLAG_UNCACHED;

	nvbo = nouveau_bo_alloc(cli, &size, &align, flags, tile_mode,
				tile_flags);
	if (IS_ERR(nvbo))
		return PTR_ERR(nvbo);

	/* Initialize the embedded gem-object. We return a single gem-reference
	 * to the caller, instead of a normal nouveau_bo ttm reference. */
	ret = drm_gem_object_init(drm->dev, &nvbo->bo.base, size);
	if (ret) {
		nouveau_bo_ref(NULL, &nvbo);
		return ret;
	}

	ret = nouveau_bo_init(nvbo, size, align, flags, NULL, NULL);
	if (ret) {
		nouveau_bo_ref(NULL, &nvbo);
		return ret;
	}

	/* we restrict allowed domains on nv50+ to only the types
	 * that were requested at creation time.  not possible on
	 * earlier chips without busting the ABI.
	 */
	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
			      NOUVEAU_GEM_DOMAIN_GART;
	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
		nvbo->valid_domains &= domain;

	nvbo->bo.persistent_swap_storage = nvbo->bo.base.filp;
	*pnvbo = nvbo;
	return 0;
}
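
/*
 * Fill in a drm_nouveau_gem_info reply: the buffer's domain, GPU
 * offset (the per-client VMA address on NV50+), size, mmap handle and
 * tiling state, with the tile_flags encoding varying by GPU family.
 */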
static int
nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
		 struct drm_nouveau_gem_info *rep)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
	struct nouveau_vma *vma;

	if (is_power_of_2(nvbo->valid_domains))
		rep->domain = nvbo->valid_domains;
	else if (nvbo->bo.mem.mem_type == TTM_PL_TT)
		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
	else
		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
	rep->offset = nvbo->bo.offset;
	if (vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
		vma = nouveau_vma_find(nvbo, vmm);
		if (!vma)
			return -EINVAL;

		rep->offset = vma->addr;
	}

	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.base.vma_node);
	rep->tile_mode = nvbo->mode;
	rep->tile_flags = nvbo->contig ? 0 : NOUVEAU_GEM_TILE_NONCONTIG;
	if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI)
		rep->tile_flags |= nvbo->kind << 8;
	else
	if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
		rep->tile_flags |= nvbo->kind << 8 | nvbo->comp << 16;
	else
		rep->tile_flags |= nvbo->zeta;
	return 0;
}
int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct drm_nouveau_gem_new *req = data;
	struct nouveau_bo *nvbo = NULL;
	int ret = 0;

	ret = nouveau_gem_new(cli, req->info.size, req->align,
			      req->info.domain, req->info.tile_mode,
			      req->info.tile_flags, &nvbo);
	if (ret)
		return ret;

	ret = drm_gem_handle_create(file_priv, &nvbo->bo.base,
				    &req->info.handle);
	if (ret == 0) {
		ret = nouveau_gem_info(file_priv, &nvbo->bo.base, &req->info);
		if (ret)
			drm_gem_handle_delete(file_priv, req->info.handle);
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(&nvbo->bo.base);
	return ret;
}
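
/*
 * Pick TTM placement for a buffer from the domains the submission
 * declares valid for it, preferring wherever the buffer currently
 * resides so validation does not migrate it needlessly.
 */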
static int
nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
		       uint32_t write_domains, uint32_t valid_domains)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct ttm_buffer_object *bo = &nvbo->bo;
	uint32_t domains = valid_domains & nvbo->valid_domains &
		(write_domains ? write_domains : read_domains);
	uint32_t pref_flags = 0, valid_flags = 0;

	if (!domains)
		return -EINVAL;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
		valid_flags |= TTM_PL_FLAG_VRAM;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_GART)
		valid_flags |= TTM_PL_FLAG_TT;

	if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
	    bo->mem.mem_type == TTM_PL_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
		 bo->mem.mem_type == TTM_PL_TT)
		pref_flags |= TTM_PL_FLAG_TT;

	else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else
		pref_flags |= TTM_PL_FLAG_TT;

	nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);

	return 0;
}
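
/*
 * State for one validation pass over a pushbuf's buffer list: the list
 * of buffers reserved so far, plus the ww-mutex acquire context that
 * makes reserving them all deadlock-free.
 */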
struct validate_op {
	struct list_head list;
	struct ww_acquire_ctx ticket;
};
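
/*
 * Unwind a validation pass: fence each buffer (when a fence is given),
 * drop any kmap set up for relocations, then unreserve and unreference
 * everything on the list.  The _no_ticket variant keeps the ww acquire
 * context alive so the caller can retry after an -EDEADLK backoff.
 */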
static void
validate_fini_no_ticket(struct validate_op *op, struct nouveau_channel *chan,
			struct nouveau_fence *fence,
			struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
	struct nouveau_bo *nvbo;
	struct drm_nouveau_gem_pushbuf_bo *b;

	while (!list_empty(&op->list)) {
		nvbo = list_entry(op->list.next, struct nouveau_bo, entry);
		b = &pbbo[nvbo->pbbo_index];

		if (likely(fence)) {
			nouveau_bo_fence(nvbo, fence, !!b->write_domains);

			if (chan->vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
				struct nouveau_vma *vma =
					(void *)(unsigned long)b->user_priv;
				nouveau_fence_unref(&vma->fence);
				dma_fence_get(&fence->base);
				vma->fence = fence;
			}
		}

		if (unlikely(nvbo->validate_mapped)) {
			ttm_bo_kunmap(&nvbo->kmap);
			nvbo->validate_mapped = false;
		}

		list_del(&nvbo->entry);
		nvbo->reserved_by = NULL;
		ttm_bo_unreserve(&nvbo->bo);
		drm_gem_object_put_unlocked(&nvbo->bo.base);
	}
}
static void
validate_fini(struct validate_op *op, struct nouveau_channel *chan,
	      struct nouveau_fence *fence,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
	validate_fini_no_ticket(op, chan, fence, pbbo);
	ww_acquire_fini(&op->ticket);
}
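
/*
 * Reserve every buffer named in the request under one ww acquire
 * ticket.  On -EDEADLK everything reserved so far is unwound, the
 * contended buffer is re-reserved via the slowpath, and the loop
 * restarts.  Buffers are bucketed by the domains they may occupy
 * (VRAM, GART, or either) before being spliced onto op->list.
 */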
static int
validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo,
	      int nr_buffers, struct validate_op *op)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	int trycnt = 0;
	int ret = -EINVAL, i;
	struct nouveau_bo *res_bo = NULL;
	LIST_HEAD(gart_list);
	LIST_HEAD(vram_list);
	LIST_HEAD(both_list);

	ww_acquire_init(&op->ticket, &reservation_ww_class);
retry:
	if (++trycnt > 100000) {
		NV_PRINTK(err, cli, "%s failed and gave up.\n", __func__);
		return -EINVAL;
	}

	for (i = 0; i < nr_buffers; i++) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
		struct drm_gem_object *gem;
		struct nouveau_bo *nvbo;

		gem = drm_gem_object_lookup(file_priv, b->handle);
		if (!gem) {
			NV_PRINTK(err, cli, "Unknown handle 0x%08x\n", b->handle);
			ret = -ENOENT;
			break;
		}
		nvbo = nouveau_gem_object(gem);
		if (nvbo == res_bo) {
			res_bo = NULL;
			drm_gem_object_put_unlocked(gem);
			continue;
		}

		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
			NV_PRINTK(err, cli, "multiple instances of buffer %d on "
				      "validation list\n", b->handle);
			drm_gem_object_put_unlocked(gem);
			ret = -EINVAL;
			break;
		}

		ret = ttm_bo_reserve(&nvbo->bo, true, false, &op->ticket);
		if (ret) {
			list_splice_tail_init(&vram_list, &op->list);
			list_splice_tail_init(&gart_list, &op->list);
			list_splice_tail_init(&both_list, &op->list);
			validate_fini_no_ticket(op, chan, NULL, NULL);
			if (unlikely(ret == -EDEADLK)) {
				ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
							      &op->ticket);
				if (!ret)
					res_bo = nvbo;
			}
			if (unlikely(ret)) {
				if (ret != -ERESTARTSYS)
					NV_PRINTK(err, cli, "fail reserve\n");
				break;
			}
		}

		if (chan->vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
			struct nouveau_vmm *vmm = chan->vmm;
			struct nouveau_vma *vma = nouveau_vma_find(nvbo, vmm);
			if (!vma) {
				NV_PRINTK(err, cli, "vma not found!\n");
				ret = -EINVAL;
				break;
			}

			b->user_priv = (uint64_t)(unsigned long)vma;
		} else {
			b->user_priv = (uint64_t)(unsigned long)nvbo;
		}

		nvbo->reserved_by = file_priv;
		nvbo->pbbo_index = i;
		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
		    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
			list_add_tail(&nvbo->entry, &both_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
			list_add_tail(&nvbo->entry, &vram_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
			list_add_tail(&nvbo->entry, &gart_list);
		else {
			NV_PRINTK(err, cli, "invalid valid domains: 0x%08x\n",
				 b->valid_domains);
			list_add_tail(&nvbo->entry, &both_list);
			ret = -EINVAL;
			break;
		}
		if (nvbo == res_bo)
			goto retry;
	}

	ww_acquire_done(&op->ticket);
	list_splice_tail(&vram_list, &op->list);
	list_splice_tail(&gart_list, &op->list);
	list_splice_tail(&both_list, &op->list);
	if (ret)
		validate_fini(op, chan, NULL, NULL);
	return ret;
}
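
/*
 * Place, validate and fence-sync each reserved buffer.  On chips
 * before Tesla, userspace embeds presumed physical offsets in the
 * pushbuf, so any buffer that no longer matches its presumed
 * offset/domain is flagged and counted; the positive return value is
 * the number of buffers whose relocations must be applied.
 */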
static int
validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
	      struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
	struct nouveau_drm *drm = chan->drm;
	struct nouveau_bo *nvbo;
	int ret, relocs = 0;

	list_for_each_entry(nvbo, list, entry) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];

		ret = nouveau_gem_set_domain(&nvbo->bo.base, b->read_domains,
					     b->write_domains,
					     b->valid_domains);
		if (unlikely(ret)) {
			NV_PRINTK(err, cli, "fail set_domain\n");
			return ret;
		}

		ret = nouveau_bo_validate(nvbo, true, false);
		if (unlikely(ret)) {
			if (ret != -ERESTARTSYS)
				NV_PRINTK(err, cli, "fail ttm_validate\n");
			return ret;
		}

		ret = nouveau_fence_sync(nvbo, chan, !!b->write_domains, true);
		if (unlikely(ret)) {
			if (ret != -ERESTARTSYS)
				NV_PRINTK(err, cli, "fail post-validate sync\n");
			return ret;
		}

		if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
			if (nvbo->bo.offset == b->presumed.offset &&
			    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
			     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
				continue;

			if (nvbo->bo.mem.mem_type == TTM_PL_TT)
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
			else
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
			b->presumed.offset = nvbo->bo.offset;
			b->presumed.valid = 0;
			relocs++;
		}
	}

	return relocs;
}
static int
nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
			     struct drm_file *file_priv,
			     struct drm_nouveau_gem_pushbuf_bo *pbbo,
			     int nr_buffers,
			     struct validate_op *op, bool *apply_relocs)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	int ret;

	INIT_LIST_HEAD(&op->list);

	if (nr_buffers == 0)
		return 0;

	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
	if (unlikely(ret)) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(err, cli, "validate_init\n");
		return ret;
	}

	ret = validate_list(chan, cli, &op->list, pbbo);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(err, cli, "validating bo list\n");
		validate_fini(op, chan, NULL, NULL);
		return ret;
	}
	*apply_relocs = ret;
	return 0;
}
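
/*
 * Copy a userspace array, passed as a u64 pointer plus element count
 * and element size, into a kvmalloc'd kernel buffer.
 */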
static inline void
u_free(void *addr)
{
	kvfree(addr);
}

static inline void *
u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
{
	void *mem;
	void __user *userptr = (void __force __user *)(uintptr_t)user;

	size *= nmemb;

	mem = kvmalloc(size, GFP_KERNEL);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(mem, userptr, size)) {
		u_free(mem);
		return ERR_PTR(-EFAULT);
	}

	return mem;
}
static int
nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
				struct drm_nouveau_gem_pushbuf *req,
				struct drm_nouveau_gem_pushbuf_reloc *reloc,
				struct drm_nouveau_gem_pushbuf_bo *bo)
{
	int ret = 0;
	unsigned i;

	for (i = 0; i < req->nr_relocs; i++) {
		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
		struct drm_nouveau_gem_pushbuf_bo *b;
		struct nouveau_bo *nvbo;
		uint32_t data;

		if (unlikely(r->bo_index >= req->nr_buffers)) {
			NV_PRINTK(err, cli, "reloc bo index invalid\n");
			ret = -EINVAL;
			break;
		}

		b = &bo[r->bo_index];
		if (b->presumed.valid)
			continue;

		if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
			NV_PRINTK(err, cli, "reloc container bo index invalid\n");
			ret = -EINVAL;
			break;
		}
		nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;

		if (unlikely(r->reloc_bo_offset + 4 >
			     nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
			NV_PRINTK(err, cli, "reloc outside of bo\n");
			ret = -EINVAL;
			break;
		}

		if (!nvbo->kmap.virtual) {
			ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
					  &nvbo->kmap);
			if (ret) {
				NV_PRINTK(err, cli, "failed kmap for reloc\n");
				break;
			}
			nvbo->validate_mapped = true;
		}

		if (r->flags & NOUVEAU_GEM_RELOC_LOW)
			data = b->presumed.offset + r->data;
		else
		if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
			data = (b->presumed.offset + r->data) >> 32;
		else
			data = r->data;

		if (r->flags & NOUVEAU_GEM_RELOC_OR) {
			if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
				data |= r->tor;
			else
				data |= r->vor;
		}

		ret = ttm_bo_wait(&nvbo->bo, false, false);
		if (ret) {
			NV_PRINTK(err, cli, "reloc wait_idle failed: %d\n", ret);
			break;
		}

		nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
	}

	return ret;
}
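
/*
 * The command submission ioctl.  In outline: look up the target
 * channel, bound the push/buffer/reloc counts against the ABI limits,
 * copy in the userspace arrays, validate (and if necessary relocate)
 * the buffer list, then submit each push using whichever mechanism the
 * channel supports: indirect buffers (ib_max), call commands on
 * >=0x25 chipsets, or jump commands on earlier ones.
 */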
int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_abi16_chan *temp;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct drm_nouveau_gem_pushbuf *req = data;
	struct drm_nouveau_gem_pushbuf_push *push;
	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
	struct drm_nouveau_gem_pushbuf_bo *bo;
	struct nouveau_channel *chan = NULL;
	struct validate_op op;
	struct nouveau_fence *fence = NULL;
	int i, j, ret = 0;
	bool do_reloc = false, sync = false;

	if (unlikely(!abi16))
		return -ENOMEM;

	list_for_each_entry(temp, &abi16->channels, head) {
		if (temp->chan->chid == req->channel) {
			chan = temp->chan;
			break;
		}
	}

	if (!chan)
		return nouveau_abi16_put(abi16, -ENOENT);
	if (unlikely(atomic_read(&chan->killed)))
		return nouveau_abi16_put(abi16, -ENODEV);

	sync = req->vram_available & NOUVEAU_GEM_PUSHBUF_SYNC;

	req->vram_available = drm->gem.vram_available;
	req->gart_available = drm->gem.gart_available;
	if (unlikely(req->nr_push == 0))
		goto out_next;

	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
		NV_PRINTK(err, cli, "pushbuf push count exceeds limit: %d max %d\n",
			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
		NV_PRINTK(err, cli, "pushbuf bo count exceeds limit: %d max %d\n",
			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
		NV_PRINTK(err, cli, "pushbuf reloc count exceeds limit: %d max %d\n",
			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
	if (IS_ERR(push))
		return nouveau_abi16_put(abi16, PTR_ERR(push));

	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
	if (IS_ERR(bo)) {
		u_free(push);
		return nouveau_abi16_put(abi16, PTR_ERR(bo));
	}

	/* Ensure all push buffers are on validate list */
	for (i = 0; i < req->nr_push; i++) {
		if (push[i].bo_index >= req->nr_buffers) {
			NV_PRINTK(err, cli, "push %d buffer not in list\n", i);
			ret = -EINVAL;
			goto out_prevalid;
		}
	}

	/* Validate buffer list */
revalidate:
	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo,
					   req->nr_buffers, &op, &do_reloc);
	if (ret) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(err, cli, "validate: %d\n", ret);
		goto out_prevalid;
	}

	/* Apply any relocations that are required */
	if (do_reloc) {
		if (!reloc) {
			validate_fini(&op, chan, NULL, bo);
			reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
			if (IS_ERR(reloc)) {
				ret = PTR_ERR(reloc);
				goto out_prevalid;
			}

			goto revalidate;
		}

		ret = nouveau_gem_pushbuf_reloc_apply(cli, req, reloc, bo);
		if (ret) {
			NV_PRINTK(err, cli, "reloc apply: %d\n", ret);
			goto out;
		}
	}

	if (chan->dma.ib_max) {
		ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
		if (ret) {
			NV_PRINTK(err, cli, "nv50cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_vma *vma = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			nv50_dma_push(chan, vma->addr + push[i].offset,
				      push[i].length);
		}
	} else
	if (drm->client.device.info.chipset >= 0x25) {
		ret = RING_SPACE(chan, req->nr_push * 2);
		if (ret) {
			NV_PRINTK(err, cli, "cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			OUT_RING(chan, (nvbo->bo.offset + push[i].offset) | 2);
			OUT_RING(chan, 0);
		}
	} else {
		ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
		if (ret) {
			NV_PRINTK(err, cli, "jmp_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;
			uint32_t cmd;

			cmd = chan->push.addr + ((chan->dma.cur + 2) << 2);
			cmd |= 0x20000000;
			if (unlikely(cmd != req->suffix0)) {
				if (!nvbo->kmap.virtual) {
					ret = ttm_bo_kmap(&nvbo->bo, 0,
							  nvbo->bo.mem.
							  num_pages,
							  &nvbo->kmap);
					if (ret) {
						WIND_RING(chan);
						goto out;
					}
					nvbo->validate_mapped = true;
				}

				nouveau_bo_wr32(nvbo, (push[i].offset +
						push[i].length - 8) / 4, cmd);
			}

			OUT_RING(chan, 0x20000000 |
				      (nvbo->bo.offset + push[i].offset));
			OUT_RING(chan, 0);
			for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
				OUT_RING(chan, 0);
		}
	}

	ret = nouveau_fence_new(chan, false, &fence);
	if (ret) {
		NV_PRINTK(err, cli, "error fencing pushbuf: %d\n", ret);
		WIND_RING(chan);
		goto out;
	}

	if (sync) {
		if (!(ret = nouveau_fence_wait(fence, false, false))) {
			if ((ret = dma_fence_get_status(&fence->base)) == 1)
				ret = 0;
		}
	}

out:
	validate_fini(&op, chan, fence, bo);
	nouveau_fence_unref(&fence);

	if (do_reloc) {
		struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
						u64_to_user_ptr(req->buffers);

		for (i = 0; i < req->nr_buffers; i++) {
			if (bo[i].presumed.valid)
				continue;

			if (copy_to_user(&upbbo[i].presumed, &bo[i].presumed,
					 sizeof(bo[i].presumed))) {
				ret = -EFAULT;
				break;
			}
		}
	}
out_prevalid:
	if (!IS_ERR(reloc))
		u_free(reloc);
	u_free(bo);
	u_free(push);

out_next:
	if (chan->dma.ib_max) {
		req->suffix0 = 0x00000000;
		req->suffix1 = 0x00000000;
	} else
	if (drm->client.device.info.chipset >= 0x25) {
		req->suffix0 = 0x00020000;
		req->suffix1 = 0x00000000;
	} else {
		req->suffix0 = 0x20000000 |
			      (chan->push.addr + ((chan->dma.cur + 2) << 2));
		req->suffix1 = 0x00000000;
	}

	return nouveau_abi16_put(abi16, ret);
}
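
/*
 * Prepare a buffer for CPU access: wait for GPU work holding its
 * reservation object (up to 30 seconds, or not at all with
 * CPU_PREP_NOWAIT), then do any cache maintenance the CPU side needs.
 */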
int
nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_gem_cpu_prep *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;
	bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
	bool write = !!(req->flags & NOUVEAU_GEM_CPU_PREP_WRITE);
	long lret;
	int ret;

	gem = drm_gem_object_lookup(file_priv, req->handle);
	if (!gem)
		return -ENOENT;
	nvbo = nouveau_gem_object(gem);

	lret = dma_resv_wait_timeout_rcu(nvbo->bo.base.resv, write, true,
					 no_wait ? 0 : 30 * HZ);
	if (!lret)
		ret = -EBUSY;
	else if (lret > 0)
		ret = 0;
	else
		ret = lret;

	nouveau_bo_sync_for_cpu(nvbo);
	drm_gem_object_put_unlocked(gem);

	return ret;
}
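
/*
 * Counterpart to cpu_prep: flush CPU caches so the device observes
 * anything written while the buffer was mapped.
 */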
int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_gem_cpu_fini *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;

	gem = drm_gem_object_lookup(file_priv, req->handle);
	if (!gem)
		return -ENOENT;
	nvbo = nouveau_gem_object(gem);

	nouveau_bo_sync_for_device(nvbo);
	drm_gem_object_put_unlocked(gem);
	return 0;
}
int
nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_nouveau_gem_info *req = data;
	struct drm_gem_object *gem;
	int ret;

	gem = drm_gem_object_lookup(file_priv, req->handle);
	if (!gem)
		return -ENOENT;

	ret = nouveau_gem_info(file_priv, gem, req);
	drm_gem_object_put_unlocked(gem);
	return ret;
}