/*
 * Copyright (C) 2008 Ben Skeggs.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/dma-buf.h>

#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"

#define nouveau_gem_pushbuf_sync(chan) 0

int
nouveau_gem_object_new(struct drm_gem_object *gem)
{
	return 0;
}

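/*
 * Final GEM teardown for a buffer: drop any pin still held, detach a
 * PRIME import if the object was created from one, and release the
 * GEM object itself.
 */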
void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
	struct nouveau_bo *nvbo = gem->driver_private;
	struct ttm_buffer_object *bo = &nvbo->bo;

	if (unlikely(nvbo->pin_refcnt)) {
		nvbo->pin_refcnt = 1;
		nouveau_bo_unpin(nvbo);
	}

	if (gem->import_attach)
		drm_prime_gem_destroy(gem, nvbo->bo.sg);

	ttm_bo_unref(&bo);

	drm_gem_object_release(gem);
	kfree(gem);
}

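/*
 * Open callback, invoked each time a client obtains a handle to the
 * object.  On chipsets with per-client virtual memory this looks up the
 * buffer's VMA in the client's address space and takes a reference,
 * creating the mapping on first use.
 */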
int
nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;
	int ret;

	if (!fpriv->vm)
		return 0;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return ret;

	vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
	if (!vma) {
		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
		if (!vma) {
			ret = -ENOMEM;
			goto out;
		}

		ret = nouveau_bo_vma_add(nvbo, fpriv->vm, vma);
		if (ret) {
			kfree(vma);
			goto out;
		}
	} else {
		vma->refcount++;
	}

out:
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

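/*
 * Close callback, the counterpart to nouveau_gem_object_open(): drop the
 * client's VMA reference and tear the mapping down once the last
 * reference goes away.
 */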
void
nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;
	int ret;

	if (!fpriv->vm)
		return;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return;

	vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
	if (vma) {
		if (--vma->refcount == 0) {
			nouveau_bo_vma_del(nvbo, vma);
			kfree(vma);
		}
	}
	ttm_bo_unreserve(&nvbo->bo);
}

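/*
 * Allocate a buffer object.  The requested NOUVEAU_GEM_DOMAIN flags are
 * translated into TTM placement flags; a CPU-only (or empty) domain falls
 * back to system memory.
 */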
int
nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
		uint32_t tile_mode, uint32_t tile_flags,
		struct nouveau_bo **pnvbo)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_bo *nvbo;
	uint32_t flags = 0;
	int ret;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;
	if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
		flags |= TTM_PL_FLAG_SYSTEM;

	ret = nouveau_bo_new(dev, size, align, flags, tile_mode,
			     tile_flags, NULL, pnvbo);
	if (ret)
		return ret;
	nvbo = *pnvbo;

	/* we restrict allowed domains on nv50+ to only the types
	 * that were requested at creation time.  not possible on
	 * earlier chips without busting the ABI.
	 */
	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
			      NOUVEAU_GEM_DOMAIN_GART;
	if (dev_priv->card_type >= NV_50)
		nvbo->valid_domains &= domain;

	nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
	if (!nvbo->gem) {
		nouveau_bo_ref(NULL, pnvbo);
		return -ENOMEM;
	}

	nvbo->bo.persistent_swap_storage = nvbo->gem->filp;
	nvbo->gem->driver_private = nvbo;
	return 0;
}

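/*
 * Fill in a drm_nouveau_gem_info reply from the buffer's current TTM
 * placement.  On per-client VM chipsets the reported offset is the
 * buffer's address inside the client's virtual address space.
 */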
static int
nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
		 struct drm_nouveau_gem_info *rep)
{
	struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;

	if (nvbo->bo.mem.mem_type == TTM_PL_TT)
		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
	else
		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;

	rep->offset = nvbo->bo.offset;
	if (fpriv->vm) {
		vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
		if (!vma)
			return -EINVAL;

		rep->offset = vma->offset;
	}

	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	rep->map_handle = nvbo->bo.addr_space_offset;
	rep->tile_mode = nvbo->tile_mode;
	rep->tile_flags = nvbo->tile_flags;
	return 0;
}

int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_gem_new *req = data;
	struct nouveau_bo *nvbo = NULL;
	int ret = 0;

	dev_priv->ttm.bdev.dev_mapping = dev->dev_mapping;

	if (!dev_priv->engine.vram.flags_valid(dev, req->info.tile_flags)) {
		NV_ERROR(dev, "bad page flags: 0x%08x\n", req->info.tile_flags);
		return -EINVAL;
	}

	ret = nouveau_gem_new(dev, req->info.size, req->align,
			      req->info.domain, req->info.tile_mode,
			      req->info.tile_flags, &nvbo);
	if (ret)
		return ret;

	ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
	if (ret == 0) {
		ret = nouveau_gem_info(file_priv, nvbo->gem, &req->info);
		if (ret)
			drm_gem_handle_delete(file_priv, req->info.handle);
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(nvbo->gem);
	return ret;
}

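/*
 * Derive preferred and allowed TTM placements from the domains requested
 * for this submission, preferring wherever the buffer currently resides
 * to avoid unnecessary migration.
 */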
static int
nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
		       uint32_t write_domains, uint32_t valid_domains)
{
	struct nouveau_bo *nvbo = gem->driver_private;
	struct ttm_buffer_object *bo = &nvbo->bo;
	uint32_t domains = valid_domains & nvbo->valid_domains &
		(write_domains ? write_domains : read_domains);
	uint32_t pref_flags = 0, valid_flags = 0;

	if (!domains)
		return -EINVAL;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
		valid_flags |= TTM_PL_FLAG_VRAM;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_GART)
		valid_flags |= TTM_PL_FLAG_TT;

	if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
	    bo->mem.mem_type == TTM_PL_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
		 bo->mem.mem_type == TTM_PL_TT)
		pref_flags |= TTM_PL_FLAG_TT;

	else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else
		pref_flags |= TTM_PL_FLAG_TT;

	nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);

	return 0;
}

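/*
 * Buffers queued for validation are kept on one of three lists, chosen by
 * which domains they are allowed to live in.
 */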
struct validate_op {
	struct list_head vram_list;
	struct list_head gart_list;
	struct list_head both_list;
};

static void
validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
{
	struct list_head *entry, *tmp;
	struct nouveau_bo *nvbo;

	list_for_each_safe(entry, tmp, list) {
		nvbo = list_entry(entry, struct nouveau_bo, entry);

		nouveau_bo_fence(nvbo, fence);

		if (unlikely(nvbo->validate_mapped)) {
			ttm_bo_kunmap(&nvbo->kmap);
			nvbo->validate_mapped = false;
		}

		list_del(&nvbo->entry);
		nvbo->reserved_by = NULL;
		ttm_bo_unreserve(&nvbo->bo);
		drm_gem_object_unreference_unlocked(nvbo->gem);
	}
}

static void
validate_fini(struct validate_op *op, struct nouveau_fence *fence)
{
	validate_fini_list(&op->vram_list, fence);
	validate_fini_list(&op->gart_list, fence);
	validate_fini_list(&op->both_list, fence);
}

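/*
 * Reserve every buffer referenced by the pushbuf.  Reservation can race
 * with other clients, so on contention the whole list is backed out and
 * the loop restarted from scratch.
 */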
static int
validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo,
	      int nr_buffers, struct validate_op *op)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t sequence;
	int trycnt = 0;
	int ret, i;

	sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
retry:
	if (++trycnt > 100000) {
		NV_ERROR(dev, "%s failed and gave up.\n", __func__);
		return -EINVAL;
	}

	for (i = 0; i < nr_buffers; i++) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
		struct drm_gem_object *gem;
		struct nouveau_bo *nvbo;

		gem = drm_gem_object_lookup(dev, file_priv, b->handle);
		if (!gem) {
			NV_ERROR(dev, "Unknown handle 0x%08x\n", b->handle);
			validate_fini(op, NULL);
			return -ENOENT;
		}
		nvbo = gem->driver_private;

		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
			NV_ERROR(dev, "multiple instances of buffer %d on "
				      "validation list\n", b->handle);
			drm_gem_object_unreference_unlocked(gem);
			validate_fini(op, NULL);
			return -EINVAL;
		}

		ret = ttm_bo_reserve(&nvbo->bo, true, false, true, sequence);
		if (ret) {
			validate_fini(op, NULL);
			if (unlikely(ret == -EAGAIN))
				ret = ttm_bo_wait_unreserved(&nvbo->bo, true);
			drm_gem_object_unreference_unlocked(gem);
			if (unlikely(ret)) {
				if (ret != -ERESTARTSYS)
					NV_ERROR(dev, "fail reserve\n");
				return ret;
			}
			goto retry;
		}

		b->user_priv = (uint64_t)(unsigned long)nvbo;
		nvbo->reserved_by = file_priv;
		nvbo->pbbo_index = i;
		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
		    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
			list_add_tail(&nvbo->entry, &op->both_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
			list_add_tail(&nvbo->entry, &op->vram_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
			list_add_tail(&nvbo->entry, &op->gart_list);
		else {
			NV_ERROR(dev, "invalid valid domains: 0x%08x\n",
				 b->valid_domains);
			list_add_tail(&nvbo->entry, &op->both_list);
			validate_fini(op, NULL);
			return -EINVAL;
		}
	}

	return 0;
}

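/*
 * Make the target channel wait on any fence still attached to the buffer
 * before the new submission may touch it.
 */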
static int
validate_sync(struct nouveau_channel *chan, struct nouveau_bo *nvbo)
{
	struct nouveau_fence *fence = NULL;
	int ret = 0;

	spin_lock(&nvbo->bo.bdev->fence_lock);
	if (nvbo->bo.sync_obj)
		fence = nouveau_fence_ref(nvbo->bo.sync_obj);
	spin_unlock(&nvbo->bo.bdev->fence_lock);

	if (fence) {
		ret = nouveau_fence_sync(fence, chan);
		nouveau_fence_unref(&fence);
	}

	return ret;
}

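/*
 * Validate each reserved buffer into an acceptable placement.  On
 * pre-NV50 chips, stale presumed offsets are refreshed and copied back to
 * userspace; the return value is the number of buffers whose relocations
 * must be applied.
 */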
static int
validate_list(struct nouveau_channel *chan, struct list_head *list,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
				(void __force __user *)(uintptr_t)user_pbbo_ptr;
	struct drm_device *dev = chan->dev;
	struct nouveau_bo *nvbo;
	int ret, relocs = 0;

	list_for_each_entry(nvbo, list, entry) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];

		ret = validate_sync(chan, nvbo);
		if (unlikely(ret)) {
			NV_ERROR(dev, "fail pre-validate sync\n");
			return ret;
		}

		ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
					     b->write_domains,
					     b->valid_domains);
		if (unlikely(ret)) {
			NV_ERROR(dev, "fail set_domain\n");
			return ret;
		}

		ret = nouveau_bo_validate(nvbo, true, false, false);
		if (unlikely(ret)) {
			if (ret != -ERESTARTSYS)
				NV_ERROR(dev, "fail ttm_validate\n");
			return ret;
		}

		ret = validate_sync(chan, nvbo);
		if (unlikely(ret)) {
			NV_ERROR(dev, "fail post-validate sync\n");
			return ret;
		}

		if (dev_priv->card_type < NV_50) {
			if (nvbo->bo.offset == b->presumed.offset &&
			    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
			     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
				continue;

			if (nvbo->bo.mem.mem_type == TTM_PL_TT)
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
			else
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
			b->presumed.offset = nvbo->bo.offset;
			b->presumed.valid = 0;
			relocs++;

			if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed,
					     &b->presumed, sizeof(b->presumed)))
				return -EFAULT;
		}
	}

	return relocs;
}

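/*
 * Top-level validation: initialise the per-submission lists, reserve the
 * buffers, then validate each list in turn, reporting back how many
 * relocations need applying.
 */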
static int
nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
			     struct drm_file *file_priv,
			     struct drm_nouveau_gem_pushbuf_bo *pbbo,
			     uint64_t user_buffers, int nr_buffers,
			     struct validate_op *op, int *apply_relocs)
{
	struct drm_device *dev = chan->dev;
	int ret, relocs = 0;

	INIT_LIST_HEAD(&op->vram_list);
	INIT_LIST_HEAD(&op->gart_list);
	INIT_LIST_HEAD(&op->both_list);

	if (nr_buffers == 0)
		return 0;

	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
	if (unlikely(ret)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(dev, "validate_init\n");
		return ret;
	}

	ret = validate_list(chan, &op->vram_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(dev, "validate vram_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	ret = validate_list(chan, &op->gart_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(dev, "validate gart_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	ret = validate_list(chan, &op->both_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(dev, "validate both_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	*apply_relocs = relocs;
	return 0;
}

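/*
 * Copy a userspace array into a freshly kmalloc'd buffer.  Callers bound
 * nmemb via the NOUVEAU_GEM_MAX_* limits before getting here.
 */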
static inline void *
u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
{
	void *mem;
	void __user *userptr = (void __force __user *)(uintptr_t)user;

	mem = kmalloc(nmemb * size, GFP_KERNEL);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	if (DRM_COPY_FROM_USER(mem, userptr, nmemb * size)) {
		kfree(mem);
		return ERR_PTR(-EFAULT);
	}

	return mem;
}

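/*
 * Apply relocations: for every buffer whose presumed offset turned out to
 * be stale, patch the recorded words in the containing push buffer with
 * the real offset.
 */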
static int
nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
				struct drm_nouveau_gem_pushbuf *req,
				struct drm_nouveau_gem_pushbuf_bo *bo)
{
	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
	int ret = 0;
	unsigned i;

	reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
	if (IS_ERR(reloc))
		return PTR_ERR(reloc);

	for (i = 0; i < req->nr_relocs; i++) {
		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
		struct drm_nouveau_gem_pushbuf_bo *b;
		struct nouveau_bo *nvbo;
		uint32_t data;

		if (unlikely(r->bo_index > req->nr_buffers)) {
			NV_ERROR(dev, "reloc bo index invalid\n");
			ret = -EINVAL;
			break;
		}

		b = &bo[r->bo_index];
		if (b->presumed.valid)
			continue;

		if (unlikely(r->reloc_bo_index > req->nr_buffers)) {
			NV_ERROR(dev, "reloc container bo index invalid\n");
			ret = -EINVAL;
			break;
		}
		nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;

		if (unlikely(r->reloc_bo_offset + 4 >
			     nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
			NV_ERROR(dev, "reloc outside of bo\n");
			ret = -EINVAL;
			break;
		}

		if (!nvbo->kmap.virtual) {
			ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
					  &nvbo->kmap);
			if (ret) {
				NV_ERROR(dev, "failed kmap for reloc\n");
				break;
			}
			nvbo->validate_mapped = true;
		}

		if (r->flags & NOUVEAU_GEM_RELOC_LOW)
			data = b->presumed.offset + r->data;
		else
		if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
			data = (b->presumed.offset + r->data) >> 32;
		else
			data = r->data;

		if (r->flags & NOUVEAU_GEM_RELOC_OR) {
			if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
				data |= r->tor;
			else
				data |= r->vor;
		}

		spin_lock(&nvbo->bo.bdev->fence_lock);
		ret = ttm_bo_wait(&nvbo->bo, false, false, false);
		spin_unlock(&nvbo->bo.bdev->fence_lock);
		if (ret) {
			NV_ERROR(dev, "reloc wait_idle failed: %d\n", ret);
			break;
		}

		nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
	}

	kfree(reloc);
	return ret;
}

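/*
 * Main submission ioctl: copy in the push, buffer and reloc arrays,
 * validate the buffer list, apply any relocations, then feed the push
 * buffers to the channel (indirect buffers on ib-capable channels, call
 * or jump commands on older chips) and fence the submission.
 */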
int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_gem_pushbuf *req = data;
	struct drm_nouveau_gem_pushbuf_push *push;
	struct drm_nouveau_gem_pushbuf_bo *bo;
	struct nouveau_channel *chan;
	struct validate_op op;
	struct nouveau_fence *fence = NULL;
	int i, j, ret = 0, do_reloc = 0;

	chan = nouveau_channel_get(file_priv, req->channel);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	req->vram_available = dev_priv->fb_aper_free;
	req->gart_available = dev_priv->gart_info.aper_free;
	if (unlikely(req->nr_push == 0))
		goto out_next;

	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
		NV_ERROR(dev, "pushbuf push count exceeds limit: %d max %d\n",
			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
		nouveau_channel_put(&chan);
		return -EINVAL;
	}

	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
		NV_ERROR(dev, "pushbuf bo count exceeds limit: %d max %d\n",
			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
		nouveau_channel_put(&chan);
		return -EINVAL;
	}

	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
		NV_ERROR(dev, "pushbuf reloc count exceeds limit: %d max %d\n",
			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
		nouveau_channel_put(&chan);
		return -EINVAL;
	}

	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
	if (IS_ERR(push)) {
		nouveau_channel_put(&chan);
		return PTR_ERR(push);
	}

	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
	if (IS_ERR(bo)) {
		kfree(push);
		nouveau_channel_put(&chan);
		return PTR_ERR(bo);
	}

	/* Ensure all push buffers are on validate list */
	for (i = 0; i < req->nr_push; i++) {
		if (push[i].bo_index >= req->nr_buffers) {
			NV_ERROR(dev, "push %d buffer not in list\n", i);
			ret = -EINVAL;
			goto out_prevalid;
		}
	}

	/* Validate buffer list */
	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
					   req->nr_buffers, &op, &do_reloc);
	if (ret) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(dev, "validate: %d\n", ret);
		goto out_prevalid;
	}

	/* Apply any relocations that are required */
	if (do_reloc) {
		ret = nouveau_gem_pushbuf_reloc_apply(dev, req, bo);
		if (ret) {
			NV_ERROR(dev, "reloc apply: %d\n", ret);
			goto out;
		}
	}

	if (chan->dma.ib_max) {
		ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
		if (ret) {
			NV_INFO(dev, "nv50cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			nv50_dma_push(chan, nvbo, push[i].offset,
				      push[i].length);
		}
	} else
	if (dev_priv->chipset >= 0x25) {
		ret = RING_SPACE(chan, req->nr_push * 2);
		if (ret) {
			NV_ERROR(dev, "cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;
			struct drm_mm_node *mem = nvbo->bo.mem.mm_node;

			OUT_RING(chan, ((mem->start << PAGE_SHIFT) +
					push[i].offset) | 2);
			OUT_RING(chan, 0);
		}
	} else {
		ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
		if (ret) {
			NV_ERROR(dev, "jmp_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;
			struct drm_mm_node *mem = nvbo->bo.mem.mm_node;
			uint32_t cmd;

			cmd = chan->pushbuf_base + ((chan->dma.cur + 2) << 2);
			cmd |= 0x20000000;
			if (unlikely(cmd != req->suffix0)) {
				if (!nvbo->kmap.virtual) {
					ret = ttm_bo_kmap(&nvbo->bo, 0,
							  nvbo->bo.mem.num_pages,
							  &nvbo->kmap);
					if (ret) {
						WIND_RING(chan);
						goto out;
					}
					nvbo->validate_mapped = true;
				}

				nouveau_bo_wr32(nvbo, (push[i].offset +
						push[i].length - 8) / 4, cmd);
			}

			OUT_RING(chan, ((mem->start << PAGE_SHIFT) +
					push[i].offset) | 0x20000000);
			OUT_RING(chan, 0);
			for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
				OUT_RING(chan, 0);
		}
	}

	ret = nouveau_fence_new(chan, &fence);
	if (ret) {
		NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
		WIND_RING(chan);
		goto out;
	}

out:
	validate_fini(&op, fence);
	nouveau_fence_unref(&fence);

out_prevalid:
	kfree(bo);
	kfree(push);

out_next:
	if (chan->dma.ib_max) {
		req->suffix0 = 0x00000000;
		req->suffix1 = 0x00000000;
	} else
	if (dev_priv->chipset >= 0x25) {
		req->suffix0 = 0x00020000;
		req->suffix1 = 0x00000000;
	} else {
		req->suffix0 = 0x20000000 |
			      (chan->pushbuf_base + ((chan->dma.cur + 2) << 2));
		req->suffix1 = 0x00000000;
	}

	nouveau_channel_put(&chan);
	return ret;
}

static inline uint32_t
domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
{
	uint32_t flags = 0;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;

	return flags;
}

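/*
 * CPU_PREP ioctl: wait for the buffer to go idle before the CPU accesses
 * it, optionally returning immediately if NOWAIT was requested.
 */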
int
nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_gem_cpu_prep *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;
	bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
	int ret = -EINVAL;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -ENOENT;
	nvbo = nouveau_gem_object(gem);

	spin_lock(&nvbo->bo.bdev->fence_lock);
	ret = ttm_bo_wait(&nvbo->bo, true, true, no_wait);
	spin_unlock(&nvbo->bo.bdev->fence_lock);
	drm_gem_object_unreference_unlocked(gem);
	return ret;
}

int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	return 0;
}

int
nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_nouveau_gem_info *req = data;
	struct drm_gem_object *gem;
	int ret;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -ENOENT;

	ret = nouveau_gem_info(file_priv, gem, req);
	drm_gem_object_unreference_unlocked(gem);
	return ret;
}