/*
 * Copyright 2007 Dave Airlied
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *          Ben Skeggs   <darktama@iinet.net.au>
 *          Jeremy Kolb  <jkolb@brandeis.edu>
 */
#include "ttm/ttm_page_alloc.h"

#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_mm.h"
#include "nouveau_vm.h"
#include "nouveau_fence.h"
#include "nouveau_ramht.h"

#include <linux/log2.h>
#include <linux/slab.h>
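/* TTM destructor for nouveau buffer objects.  TTM calls this once the last
 * reference to the object is dropped; any tile region still attached to the
 * bo is released here, and a bo that is still bound to a GEM object at this
 * point indicates a refcounting bug.
 */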
static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct drm_device *dev = dev_priv->dev;
        struct nouveau_bo *nvbo = nouveau_bo(bo);

        if (unlikely(nvbo->gem))
                DRM_ERROR("bo %p still attached to GEM object\n", bo);

        nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
        kfree(nvbo);
}
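/* Round the requested size/alignment up to what the chipset can address:
 * pre-NV50 tiled buffers are rounded to a multiple of the tile pitch,
 * NV50+ buffers to the page size chosen for the object, and everything to
 * at least PAGE_SIZE.
 */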
static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
                       int *align, int *size)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);

        if (dev_priv->card_type < NV_50) {
                if (nvbo->tile_mode) {
                        if (dev_priv->chipset >= 0x40) {
                                *size = roundup(*size, 64 * nvbo->tile_mode);
                        } else if (dev_priv->chipset >= 0x30) {
                                *size = roundup(*size, 64 * nvbo->tile_mode);
                        } else if (dev_priv->chipset >= 0x20) {
                                *size = roundup(*size, 64 * nvbo->tile_mode);
                        } else if (dev_priv->chipset >= 0x10) {
                                *size = roundup(*size, 32 * nvbo->tile_mode);
                        }
                }
        } else {
                *size = roundup(*size, (1 << nvbo->page_shift));
                *align = max((1 << nvbo->page_shift), *align);
        }

        *size = roundup(*size, PAGE_SIZE);
}
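/* Allocate and initialise a nouveau_bo and hand it to TTM for backing
 * storage.  Placement, tiling state and the per-object page shift are
 * decided here before ttm_bo_init() takes ownership of the object.
 */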
int
nouveau_bo_new(struct drm_device *dev, int size, int align,
               uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
               struct sg_table *sg,
               struct nouveau_bo **pnvbo)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_bo *nvbo;
        size_t acc_size;
        int ret;
        int type = ttm_bo_type_device;

        if (sg)
                type = ttm_bo_type_sg;

        nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
        if (!nvbo)
                return -ENOMEM;
        INIT_LIST_HEAD(&nvbo->head);
        INIT_LIST_HEAD(&nvbo->entry);
        INIT_LIST_HEAD(&nvbo->vma_list);
        nvbo->tile_mode = tile_mode;
        nvbo->tile_flags = tile_flags;
        nvbo->bo.bdev = &dev_priv->ttm.bdev;

        nvbo->page_shift = 12;
        if (dev_priv->bar1_vm) {
                if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
                        nvbo->page_shift = dev_priv->bar1_vm->lpg_shift;
        }

        nouveau_bo_fixup_align(nvbo, flags, &align, &size);
        nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
        nouveau_bo_placement_set(nvbo, flags, 0);

        acc_size = ttm_bo_dma_acc_size(&dev_priv->ttm.bdev, size,
                                       sizeof(struct nouveau_bo));

        ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
                          type, &nvbo->placement,
                          align >> PAGE_SHIFT, 0, false, NULL, acc_size, sg,
                          nouveau_bo_del_ttm);
        if (ret) {
                /* ttm will call nouveau_bo_del_ttm if it fails.. */
                return ret;
        }

        *pnvbo = nvbo;
        return 0;
}
static void
set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
{
        *n = 0;

        if (type & TTM_PL_FLAG_VRAM)
                pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
        if (type & TTM_PL_FLAG_TT)
                pl[(*n)++] = TTM_PL_FLAG_TT | flags;
        if (type & TTM_PL_FLAG_SYSTEM)
                pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
}
static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
        int vram_pages = dev_priv->vram_size >> PAGE_SHIFT;

        if (dev_priv->card_type == NV_10 &&
            nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
            nvbo->bo.mem.num_pages < vram_pages / 4) {
                /*
                 * Make sure that the color and depth buffers are handled
                 * by independent memory controller units. Up to a 9x
                 * speed up when alpha-blending and depth-test are enabled
                 * at the same time.
                 */
                if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
                        nvbo->placement.fpfn = vram_pages / 2;
                        nvbo->placement.lpfn = ~0;
                } else {
                        nvbo->placement.fpfn = 0;
                        nvbo->placement.lpfn = vram_pages / 2;
                }
        }
}
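/* Build the TTM placement lists (normal and busy) for a buffer object from
 * the requested domain flags, keeping NO_EVICT set while the bo is pinned,
 * and apply the NV1x VRAM range split above where it helps.
 */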
void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
        struct ttm_placement *pl = &nvbo->placement;
        uint32_t flags = TTM_PL_MASK_CACHING |
                (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

        pl->placement = nvbo->placements;
        set_placement_list(nvbo->placements, &pl->num_placement,
                           type, flags);

        pl->busy_placement = nvbo->busy_placements;
        set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
                           type | busy, flags);

        set_placement_range(nvbo, type);
}
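/* Pin a buffer into the requested memory type.  Pinning is refcounted: only
 * the first pin validates the bo into place and adjusts the aperture
 * accounting, and pinning into a different memory type than an existing pin
 * is rejected.
 */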
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
        struct ttm_buffer_object *bo = &nvbo->bo;
        int ret;

        if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
                NV_ERROR(nouveau_bdev(bo->bdev)->dev,
                         "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
                         1 << bo->mem.mem_type, memtype);
                return -EINVAL;
        }

        if (nvbo->pin_refcnt++)
                return 0;

        ret = ttm_bo_reserve(bo, false, false, false, 0);
        if (ret)
                goto out;

        nouveau_bo_placement_set(nvbo, memtype, 0);

        ret = nouveau_bo_validate(nvbo, false, false, false);
        if (ret == 0) {
                switch (bo->mem.mem_type) {
                case TTM_PL_VRAM:
                        dev_priv->fb_aper_free -= bo->mem.size;
                        break;
                case TTM_PL_TT:
                        dev_priv->gart_info.aper_free -= bo->mem.size;
                        break;
                default:
                        break;
                }
        }
        ttm_bo_unreserve(bo);
out:
        if (unlikely(ret))
                nvbo->pin_refcnt--;
        return ret;
}
int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
        struct ttm_buffer_object *bo = &nvbo->bo;
        int ret;

        if (--nvbo->pin_refcnt)
                return 0;

        ret = ttm_bo_reserve(bo, false, false, false, 0);
        if (ret)
                return ret;

        nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

        ret = nouveau_bo_validate(nvbo, false, false, false);
        if (ret == 0) {
                switch (bo->mem.mem_type) {
                case TTM_PL_VRAM:
                        dev_priv->fb_aper_free += bo->mem.size;
                        break;
                case TTM_PL_TT:
                        dev_priv->gart_info.aper_free += bo->mem.size;
                        break;
                default:
                        break;
                }
        }

        ttm_bo_unreserve(bo);
        return ret;
}
int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
        int ret;

        ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
        if (ret)
                return ret;

        ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
        ttm_bo_unreserve(&nvbo->bo);
        return ret;
}
void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
        ttm_bo_kunmap(&nvbo->kmap);
}
int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
                    bool no_wait_reserve, bool no_wait_gpu)
{
        int ret;

        ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, interruptible,
                              no_wait_reserve, no_wait_gpu);
        if (ret)
                return ret;

        return 0;
}
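/* CPU accessors for a kmap'd buffer object.  The mapping may be I/O memory
 * (VRAM) or ordinary pages, so accesses go through the io{read,write}
 * helpers when ttm_kmap_obj_virtual() reports an iomem mapping.
 */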
u16
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
{
        bool is_iomem;
        u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
        mem = &mem[index];
        if (is_iomem)
                return ioread16_native((void __force __iomem *)mem);
        else
                return *mem;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
        bool is_iomem;
        u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
        mem = &mem[index];
        if (is_iomem)
                iowrite16_native(val, (void __force __iomem *)mem);
        else
                *mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
        bool is_iomem;
        u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
        mem = &mem[index];
        if (is_iomem)
                return ioread32_native((void __force __iomem *)mem);
        else
                return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
        bool is_iomem;
        u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
        mem = &mem[index];
        if (is_iomem)
                iowrite32_native(val, (void __force __iomem *)mem);
        else
                *mem = val;
}
static struct ttm_tt *
nouveau_ttm_tt_create(struct ttm_bo_device *bdev,
                      unsigned long size, uint32_t page_flags,
                      struct page *dummy_read_page)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
        struct drm_device *dev = dev_priv->dev;

        switch (dev_priv->gart_info.type) {
#if __OS_HAS_AGP
        case NOUVEAU_GART_AGP:
                return ttm_agp_tt_create(bdev, dev->agp->bridge,
                                         size, page_flags, dummy_read_page);
#endif
        case NOUVEAU_GART_PDMA:
        case NOUVEAU_GART_HW:
                return nouveau_sgdma_create_ttm(bdev, size, page_flags,
                                                dummy_read_page);
        default:
                NV_ERROR(dev, "Unknown GART type %d\n",
                         dev_priv->gart_info.type);
                break;
        }

        return NULL;
}
static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
        /* We'll do this from user space. */
        return 0;
}
static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                         struct ttm_mem_type_manager *man)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
        struct drm_device *dev = dev_priv->dev;

        switch (type) {
        case TTM_PL_SYSTEM:
                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_MASK_CACHING;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case TTM_PL_VRAM:
                if (dev_priv->card_type >= NV_50) {
                        man->func = &nouveau_vram_manager;
                        man->io_reserve_fastpath = false;
                        man->use_io_reserve_lru = true;
                } else {
                        man->func = &ttm_bo_manager_func;
                }
                man->flags = TTM_MEMTYPE_FLAG_FIXED |
                             TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_UNCACHED |
                                         TTM_PL_FLAG_WC;
                man->default_caching = TTM_PL_FLAG_WC;
                break;
        case TTM_PL_TT:
                if (dev_priv->card_type >= NV_50)
                        man->func = &nouveau_gart_manager;
                else
                        man->func = &ttm_bo_manager_func;
                switch (dev_priv->gart_info.type) {
                case NOUVEAU_GART_AGP:
                        man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                        man->available_caching = TTM_PL_FLAG_UNCACHED |
                                                 TTM_PL_FLAG_WC;
                        man->default_caching = TTM_PL_FLAG_WC;
                        break;
                case NOUVEAU_GART_PDMA:
                case NOUVEAU_GART_HW:
                        man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
                                     TTM_MEMTYPE_FLAG_CMA;
                        man->available_caching = TTM_PL_MASK_CACHING;
                        man->default_caching = TTM_PL_FLAG_CACHED;
                        break;
                default:
                        NV_ERROR(dev, "Unknown GART type: %d\n",
                                 dev_priv->gart_info.type);
                        return -EINVAL;
                }
                break;
        default:
                NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
                return -EINVAL;
        }
        return 0;
}
static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
        struct nouveau_bo *nvbo = nouveau_bo(bo);

        switch (bo->mem.mem_type) {
        case TTM_PL_VRAM:
                nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
                                         TTM_PL_FLAG_SYSTEM);
                break;
        default:
                nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
                break;
        }

        *pl = nvbo->placement;
}
/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
 * TTM_PL_{VRAM,TT} directly.
 */
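/* On NV50 and later, the per-generation copy routines below read the source
 * through node->vma[0] and write the destination through node->vma[1];
 * these temporary VMAs are set up by nouveau_bo_move_m2mf() before the copy
 * is submitted and torn down once TTM destroys the old ttm_mem_reg.
 */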
static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
                              struct nouveau_bo *nvbo, bool evict,
                              bool no_wait_reserve, bool no_wait_gpu,
                              struct ttm_mem_reg *new_mem)
{
        struct nouveau_fence *fence = NULL;
        int ret;

        ret = nouveau_fence_new(chan, &fence);
        if (ret)
                return ret;

        ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict,
                                        no_wait_reserve, no_wait_gpu, new_mem);
        nouveau_fence_unref(&fence);
        return ret;
}
static int
nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
        struct nouveau_mem *node = old_mem->mm_node;
        int ret = RING_SPACE(chan, 10);
        if (ret == 0) {
                BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
                OUT_RING (chan, upper_32_bits(node->vma[0].offset));
                OUT_RING (chan, lower_32_bits(node->vma[0].offset));
                OUT_RING (chan, upper_32_bits(node->vma[1].offset));
                OUT_RING (chan, lower_32_bits(node->vma[1].offset));
                OUT_RING (chan, PAGE_SIZE);
                OUT_RING (chan, PAGE_SIZE);
                OUT_RING (chan, PAGE_SIZE);
                OUT_RING (chan, new_mem->num_pages);
                BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386);
        }
        return ret;
}
static int
nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
        int ret = RING_SPACE(chan, 2);
        if (ret == 0) {
                BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
                OUT_RING (chan, handle);
        }
        return ret;
}
static int
nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
        struct nouveau_mem *node = old_mem->mm_node;
        u64 src_offset = node->vma[0].offset;
        u64 dst_offset = node->vma[1].offset;
        u32 page_count = new_mem->num_pages;
        int ret;

        page_count = new_mem->num_pages;
        while (page_count) {
                int line_count = (page_count > 8191) ? 8191 : page_count;

                ret = RING_SPACE(chan, 11);
                if (ret)
                        return ret;

                BEGIN_NVC0(chan, NvSubCopy, 0x030c, 8);
                OUT_RING (chan, upper_32_bits(src_offset));
                OUT_RING (chan, lower_32_bits(src_offset));
                OUT_RING (chan, upper_32_bits(dst_offset));
                OUT_RING (chan, lower_32_bits(dst_offset));
                OUT_RING (chan, PAGE_SIZE);
                OUT_RING (chan, PAGE_SIZE);
                OUT_RING (chan, PAGE_SIZE);
                OUT_RING (chan, line_count);
                BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
                OUT_RING (chan, 0x00000110);

                page_count -= line_count;
                src_offset += (PAGE_SIZE * line_count);
                dst_offset += (PAGE_SIZE * line_count);
        }

        return 0;
}
static int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
        struct nouveau_mem *node = old_mem->mm_node;
        u64 src_offset = node->vma[0].offset;
        u64 dst_offset = node->vma[1].offset;
        u32 page_count = new_mem->num_pages;
        int ret;

        page_count = new_mem->num_pages;
        while (page_count) {
                int line_count = (page_count > 2047) ? 2047 : page_count;

                ret = RING_SPACE(chan, 12);
                if (ret)
                        return ret;

                BEGIN_NVC0(chan, NvSubCopy, 0x0238, 2);
                OUT_RING (chan, upper_32_bits(dst_offset));
                OUT_RING (chan, lower_32_bits(dst_offset));
                BEGIN_NVC0(chan, NvSubCopy, 0x030c, 6);
                OUT_RING (chan, upper_32_bits(src_offset));
                OUT_RING (chan, lower_32_bits(src_offset));
                OUT_RING (chan, PAGE_SIZE); /* src_pitch */
                OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
                OUT_RING (chan, PAGE_SIZE); /* line_length */
                OUT_RING (chan, line_count);
                BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
                OUT_RING (chan, 0x00100110);

                page_count -= line_count;
                src_offset += (PAGE_SIZE * line_count);
                dst_offset += (PAGE_SIZE * line_count);
        }

        return 0;
}
static int
nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
        struct nouveau_mem *node = old_mem->mm_node;
        u64 src_offset = node->vma[0].offset;
        u64 dst_offset = node->vma[1].offset;
        u32 page_count = new_mem->num_pages;
        int ret;

        page_count = new_mem->num_pages;
        while (page_count) {
                int line_count = (page_count > 8191) ? 8191 : page_count;

                ret = RING_SPACE(chan, 11);
                if (ret)
                        return ret;

                BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
                OUT_RING (chan, upper_32_bits(src_offset));
                OUT_RING (chan, lower_32_bits(src_offset));
                OUT_RING (chan, upper_32_bits(dst_offset));
                OUT_RING (chan, lower_32_bits(dst_offset));
                OUT_RING (chan, PAGE_SIZE);
                OUT_RING (chan, PAGE_SIZE);
                OUT_RING (chan, PAGE_SIZE);
                OUT_RING (chan, line_count);
                BEGIN_NV04(chan, NvSubCopy, 0x0300, 1);
                OUT_RING (chan, 0x00000110);

                page_count -= line_count;
                src_offset += (PAGE_SIZE * line_count);
                dst_offset += (PAGE_SIZE * line_count);
        }

        return 0;
}
static int
nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
        struct nouveau_mem *node = old_mem->mm_node;
        int ret = RING_SPACE(chan, 7);
        if (ret == 0) {
                BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
                OUT_RING (chan, upper_32_bits(node->vma[0].offset));
                OUT_RING (chan, lower_32_bits(node->vma[0].offset));
                OUT_RING (chan, upper_32_bits(node->vma[1].offset));
                OUT_RING (chan, lower_32_bits(node->vma[1].offset));
                OUT_RING (chan, 0x00000000 /* COPY */);
                OUT_RING (chan, new_mem->num_pages << PAGE_SHIFT);
        }
        return ret;
}
static int
nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
        struct nouveau_mem *node = old_mem->mm_node;
        int ret = RING_SPACE(chan, 7);
        if (ret == 0) {
                BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
                OUT_RING (chan, new_mem->num_pages << PAGE_SHIFT);
                OUT_RING (chan, upper_32_bits(node->vma[0].offset));
                OUT_RING (chan, lower_32_bits(node->vma[0].offset));
                OUT_RING (chan, upper_32_bits(node->vma[1].offset));
                OUT_RING (chan, lower_32_bits(node->vma[1].offset));
                OUT_RING (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */);
        }
        return ret;
}
static int
nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
        int ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000,
                                         &chan->m2mf_ntfy);
        if (ret == 0) {
                ret = RING_SPACE(chan, 6);
                if (ret == 0) {
                        BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
                        OUT_RING (chan, handle);
                        BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
                        OUT_RING (chan, NvNotify0);
                        OUT_RING (chan, NvDmaFB);
                        OUT_RING (chan, NvDmaFB);
                } else {
                        nouveau_ramht_remove(chan, NvNotify0);
                }
        }

        return ret;
}
static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
        struct nouveau_mem *node = old_mem->mm_node;
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        u64 length = (new_mem->num_pages << PAGE_SHIFT);
        u64 src_offset = node->vma[0].offset;
        u64 dst_offset = node->vma[1].offset;
        int ret;

        while (length) {
                u32 amount, stride, height;

                amount = min(length, (u64)(4 * 1024 * 1024));
                stride = 16 * 4;
                height = amount / stride;

                if (new_mem->mem_type == TTM_PL_VRAM &&
                    nouveau_bo_tile_layout(nvbo)) {
                        ret = RING_SPACE(chan, 8);
                        if (ret)
                                return ret;

                        BEGIN_NV04(chan, NvSubCopy, 0x0200, 7);
                        OUT_RING (chan, 0);
                        OUT_RING (chan, 0);
                        OUT_RING (chan, stride);
                        OUT_RING (chan, height);
                        OUT_RING (chan, 1);
                        OUT_RING (chan, 0);
                        OUT_RING (chan, 0);
                } else {
                        ret = RING_SPACE(chan, 2);
                        if (ret)
                                return ret;

                        BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);
                        OUT_RING (chan, 1);
                }
                if (old_mem->mem_type == TTM_PL_VRAM &&
                    nouveau_bo_tile_layout(nvbo)) {
                        ret = RING_SPACE(chan, 8);
                        if (ret)
                                return ret;

                        BEGIN_NV04(chan, NvSubCopy, 0x021c, 7);
                        OUT_RING (chan, 0);
                        OUT_RING (chan, 0);
                        OUT_RING (chan, stride);
                        OUT_RING (chan, height);
                        OUT_RING (chan, 1);
                        OUT_RING (chan, 0);
                        OUT_RING (chan, 0);
                } else {
                        ret = RING_SPACE(chan, 2);
                        if (ret)
                                return ret;

                        BEGIN_NV04(chan, NvSubCopy, 0x021c, 1);
                        OUT_RING (chan, 1);
                }

                ret = RING_SPACE(chan, 14);
                if (ret)
                        return ret;

                BEGIN_NV04(chan, NvSubCopy, 0x0238, 2);
                OUT_RING (chan, upper_32_bits(src_offset));
                OUT_RING (chan, upper_32_bits(dst_offset));
                BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
                OUT_RING (chan, lower_32_bits(src_offset));
                OUT_RING (chan, lower_32_bits(dst_offset));
                OUT_RING (chan, stride);
                OUT_RING (chan, stride);
                OUT_RING (chan, stride);
                OUT_RING (chan, height);
                OUT_RING (chan, 0x00000101);
                OUT_RING (chan, 0x00000000);
                BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
                OUT_RING (chan, 0);

                length -= amount;
                src_offset += amount;
                dst_offset += amount;
        }

        return 0;
}
static int
nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
        int ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000,
                                         &chan->m2mf_ntfy);
        if (ret == 0) {
                ret = RING_SPACE(chan, 4);
                if (ret == 0) {
                        BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
                        OUT_RING (chan, handle);
                        BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
                        OUT_RING (chan, NvNotify0);
                }
        }

        return ret;
}
static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
                      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
{
        if (mem->mem_type == TTM_PL_TT)
                return chan->gart_handle;
        return chan->vram_handle;
}
static int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
        u32 src_offset = old_mem->start << PAGE_SHIFT;
        u32 dst_offset = new_mem->start << PAGE_SHIFT;
        u32 page_count = new_mem->num_pages;
        int ret;

        ret = RING_SPACE(chan, 3);
        if (ret)
                return ret;

        BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
        OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
        OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

        page_count = new_mem->num_pages;
        while (page_count) {
                int line_count = (page_count > 2047) ? 2047 : page_count;

                ret = RING_SPACE(chan, 11);
                if (ret)
                        return ret;

                BEGIN_NV04(chan, NvSubCopy,
                           NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
                OUT_RING (chan, src_offset);
                OUT_RING (chan, dst_offset);
                OUT_RING (chan, PAGE_SIZE); /* src_pitch */
                OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
                OUT_RING (chan, PAGE_SIZE); /* line_length */
                OUT_RING (chan, line_count);
                OUT_RING (chan, 0x00000101);
                OUT_RING (chan, 0x00000000);
                BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
                OUT_RING (chan, 0);

                page_count -= line_count;
                src_offset += (PAGE_SIZE * line_count);
                dst_offset += (PAGE_SIZE * line_count);
        }

        return 0;
}
static int
nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
                   struct ttm_mem_reg *mem, struct nouveau_vma *vma)
{
        struct nouveau_mem *node = mem->mm_node;
        int ret;

        ret = nouveau_vm_get(chan->vm, mem->num_pages << PAGE_SHIFT,
                             node->page_shift, NV_MEM_ACCESS_RO, vma);
        if (ret)
                return ret;

        if (mem->mem_type == TTM_PL_VRAM)
                nouveau_vm_map(vma, node);
        else
                nouveau_vm_map_sg(vma, 0, mem->num_pages << PAGE_SHIFT, node);

        return 0;
}
static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
                     bool no_wait_reserve, bool no_wait_gpu,
                     struct ttm_mem_reg *new_mem)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct nouveau_channel *chan = dev_priv->channel;
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct ttm_mem_reg *old_mem = &bo->mem;
        int ret;

        mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);

        /* create temporary vmas for the transfer and attach them to the
         * old nouveau_mem node, these will get cleaned up after ttm has
         * destroyed the ttm_mem_reg
         */
        if (dev_priv->card_type >= NV_50) {
                struct nouveau_mem *node = old_mem->mm_node;

                ret = nouveau_vma_getmap(chan, nvbo, old_mem, &node->vma[0]);
                if (ret)
                        goto out;

                ret = nouveau_vma_getmap(chan, nvbo, new_mem, &node->vma[1]);
                if (ret)
                        goto out;
        }

        ret = dev_priv->ttm.move(chan, bo, &bo->mem, new_mem);
        if (ret == 0) {
                ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
                                                    no_wait_reserve,
                                                    no_wait_gpu, new_mem);
        }

out:
        mutex_unlock(&chan->mutex);
        return ret;
}
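/* Probe the channel for a copy engine or M2MF class we know how to drive,
 * preferring entries earlier in the table; if nothing initialises
 * successfully, dev_priv->ttm.move stays NULL and buffer moves fall back to
 * CPU copies.
 */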
void
nouveau_bo_move_init(struct nouveau_channel *chan)
{
        struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
        static const struct {
                const char *name;
                int engine;
                u32 oclass;
                int (*exec)(struct nouveau_channel *,
                            struct ttm_buffer_object *,
                            struct ttm_mem_reg *, struct ttm_mem_reg *);
                int (*init)(struct nouveau_channel *, u32 handle);
        } _methods[] = {
                {  "COPY", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
                { "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
                { "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
                {  "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
                { "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init },
                {  "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init },
                {  "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
                {  "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
                {},
                { "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init },
        }, *mthd = _methods;
        const char *name = "CPU";
        int ret;

        do {
                u32 handle = (mthd->engine << 16) | mthd->oclass;
                ret = nouveau_gpuobj_gr_new(chan, handle, mthd->oclass);
                if (ret == 0) {
                        ret = mthd->init(chan, handle);
                        if (ret == 0) {
                                dev_priv->ttm.move = mthd->exec;
                                name = mthd->name;
                                break;
                        }
                }
        } while ((++mthd)->exec);

        NV_INFO(chan->dev, "MM: using %s for buffer copies\n", name);
}
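/* VRAM<->SYSTEM moves cannot be done directly by the copy engines, so they
 * are bounced through a temporary TT (GART) buffer: "flipd" copies into the
 * bounce buffer and then flips the bo into system memory, "flips" does the
 * reverse.
 */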
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
                      bool no_wait_reserve, bool no_wait_gpu,
                      struct ttm_mem_reg *new_mem)
{
        u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
        struct ttm_placement placement;
        struct ttm_mem_reg tmp_mem;
        int ret;

        placement.fpfn = placement.lpfn = 0;
        placement.num_placement = placement.num_busy_placement = 1;
        placement.placement = placement.busy_placement = &placement_memtype;

        tmp_mem = *new_mem;
        tmp_mem.mm_node = NULL;
        ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
        if (ret)
                return ret;

        ret = ttm_tt_bind(bo->ttm, &tmp_mem);
        if (ret)
                goto out;

        ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
        if (ret)
                goto out;

        ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
out:
        ttm_bo_mem_put(bo, &tmp_mem);
        return ret;
}
static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
                      bool no_wait_reserve, bool no_wait_gpu,
                      struct ttm_mem_reg *new_mem)
{
        u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
        struct ttm_placement placement;
        struct ttm_mem_reg tmp_mem;
        int ret;

        placement.fpfn = placement.lpfn = 0;
        placement.num_placement = placement.num_busy_placement = 1;
        placement.placement = placement.busy_placement = &placement_memtype;

        tmp_mem = *new_mem;
        tmp_mem.mm_node = NULL;
        ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
        if (ret)
                return ret;

        ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
        if (ret)
                goto out;

        ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, new_mem);
        if (ret)
                goto out;

out:
        ttm_bo_mem_put(bo, &tmp_mem);
        return ret;
}
void
nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
{
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct nouveau_vma *vma;

        /* ttm can now (stupidly) pass the driver bos it didn't create... */
        if (bo->destroy != nouveau_bo_del_ttm)
                return;

        list_for_each_entry(vma, &nvbo->vma_list, head) {
                if (new_mem && new_mem->mem_type == TTM_PL_VRAM) {
                        nouveau_vm_map(vma, new_mem->mm_node);
                } else
                if (new_mem && new_mem->mem_type == TTM_PL_TT &&
                    nvbo->page_shift == vma->vm->spg_shift) {
                        if (((struct nouveau_mem *)new_mem->mm_node)->sg)
                                nouveau_vm_map_sg_table(vma, 0,
                                        new_mem->num_pages << PAGE_SHIFT,
                                        new_mem->mm_node);
                        else
                                nouveau_vm_map_sg(vma, 0,
                                        new_mem->num_pages << PAGE_SHIFT,
                                        new_mem->mm_node);
                } else {
                        nouveau_vm_unmap(vma);
                }
        }
}
static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
                   struct nouveau_tile_reg **new_tile)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct drm_device *dev = dev_priv->dev;
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        u64 offset = new_mem->start << PAGE_SHIFT;

        *new_tile = NULL;
        if (new_mem->mem_type != TTM_PL_VRAM)
                return 0;

        if (dev_priv->card_type >= NV_10) {
                *new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
                                                nvbo->tile_mode,
                                                nvbo->tile_flags);
        }

        return 0;
}
static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
                      struct nouveau_tile_reg *new_tile,
                      struct nouveau_tile_reg **old_tile)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct drm_device *dev = dev_priv->dev;

        nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj);
        *old_tile = new_tile;
}
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
                bool no_wait_reserve, bool no_wait_gpu,
                struct ttm_mem_reg *new_mem)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct ttm_mem_reg *old_mem = &bo->mem;
        struct nouveau_tile_reg *new_tile = NULL;
        int ret = 0;

        if (dev_priv->card_type < NV_50) {
                ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
                if (ret)
                        return ret;
        }

        /* Fake bo copy. */
        if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
                BUG_ON(bo->mem.mm_node != NULL);
                bo->mem = *new_mem;
                new_mem->mm_node = NULL;
                goto out;
        }

        /* CPU copy if we have no accelerated method available */
        if (!dev_priv->ttm.move) {
                ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
                goto out;
        }

        /* Hardware assisted copy. */
        if (new_mem->mem_type == TTM_PL_SYSTEM)
                ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
        else if (old_mem->mem_type == TTM_PL_SYSTEM)
                ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
        else
                ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);

        if (!ret)
                goto out;

        /* Fallback to software copy. */
        ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);

out:
        if (dev_priv->card_type < NV_50) {
                if (ret)
                        nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
                else
                        nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
        }

        return ret;
}
static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
        return 0;
}
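/* Tell TTM how CPU mappings of a memory region must be set up: system
 * memory needs no io space, AGP GART maps through the aperture, and VRAM on
 * cards with a BAR1 VM first gets a window allocated in that VM.
 */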
static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
        struct drm_device *dev = dev_priv->dev;
        int ret;

        mem->bus.addr = NULL;
        mem->bus.offset = 0;
        mem->bus.size = mem->num_pages << PAGE_SHIFT;
        mem->bus.base = 0;
        mem->bus.is_iomem = false;
        if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
                return -EINVAL;
        switch (mem->mem_type) {
        case TTM_PL_SYSTEM:
                /* System memory */
                return 0;
        case TTM_PL_TT:
                if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
                        mem->bus.offset = mem->start << PAGE_SHIFT;
                        mem->bus.base = dev_priv->gart_info.aper_base;
                        mem->bus.is_iomem = true;
                }
                break;
        case TTM_PL_VRAM:
        {
                struct nouveau_mem *node = mem->mm_node;
                u8 page_shift;

                if (!dev_priv->bar1_vm) {
                        mem->bus.offset = mem->start << PAGE_SHIFT;
                        mem->bus.base = pci_resource_start(dev->pdev, 1);
                        mem->bus.is_iomem = true;
                        break;
                }

                if (dev_priv->card_type >= NV_C0)
                        page_shift = node->page_shift;
                else
                        page_shift = 12;

                ret = nouveau_vm_get(dev_priv->bar1_vm, mem->bus.size,
                                     page_shift, NV_MEM_ACCESS_RW,
                                     &node->bar_vma);
                if (ret)
                        return ret;

                nouveau_vm_map(&node->bar_vma, node);
                if (ret) {
                        nouveau_vm_put(&node->bar_vma);
                        return ret;
                }

                mem->bus.offset = node->bar_vma.offset;
                if (dev_priv->card_type == NV_50) /*XXX*/
                        mem->bus.offset -= 0x0020000000ULL;
                mem->bus.base = pci_resource_start(dev->pdev, 1);
                mem->bus.is_iomem = true;
        }
                break;
        default:
                return -EINVAL;
        }
        return 0;
}
static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
        struct nouveau_mem *node = mem->mm_node;

        if (!dev_priv->bar1_vm || mem->mem_type != TTM_PL_VRAM)
                return;

        if (!node->bar_vma.node)
                return;

        nouveau_vm_unmap(&node->bar_vma);
        nouveau_vm_put(&node->bar_vma);
}
static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct nouveau_bo *nvbo = nouveau_bo(bo);

        /* as long as the bo isn't in vram, and isn't tiled, we've got
         * nothing to do here.
         */
        if (bo->mem.mem_type != TTM_PL_VRAM) {
                if (dev_priv->card_type < NV_50 ||
                    !nouveau_bo_tile_layout(nvbo))
                        return 0;
        }

        /* make sure bo is in mappable vram */
        if (bo->mem.start + bo->mem.num_pages < dev_priv->fb_mappable_pages)
                return 0;

        nvbo->placement.fpfn = 0;
        nvbo->placement.lpfn = dev_priv->fb_mappable_pages;
        nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
        return nouveau_bo_validate(nvbo, false, true, false);
}
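/* Back a ttm_tt with pages and DMA addresses.  Imported (SG) objects are
 * already populated by the exporter, AGP and SWIOTLB configurations have
 * their own helpers, and the default path maps each page with
 * pci_map_page().
 */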
static int
nouveau_ttm_tt_populate(struct ttm_tt *ttm)
{
        struct ttm_dma_tt *ttm_dma = (void *)ttm;
        struct drm_nouveau_private *dev_priv;
        struct drm_device *dev;
        unsigned i;
        int r;
        bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

        if (ttm->state != tt_unpopulated)
                return 0;

        if (slave && ttm->sg) {
                /* make userspace faulting work */
                drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
                                                 ttm_dma->dma_address, ttm->num_pages);
                ttm->state = tt_unbound;
                return 0;
        }

        dev_priv = nouveau_bdev(ttm->bdev);
        dev = dev_priv->dev;

#if __OS_HAS_AGP
        if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
                return ttm_agp_tt_populate(ttm);
        }
#endif

#ifdef CONFIG_SWIOTLB
        if (swiotlb_nr_tbl()) {
                return ttm_dma_populate((void *)ttm, dev->dev);
        }
#endif

        r = ttm_pool_populate(ttm);
        if (r) {
                return r;
        }

        for (i = 0; i < ttm->num_pages; i++) {
                ttm_dma->dma_address[i] = pci_map_page(dev->pdev, ttm->pages[i],
                                                       0, PAGE_SIZE,
                                                       PCI_DMA_BIDIRECTIONAL);
                if (pci_dma_mapping_error(dev->pdev, ttm_dma->dma_address[i])) {
                        while (--i) {
                                pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
                                               PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
                                ttm_dma->dma_address[i] = 0;
                        }
                        ttm_pool_unpopulate(ttm);
                        return -EFAULT;
                }
        }
        return 0;
}
static void
nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
        struct ttm_dma_tt *ttm_dma = (void *)ttm;
        struct drm_nouveau_private *dev_priv;
        struct drm_device *dev;
        unsigned i;
        bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

        if (slave)
                return;

        dev_priv = nouveau_bdev(ttm->bdev);
        dev = dev_priv->dev;

#if __OS_HAS_AGP
        if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
                ttm_agp_tt_unpopulate(ttm);
                return;
        }
#endif

#ifdef CONFIG_SWIOTLB
        if (swiotlb_nr_tbl()) {
                ttm_dma_unpopulate((void *)ttm, dev->dev);
                return;
        }
#endif

        for (i = 0; i < ttm->num_pages; i++) {
                if (ttm_dma->dma_address[i]) {
                        pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
                                       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
                }
        }

        ttm_pool_unpopulate(ttm);
}
void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
{
        struct nouveau_fence *old_fence = NULL;

        nouveau_fence_ref(fence);

        spin_lock(&nvbo->bo.bdev->fence_lock);
        old_fence = nvbo->bo.sync_obj;
        nvbo->bo.sync_obj = fence;
        spin_unlock(&nvbo->bo.bdev->fence_lock);

        nouveau_fence_unref(&old_fence);
}
static void
nouveau_bo_fence_unref(void **sync_obj)
{
        nouveau_fence_unref((struct nouveau_fence **)sync_obj);
}

static void *
nouveau_bo_fence_ref(void *sync_obj)
{
        return nouveau_fence_ref(sync_obj);
}

static bool
nouveau_bo_fence_signalled(void *sync_obj, void *sync_arg)
{
        return nouveau_fence_done(sync_obj);
}

static int
nouveau_bo_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
{
        return nouveau_fence_wait(sync_obj, lazy, intr);
}

static int
nouveau_bo_fence_flush(void *sync_obj, void *sync_arg)
{
        return 0;
}
struct ttm_bo_driver nouveau_bo_driver = {
        .ttm_tt_create = &nouveau_ttm_tt_create,
        .ttm_tt_populate = &nouveau_ttm_tt_populate,
        .ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
        .invalidate_caches = nouveau_bo_invalidate_caches,
        .init_mem_type = nouveau_bo_init_mem_type,
        .evict_flags = nouveau_bo_evict_flags,
        .move_notify = nouveau_bo_move_ntfy,
        .move = nouveau_bo_move,
        .verify_access = nouveau_bo_verify_access,
        .sync_obj_signaled = nouveau_bo_fence_signalled,
        .sync_obj_wait = nouveau_bo_fence_wait,
        .sync_obj_flush = nouveau_bo_fence_flush,
        .sync_obj_unref = nouveau_bo_fence_unref,
        .sync_obj_ref = nouveau_bo_fence_ref,
        .fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
        .io_mem_reserve = &nouveau_ttm_io_mem_reserve,
        .io_mem_free = &nouveau_ttm_io_mem_free,
};
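/* Helpers for tracking the GPU virtual mappings (nouveau_vma) of a buffer
 * object: look one up by VM, create and map a new one, or unmap and drop an
 * existing one.
 */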
struct nouveau_vma *
nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nouveau_vm *vm)
{
        struct nouveau_vma *vma;
        list_for_each_entry(vma, &nvbo->vma_list, head) {
                if (vma->vm == vm)
                        return vma;
        }

        return NULL;
}
int
nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
                   struct nouveau_vma *vma)
{
        const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
        struct nouveau_mem *node = nvbo->bo.mem.mm_node;
        int ret;

        ret = nouveau_vm_get(vm, size, nvbo->page_shift,
                             NV_MEM_ACCESS_RW, vma);
        if (ret)
                return ret;

        if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
                nouveau_vm_map(vma, nvbo->bo.mem.mm_node);
        else if (nvbo->bo.mem.mem_type == TTM_PL_TT) {
                if (node->sg)
                        nouveau_vm_map_sg_table(vma, 0, size, node);
                else
                        nouveau_vm_map_sg(vma, 0, size, node);
        }

        list_add_tail(&vma->head, &nvbo->vma_list);
        return 0;
}
void
nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
        if (vma->node) {
                if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM) {
                        spin_lock(&nvbo->bo.bdev->fence_lock);
                        ttm_bo_wait(&nvbo->bo, false, false, false);
                        spin_unlock(&nvbo->bo.bdev->fence_lock);
                        nouveau_vm_unmap(vma);
                }

                nouveau_vm_put(vma);
                list_del(&vma->head);
        }
}