/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *	    Ben Skeggs   <darktama@iinet.net.au>
 *	    Jeremy Kolb  <jkolb@brandeis.edu>
 */

#include "drmP.h"

#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_mm.h"
#include "nouveau_vm.h"

#include <linux/log2.h>
#include <linux/slab.h>
static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	if (unlikely(nvbo->gem))
		DRM_ERROR("bo %p still attached to GEM object\n", bo);

	nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
	kfree(nvbo);
}
static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
		       int *align, int *size)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);

	if (dev_priv->card_type < NV_50) {
		if (nvbo->tile_mode) {
			if (dev_priv->chipset >= 0x40) {
				*align = 65536;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (dev_priv->chipset >= 0x30) {
				*align = 32768;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (dev_priv->chipset >= 0x20) {
				*align = 16384;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (dev_priv->chipset >= 0x10) {
				*align = 16384;
				*size = roundup(*size, 32 * nvbo->tile_mode);
			}
		}
	} else {
		*size = roundup(*size, (1 << nvbo->page_shift));
		*align = max((1 << nvbo->page_shift), *align);
	}

	*size = roundup(*size, PAGE_SIZE);
}
int
nouveau_bo_new(struct drm_device *dev, int size, int align,
	       uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
	       struct nouveau_bo **pnvbo)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_bo *nvbo;
	int ret;

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
	if (!nvbo)
		return -ENOMEM;
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	INIT_LIST_HEAD(&nvbo->vma_list);
	nvbo->tile_mode = tile_mode;
	nvbo->tile_flags = tile_flags;
	nvbo->bo.bdev = &dev_priv->ttm.bdev;

	nvbo->page_shift = 12;
	if (dev_priv->bar1_vm) {
		if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
			nvbo->page_shift = dev_priv->bar1_vm->lpg_shift;
	}

	nouveau_bo_fixup_align(nvbo, flags, &align, &size);
	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
	nouveau_bo_placement_set(nvbo, flags, 0);

	ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
			  ttm_bo_type_device, &nvbo->placement,
			  align >> PAGE_SHIFT, 0, false, NULL, size,
			  nouveau_bo_del_ttm);
	if (ret) {
		/* ttm will call nouveau_bo_del_ttm if it fails.. */
		return ret;
	}

	*pnvbo = nvbo;
	return 0;
}
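
/*
 * Illustrative usage sketch (assumed caller code, not part of this file):
 * a typical consumer allocates a buffer object, pins it into VRAM and maps
 * it for CPU access; the size, alignment and placement flags below are only
 * an example and error handling is abbreviated.
 *
 *	struct nouveau_bo *nvbo = NULL;
 *	int ret;
 *
 *	ret = nouveau_bo_new(dev, 64 * 1024, 0, TTM_PL_FLAG_VRAM, 0, 0, &nvbo);
 *	if (ret == 0)
 *		ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM);
 *	if (ret == 0)
 *		ret = nouveau_bo_map(nvbo);
 */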
static void
set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
{
	*n = 0;

	if (type & TTM_PL_FLAG_VRAM)
		pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
	if (type & TTM_PL_FLAG_TT)
		pl[(*n)++] = TTM_PL_FLAG_TT | flags;
	if (type & TTM_PL_FLAG_SYSTEM)
		pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
}
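
/*
 * For example, a request for TTM_PL_FLAG_VRAM | TTM_PL_FLAG_TT expands to a
 * two-entry list with the VRAM placement first, so TTM prefers VRAM and only
 * falls back to the TT placement when VRAM cannot be used.
 */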
static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	int vram_pages = dev_priv->vram_size >> PAGE_SHIFT;

	if (dev_priv->card_type == NV_10 &&
	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
	    nvbo->bo.mem.num_pages < vram_pages / 2) {
		/*
		 * Make sure that the color and depth buffers are handled
		 * by independent memory controller units. Up to a 9x
		 * speed up when alpha-blending and depth-test are enabled
		 * at the same time.
		 */
		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
			nvbo->placement.fpfn = vram_pages / 2;
			nvbo->placement.lpfn = ~0;
		} else {
			nvbo->placement.fpfn = 0;
			nvbo->placement.lpfn = vram_pages / 2;
		}
	}
}
void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
	struct ttm_placement *pl = &nvbo->placement;
	uint32_t flags = TTM_PL_MASK_CACHING |
		(nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

	pl->placement = nvbo->placements;
	set_placement_list(nvbo->placements, &pl->num_placement,
			   type, flags);

	pl->busy_placement = nvbo->busy_placements;
	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
			   type | busy, flags);

	set_placement_range(nvbo, type);
}
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
		NV_ERROR(nouveau_bdev(bo->bdev)->dev,
			 "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
			 1 << bo->mem.mem_type, memtype);
		return -EINVAL;
	}

	if (nvbo->pin_refcnt++)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		goto out;

	nouveau_bo_placement_set(nvbo, memtype, 0);

	ret = nouveau_bo_validate(nvbo, false, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			dev_priv->fb_aper_free -= bo->mem.size;
			break;
		case TTM_PL_TT:
			dev_priv->gart_info.aper_free -= bo->mem.size;
			break;
		default:
			break;
		}
	}
	ttm_bo_unreserve(bo);
out:
	if (ret)
		nvbo->pin_refcnt--;
	return ret;
}
int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	if (--nvbo->pin_refcnt)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		return ret;

	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

	ret = nouveau_bo_validate(nvbo, false, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			dev_priv->fb_aper_free += bo->mem.size;
			break;
		case TTM_PL_TT:
			dev_priv->gart_info.aper_free += bo->mem.size;
			break;
		default:
			break;
		}
	}

	ttm_bo_unreserve(bo);
	return ret;
}
int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
	int ret;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return ret;

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
	if (nvbo)
		ttm_bo_kunmap(&nvbo->kmap);
}

int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
		    bool no_wait_reserve, bool no_wait_gpu)
{
	int ret;

	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, interruptible,
			      no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	return 0;
}
u16
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread16_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite16_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread32_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite32_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}
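
/*
 * Illustrative sketch (assumed caller code, not part of this file): once a
 * buffer has been mapped with nouveau_bo_map(), the accessors above index
 * the mapping in units of their own word size, so index i in
 * nouveau_bo_wr32() addresses byte offset i * 4.
 *
 *	u32 val;
 *
 *	if (nouveau_bo_map(nvbo) == 0) {
 *		nouveau_bo_wr32(nvbo, 0, 0xdeadbeef);
 *		val = nouveau_bo_rd32(nvbo, 0);
 *		nouveau_bo_unmap(nvbo);
 *	}
 */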
static struct ttm_backend *
nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	switch (dev_priv->gart_info.type) {
#if __OS_HAS_AGP
	case NOUVEAU_GART_AGP:
		return ttm_agp_backend_init(bdev, dev->agp->bridge);
#endif
	case NOUVEAU_GART_PDMA:
	case NOUVEAU_GART_HW:
		return nouveau_sgdma_init_ttm(dev);
	default:
		NV_ERROR(dev, "Unknown GART type %d\n",
			 dev_priv->gart_info.type);
		break;
	}

	return NULL;
}

static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	/* We'll do this from user space. */
	return 0;
}
static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			 struct ttm_mem_type_manager *man)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		if (dev_priv->card_type >= NV_50) {
			man->func = &nouveau_vram_manager;
			man->io_reserve_fastpath = false;
			man->use_io_reserve_lru = true;
		} else {
			man->func = &ttm_bo_manager_func;
		}
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	case TTM_PL_TT:
		if (dev_priv->card_type >= NV_50)
			man->func = &nouveau_gart_manager;
		else
			man->func = &ttm_bo_manager_func;
		switch (dev_priv->gart_info.type) {
		case NOUVEAU_GART_AGP:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
						 TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
			break;
		case NOUVEAU_GART_PDMA:
		case NOUVEAU_GART_HW:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_CMA;
			man->available_caching = TTM_PL_MASK_CACHING;
			man->default_caching = TTM_PL_FLAG_CACHED;
			break;
		default:
			NV_ERROR(dev, "Unknown GART type: %d\n",
				 dev_priv->gart_info.type);
			return -EINVAL;
		}
		break;
	default:
		NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}
static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
					 TTM_PL_FLAG_SYSTEM);
		break;
	default:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
		break;
	}

	*pl = nvbo->placement;
}
/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
 * TTM_PL_{VRAM,TT} directly.
 */

static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
			      struct nouveau_bo *nvbo, bool evict,
			      bool no_wait_reserve, bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct nouveau_fence *fence = NULL;
	int ret;

	ret = nouveau_fence_new(chan, &fence, true);
	if (ret)
		return ret;

	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict,
					no_wait_reserve, no_wait_gpu, new_mem);
	nouveau_fence_unref(&fence);
	return ret;
}
static int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 12);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0238, 2);
		OUT_RING  (chan, upper_32_bits(dst_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x030c, 6);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* line_length */
		OUT_RING  (chan, line_count);
		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0300, 1);
		OUT_RING  (chan, 0x00100110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}
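
/*
 * Worked example for the chunking above (illustrative note): the copy is
 * expressed as lines of PAGE_SIZE bytes with at most 2047 lines per
 * submission, so with 4 KiB pages a 16 MiB buffer (4096 pages) goes out as
 * three bursts of 2047, 2047 and 2 lines, with src_offset/dst_offset
 * advanced by PAGE_SIZE * line_count after each burst.
 */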
static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 length = (new_mem->num_pages << PAGE_SHIFT);
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	int ret;

	while (length) {
		u32 amount, stride, height;

		amount  = min(length, (u64)(4 * 1024 * 1024));
		stride  = 16 * 4;
		height  = amount / stride;

		if (new_mem->mem_type == TTM_PL_VRAM &&
		    nouveau_bo_tile_layout(nvbo)) {
			ret = RING_SPACE(chan, 8);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x0200, 7);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, stride);
			OUT_RING  (chan, height);
			OUT_RING  (chan, 1);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
		} else {
			ret = RING_SPACE(chan, 2);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
			OUT_RING  (chan, 1);
		}
		if (old_mem->mem_type == TTM_PL_VRAM &&
		    nouveau_bo_tile_layout(nvbo)) {
			ret = RING_SPACE(chan, 8);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x021c, 7);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, stride);
			OUT_RING  (chan, height);
			OUT_RING  (chan, 1);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
		} else {
			ret = RING_SPACE(chan, 2);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);
			OUT_RING  (chan, 1);
		}

		ret = RING_SPACE(chan, 14);
		if (ret)
			return ret;

		BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, upper_32_bits(dst_offset));
		BEGIN_RING(chan, NvSubM2MF, 0x030c, 8);
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		OUT_RING  (chan, stride);
		OUT_RING  (chan, stride);
		OUT_RING  (chan, stride);
		OUT_RING  (chan, height);
		OUT_RING  (chan, 0x00000101);
		OUT_RING  (chan, 0x00000000);
		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING  (chan, 0);

		length -= amount;
		src_offset += amount;
		dst_offset += amount;
	}

	return 0;
}
static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
		      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
{
	if (mem->mem_type == TTM_PL_TT)
		return chan->gart_handle;
	return chan->vram_handle;
}
static int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	u32 src_offset = old_mem->start << PAGE_SHIFT;
	u32 dst_offset = new_mem->start << PAGE_SHIFT;
	u32 page_count = new_mem->num_pages;
	int ret;

	ret = RING_SPACE(chan, 3);
	if (ret)
		return ret;

	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_RING(chan, NvSubM2MF,
				 NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
		OUT_RING  (chan, src_offset);
		OUT_RING  (chan, dst_offset);
		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* line_length */
		OUT_RING  (chan, line_count);
		OUT_RING  (chan, 0x00000101);
		OUT_RING  (chan, 0x00000000);
		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING  (chan, 0);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}
static int
nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
		   struct ttm_mem_reg *mem, struct nouveau_vma *vma)
{
	struct nouveau_mem *node = mem->mm_node;
	int ret;

	ret = nouveau_vm_get(chan->vm, mem->num_pages << PAGE_SHIFT,
			     node->page_shift, NV_MEM_ACCESS_RO, vma);
	if (ret)
		return ret;

	if (mem->mem_type == TTM_PL_VRAM)
		nouveau_vm_map(vma, node);
	else
		nouveau_vm_map_sg(vma, 0, mem->num_pages << PAGE_SHIFT,
				  node, node->pages);

	return 0;
}
static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
		     bool no_wait_reserve, bool no_wait_gpu,
		     struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_channel *chan;
	int ret;

	chan = nvbo->channel;
	if (!chan) {
		chan = dev_priv->channel;
		mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
	}

	/* create temporary vmas for the transfer and attach them to the
	 * old nouveau_mem node, these will get cleaned up after ttm has
	 * destroyed the ttm_mem_reg
	 */
	if (dev_priv->card_type >= NV_50) {
		struct nouveau_mem *node = old_mem->mm_node;

		ret = nouveau_vma_getmap(chan, nvbo, old_mem, &node->vma[0]);
		if (ret)
			goto out;

		ret = nouveau_vma_getmap(chan, nvbo, new_mem, &node->vma[1]);
		if (ret)
			goto out;
	}

	if (dev_priv->card_type < NV_50)
		ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
	else
	if (dev_priv->card_type < NV_C0)
		ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
	else
		ret = nvc0_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
	if (ret == 0) {
		ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
						    no_wait_reserve,
						    no_wait_gpu, new_mem);
	}

out:
	if (chan == dev_priv->channel)
		mutex_unlock(&chan->mutex);
	return ret;
}
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_reserve, bool no_wait_gpu,
		      struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}
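
/*
 * Note on the two helpers here: nouveau_bo_move_flipd() above handles moves
 * towards system memory by first copying into a temporary TT placement with
 * the accelerated M2MF path and then letting ttm_bo_move_ttm() complete the
 * final hop, while nouveau_bo_move_flips() below performs the same two steps
 * in the opposite order for moves out of system memory.
 */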
static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_reserve, bool no_wait_gpu,
		      struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, new_mem);
	if (ret)
		goto out;

out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}
static void
nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = new_mem->mm_node;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_vma *vma;

	list_for_each_entry(vma, &nvbo->vma_list, head) {
		if (new_mem->mem_type == TTM_PL_VRAM) {
			nouveau_vm_map(vma, new_mem->mm_node);
		} else
		if (new_mem->mem_type == TTM_PL_TT &&
		    nvbo->page_shift == vma->vm->spg_shift) {
			nouveau_vm_map_sg(vma, 0, new_mem->
					  num_pages << PAGE_SHIFT,
					  node, node->pages);
		} else {
			nouveau_vm_unmap(vma);
		}
	}
}
static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
		   struct nouveau_tile_reg **new_tile)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 offset = new_mem->start << PAGE_SHIFT;

	*new_tile = NULL;
	if (new_mem->mem_type != TTM_PL_VRAM)
		return 0;

	if (dev_priv->card_type >= NV_10) {
		*new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
						nvbo->tile_mode,
						nvbo->tile_flags);
	}

	return 0;
}

static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
		      struct nouveau_tile_reg *new_tile,
		      struct nouveau_tile_reg **old_tile)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;

	nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj);
	*old_tile = new_tile;
}
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
		bool no_wait_reserve, bool no_wait_gpu,
		struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_tile_reg *new_tile = NULL;
	int ret = 0;

	if (dev_priv->card_type < NV_50) {
		ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
		if (ret)
			return ret;
	}

	/* Fake bo copy. */
	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		BUG_ON(bo->mem.mm_node != NULL);
		bo->mem = *new_mem;
		new_mem->mm_node = NULL;
		goto out;
	}

	/* Software copy if the card isn't up and running yet. */
	if (!dev_priv->channel) {
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
		goto out;
	}

	/* Hardware assisted copy. */
	if (new_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
	else if (old_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
	else
		ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);

	if (!ret)
		goto out;

	/* Fallback to software copy. */
	ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);

out:
	if (dev_priv->card_type < NV_50) {
		if (ret)
			nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
		else
			nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
	}

	return ret;
}
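
/*
 * Note on the move strategy above: unpopulated system-memory buffers are
 * "moved" by simply adopting the new ttm_mem_reg, a plain memcpy is used
 * while no kernel channel exists yet, otherwise one of the accelerated M2MF
 * paths is tried with ttm_bo_move_memcpy() as the final fallback; on
 * pre-NV50 chips the tile region is rebound around the move through
 * nouveau_bo_vm_bind()/nouveau_bo_vm_cleanup().
 */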
static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	return 0;
}
static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;
	int ret;

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		return 0;
	case TTM_PL_TT:
#if __OS_HAS_AGP
		if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = dev_priv->gart_info.aper_base;
			mem->bus.is_iomem = true;
		}
#endif
		break;
	case TTM_PL_VRAM:
	{
		struct nouveau_mem *node = mem->mm_node;
		u8 page_shift;

		if (!dev_priv->bar1_vm) {
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = pci_resource_start(dev->pdev, 1);
			mem->bus.is_iomem = true;
			break;
		}

		if (dev_priv->card_type == NV_C0)
			page_shift = node->page_shift;
		else
			page_shift = 12;

		ret = nouveau_vm_get(dev_priv->bar1_vm, mem->bus.size,
				     page_shift, NV_MEM_ACCESS_RW,
				     &node->bar_vma);
		if (ret)
			return ret;

		nouveau_vm_map(&node->bar_vma, node);
		if (ret) {
			nouveau_vm_put(&node->bar_vma);
			return ret;
		}

		mem->bus.offset = node->bar_vma.offset;
		if (dev_priv->card_type == NV_50) /*XXX*/
			mem->bus.offset -= 0x0020000000ULL;
		mem->bus.base = pci_resource_start(dev->pdev, 1);
		mem->bus.is_iomem = true;
	}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct nouveau_mem *node = mem->mm_node;

	if (!dev_priv->bar1_vm || mem->mem_type != TTM_PL_VRAM)
		return;

	if (!node->bar_vma.node)
		return;

	nouveau_vm_unmap(&node->bar_vma);
	nouveau_vm_put(&node->bar_vma);
}
static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	/* as long as the bo isn't in vram, and isn't tiled, we've got
	 * nothing to do here.
	 */
	if (bo->mem.mem_type != TTM_PL_VRAM) {
		if (dev_priv->card_type < NV_50 ||
		    !nouveau_bo_tile_layout(nvbo))
			return 0;
	}

	/* make sure bo is in mappable vram */
	if (bo->mem.start + bo->mem.num_pages < dev_priv->fb_mappable_pages)
		return 0;

	nvbo->placement.fpfn = 0;
	nvbo->placement.lpfn = dev_priv->fb_mappable_pages;
	nouveau_bo_placement_set(nvbo, TTM_PL_VRAM, 0);
	return nouveau_bo_validate(nvbo, false, true, false);
}
void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
{
	struct nouveau_fence *old_fence;

	if (likely(fence))
		nouveau_fence_ref(fence);

	spin_lock(&nvbo->bo.bdev->fence_lock);
	old_fence = nvbo->bo.sync_obj;
	nvbo->bo.sync_obj = fence;
	spin_unlock(&nvbo->bo.bdev->fence_lock);

	nouveau_fence_unref(&old_fence);
}
struct ttm_bo_driver nouveau_bo_driver = {
	.create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
	.invalidate_caches = nouveau_bo_invalidate_caches,
	.init_mem_type = nouveau_bo_init_mem_type,
	.evict_flags = nouveau_bo_evict_flags,
	.move_notify = nouveau_bo_move_ntfy,
	.move = nouveau_bo_move,
	.verify_access = nouveau_bo_verify_access,
	.sync_obj_signaled = __nouveau_fence_signalled,
	.sync_obj_wait = __nouveau_fence_wait,
	.sync_obj_flush = __nouveau_fence_flush,
	.sync_obj_unref = __nouveau_fence_unref,
	.sync_obj_ref = __nouveau_fence_ref,
	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
	.io_mem_free = &nouveau_ttm_io_mem_free,
};
struct nouveau_vma *
nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nouveau_vm *vm)
{
	struct nouveau_vma *vma;
	list_for_each_entry(vma, &nvbo->vma_list, head) {
		if (vma->vm == vm)
			return vma;
	}

	return NULL;
}

int
nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
		   struct nouveau_vma *vma)
{
	const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	struct nouveau_mem *node = nvbo->bo.mem.mm_node;
	int ret;

	ret = nouveau_vm_get(vm, size, nvbo->page_shift,
			     NV_MEM_ACCESS_RW, vma);
	if (ret)
		return ret;

	if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
		nouveau_vm_map(vma, nvbo->bo.mem.mm_node);
	else
	if (nvbo->bo.mem.mem_type == TTM_PL_TT)
		nouveau_vm_map_sg(vma, 0, size, node, node->pages);

	list_add_tail(&vma->head, &nvbo->vma_list);

	return 0;
}
void
nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
	if (vma->node) {
		if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM) {
			spin_lock(&nvbo->bo.bdev->fence_lock);
			ttm_bo_wait(&nvbo->bo, false, false, false);
			spin_unlock(&nvbo->bo.bdev->fence_lock);
			nouveau_vm_unmap(vma);
		}

		nouveau_vm_put(vma);
		list_del(&vma->head);
	}
}
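
/*
 * Illustrative usage sketch (assumed caller code, not part of this file):
 * per-VM mappings are tracked on nvbo->vma_list, so callers pair
 * nouveau_bo_vma_add() with nouveau_bo_vma_del() and can look up an existing
 * mapping first with nouveau_bo_vma_find().
 *
 *	struct nouveau_vma *vma = nouveau_bo_vma_find(nvbo, vm);
 *
 *	if (!vma) {
 *		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
 *		ret = vma ? nouveau_bo_vma_add(nvbo, vm, vma) : -ENOMEM;
 *	}
 *	...
 *	nouveau_bo_vma_del(nvbo, vma);
 *	kfree(vma);
 */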