/*
 * Copyright 2007 Dave Airlied
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *	    Ben Skeggs   <darktama@iinet.net.au>
 *	    Jeremy Kolb  <jkolb@brandeis.edu>
 */
#include "drmP.h"

#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"

#include <linux/log2.h>
#include <linux/slab.h>
static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	ttm_bo_kunmap(&nvbo->kmap);

	if (unlikely(nvbo->gem))
		DRM_ERROR("bo %p still attached to GEM object\n", bo);

	if (nvbo->tile)
		nv10_mem_expire_tiling(dev, nvbo->tile, NULL);

	spin_lock(&dev_priv->ttm.bo_list_lock);
	list_del(&nvbo->head);
	spin_unlock(&dev_priv->ttm.bo_list_lock);
	kfree(nvbo);
}
static void
nouveau_bo_fixup_align(struct drm_device *dev,
		       uint32_t tile_mode, uint32_t tile_flags,
		       int *align, int *size)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/*
	 * Some of the tile_flags have a periodic structure of N*4096 bytes,
	 * align to that as well as the page size.  Align the size to the
	 * appropriate boundaries.  This does imply that sizes are rounded up
	 * 3-7 pages, so be aware of this and do not waste memory by allocating
	 * many small buffers.
	 */
	if (dev_priv->card_type == NV_50) {
		uint32_t block_size = dev_priv->vram_size >> 15;
		int i;

		if (is_power_of_2(block_size)) {
			for (i = 1; i < 10; i++) {
				*align = 12 * i * block_size;
				if (!(*align % 65536))
					break;
			}
		} else {
			for (i = 1; i < 10; i++) {
				*align = 8 * i * block_size;
				if (!(*align % 65536))
					break;
			}
		}

		*size = roundup(*size, *align);
	} else
	if (tile_mode) {
		if (dev_priv->chipset >= 0x40)
			*size = roundup(*size, 64 * tile_mode);
		else if (dev_priv->chipset >= 0x30)
			*size = roundup(*size, 64 * tile_mode);
		else if (dev_priv->chipset >= 0x20)
			*size = roundup(*size, 64 * tile_mode);
		else if (dev_priv->chipset >= 0x10)
			*size = roundup(*size, 32 * tile_mode);
	}

	/* ALIGN works only on powers of two. */
	*size = roundup(*size, PAGE_SIZE);

	if (dev_priv->card_type == NV_50) {
		*size = roundup(*size, 65536);
		*align = max(65536, *align);
	}
}
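/*
 * Worked example of the NV_50 alignment search above (illustrative only, the
 * numbers are assumed): with 256 MiB of VRAM, block_size = 256 MiB >> 15 =
 * 8192, which is a power of two.  The loop then tries *align = 12 * i * 8192:
 * i = 1 gives 98304, which is not a multiple of 65536; i = 2 gives 196608 =
 * 3 * 65536, so the search stops with a 192 KiB alignment and the size is
 * rounded up to a multiple of that.
 */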
int
nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
	       int size, int align, uint32_t flags, uint32_t tile_mode,
	       uint32_t tile_flags, bool no_vm, bool mappable,
	       struct nouveau_bo **pnvbo)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_bo *nvbo;
	int ret;

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
	if (!nvbo)
		return -ENOMEM;
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	nvbo->mappable = mappable;
	nvbo->no_vm = no_vm;
	nvbo->tile_mode = tile_mode;
	nvbo->tile_flags = tile_flags;

	nouveau_bo_fixup_align(dev, tile_mode, tile_flags, &align, &size);
	align >>= PAGE_SHIFT;

	nvbo->placement.fpfn = 0;
	nvbo->placement.lpfn = mappable ? dev_priv->fb_mappable_pages : 0;
	nouveau_bo_placement_set(nvbo, flags, 0);

	nvbo->channel = chan;
	ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
			  ttm_bo_type_device, &nvbo->placement, align, 0,
			  false, NULL, size, nouveau_bo_del_ttm);
	nvbo->channel = NULL;
	if (ret) {
		/* ttm will call nouveau_bo_del_ttm if it fails.. */
		return ret;
	}

	spin_lock(&dev_priv->ttm.bo_list_lock);
	list_add_tail(&nvbo->head, &dev_priv->ttm.bo_list);
	spin_unlock(&dev_priv->ttm.bo_list_lock);
	*pnvbo = nvbo;
	return 0;
}
static void
set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
{
	*n = 0;

	if (type & TTM_PL_FLAG_VRAM)
		pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
	if (type & TTM_PL_FLAG_TT)
		pl[(*n)++] = TTM_PL_FLAG_TT | flags;
	if (type & TTM_PL_FLAG_SYSTEM)
		pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
}
void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
	struct ttm_placement *pl = &nvbo->placement;
	uint32_t flags = TTM_PL_MASK_CACHING |
		(nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

	pl->placement = nvbo->placements;
	set_placement_list(nvbo->placements, &pl->num_placement,
			   type, flags);

	pl->busy_placement = nvbo->busy_placements;
	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
			   type | busy, flags);
}
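/*
 * Illustrative example (not taken from a caller in this file): for a buffer
 * that may live in either VRAM or GART, an unpinned nvbo after
 * nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_TT, 0) ends
 * up with
 *	nvbo->placements[] = { TTM_PL_FLAG_VRAM | TTM_PL_MASK_CACHING,
 *			       TTM_PL_FLAG_TT   | TTM_PL_MASK_CACHING }
 * and num_placement == 2; the busy list is identical because busy == 0.
 */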
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
		NV_ERROR(nouveau_bdev(bo->bdev)->dev,
			 "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
			 1 << bo->mem.mem_type, memtype);
		return -EINVAL;
	}

	if (nvbo->pin_refcnt++)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		goto out;

	nouveau_bo_placement_set(nvbo, memtype, 0);

	ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			dev_priv->fb_aper_free -= bo->mem.size;
			break;
		case TTM_PL_TT:
			dev_priv->gart_info.aper_free -= bo->mem.size;
			break;
		default:
			break;
		}
	}
	ttm_bo_unreserve(bo);
out:
	if (ret)
		nvbo->pin_refcnt--;
	return ret;
}
int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	if (--nvbo->pin_refcnt)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		return ret;

	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

	ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			dev_priv->fb_aper_free += bo->mem.size;
			break;
		case TTM_PL_TT:
			dev_priv->gart_info.aper_free += bo->mem.size;
			break;
		default:
			break;
		}
	}

	ttm_bo_unreserve(bo);
	return ret;
}
int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
	int ret;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return ret;

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
	ttm_bo_kunmap(&nvbo->kmap);
}
u16
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread16_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite16_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread32_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite32_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}
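/*
 * Minimal usage sketch of the helpers above (illustrative only; the sizes,
 * flags and lack of error handling are assumptions, not taken from a caller
 * in this file):
 *
 *	struct nouveau_bo *nvbo = NULL;
 *	int ret;
 *
 *	ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM,
 *			     0, 0x0000, false, true, &nvbo);
 *	if (ret == 0 && nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM) == 0 &&
 *	    nouveau_bo_map(nvbo) == 0) {
 *		nouveau_bo_wr32(nvbo, 0, 0xdeadbeef);
 *		nouveau_bo_unmap(nvbo);
 *	}
 */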
static struct ttm_backend *
nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	switch (dev_priv->gart_info.type) {
	case NOUVEAU_GART_AGP:
		return ttm_agp_backend_init(bdev, dev->agp->bridge);
	case NOUVEAU_GART_SGDMA:
		return nouveau_sgdma_init_ttm(dev);
	default:
		NV_ERROR(dev, "Unknown GART type %d\n",
			 dev_priv->gart_info.type);
		break;
	}

	return NULL;
}
static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	/* We'll do this from user space. */
	return 0;
}
static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			 struct ttm_mem_type_manager *man)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE |
			     TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;

		man->io_offset = drm_get_resource_start(dev, 1);
		man->io_size = drm_get_resource_len(dev, 1);
		if (man->io_size > dev_priv->vram_size)
			man->io_size = dev_priv->vram_size;

		man->gpu_offset = dev_priv->vm_vram_base;
		break;
	case TTM_PL_TT:
		switch (dev_priv->gart_info.type) {
		case NOUVEAU_GART_AGP:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
			man->available_caching = TTM_PL_FLAG_UNCACHED;
			man->default_caching = TTM_PL_FLAG_UNCACHED;
			break;
		case NOUVEAU_GART_SGDMA:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_CMA;
			man->available_caching = TTM_PL_MASK_CACHING;
			man->default_caching = TTM_PL_FLAG_CACHED;
			break;
		default:
			NV_ERROR(dev, "Unknown GART type: %d\n",
				 dev_priv->gart_info.type);
			return -EINVAL;
		}

		man->io_offset = dev_priv->gart_info.aper_base;
		man->io_size = dev_priv->gart_info.aper_size;
		man->gpu_offset = dev_priv->vm_gart_base;
		break;
	default:
		NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}
static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
					 TTM_PL_FLAG_SYSTEM);
		break;
	default:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
		break;
	}

	*pl = nvbo->placement;
}
/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
 * TTM_PL_{VRAM,TT} directly.
 */
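/*
 * Rough sizing note (an illustration, not part of the original comment): the
 * M2MF copy below transfers whole pages, one line per page, and a single
 * pass is limited to 2047 lines.  With 4 KiB pages that is roughly 8 MiB per
 * pass, so larger buffers are copied in several passes while src_offset and
 * dst_offset advance by PAGE_SIZE * line_count each time.
 */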
static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
			      struct nouveau_bo *nvbo, bool evict, bool no_wait,
			      struct ttm_mem_reg *new_mem)
{
	struct nouveau_fence *fence = NULL;
	int ret;

	ret = nouveau_fence_new(chan, &fence, true);
	if (ret)
		return ret;

	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL,
					evict, no_wait, new_mem);
	if (nvbo->channel && nvbo->channel != chan)
		ret = nouveau_fence_wait(fence, NULL, false, false);
	nouveau_fence_unref((void *)&fence);
	return ret;
}
static inline uint32_t
nouveau_bo_mem_ctxdma(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
		      struct ttm_mem_reg *mem)
{
	/* The kernel's own channel uses the static ctxdma objects. */
	if (chan == nouveau_bdev(nvbo->bo.bdev)->channel) {
		if (mem->mem_type == TTM_PL_TT)
			return NvDmaGART;
		return NvDmaVRAM;
	}

	if (mem->mem_type == TTM_PL_TT)
		return chan->gart_handle;
	return chan->vram_handle;
}
static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
		     int no_wait, struct ttm_mem_reg *new_mem)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_channel *chan;
	uint64_t src_offset, dst_offset;
	uint32_t page_count;
	int ret;

	chan = nvbo->channel;
	if (!chan || nvbo->tile_flags || nvbo->no_vm)
		chan = dev_priv->channel;

	src_offset = old_mem->mm_node->start << PAGE_SHIFT;
	dst_offset = new_mem->mm_node->start << PAGE_SHIFT;
	if (chan != dev_priv->channel) {
		if (old_mem->mem_type == TTM_PL_TT)
			src_offset += dev_priv->vm_gart_base;
		else
			src_offset += dev_priv->vm_vram_base;

		if (new_mem->mem_type == TTM_PL_TT)
			dst_offset += dev_priv->vm_gart_base;
		else
			dst_offset += dev_priv->vm_vram_base;
	}

	ret = RING_SPACE(chan, 3);
	if (ret)
		return ret;
	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
	OUT_RING(chan, nouveau_bo_mem_ctxdma(nvbo, chan, old_mem));
	OUT_RING(chan, nouveau_bo_mem_ctxdma(nvbo, chan, new_mem));

	if (dev_priv->card_type >= NV_50) {
		ret = RING_SPACE(chan, 4);
		if (ret)
			return ret;
		BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
		OUT_RING(chan, 1);
		BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);
		OUT_RING(chan, 1);
	}

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		if (dev_priv->card_type >= NV_50) {
			ret = RING_SPACE(chan, 3);
			if (ret)
				return ret;
			BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
			OUT_RING(chan, upper_32_bits(src_offset));
			OUT_RING(chan, upper_32_bits(dst_offset));
		}
		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;
		BEGIN_RING(chan, NvSubM2MF,
				 NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
		OUT_RING(chan, lower_32_bits(src_offset));
		OUT_RING(chan, lower_32_bits(dst_offset));
		OUT_RING(chan, PAGE_SIZE); /* src_pitch */
		OUT_RING(chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING(chan, PAGE_SIZE); /* line_length */
		OUT_RING(chan, line_count);
		OUT_RING(chan, (1<<8)|(1<<0));
		OUT_RING(chan, 0);
		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING(chan, 0);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait, new_mem);
}
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait, struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
	if (ret)
		return ret;

	ret = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait, &tmp_mem);
	if (ret)
		goto out;

	ret = ttm_bo_move_ttm(bo, evict, no_wait, new_mem);
out:
	if (tmp_mem.mm_node) {
		spin_lock(&bo->bdev->glob->lru_lock);
		drm_mm_put_block(tmp_mem.mm_node);
		spin_unlock(&bo->bdev->glob->lru_lock);
	}

	return ret;
}
static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait, struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
	if (ret)
		return ret;

	ret = ttm_bo_move_ttm(bo, evict, no_wait, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem);
out:
	if (tmp_mem.mm_node) {
		spin_lock(&bo->bdev->glob->lru_lock);
		drm_mm_put_block(tmp_mem.mm_node);
		spin_unlock(&bo->bdev->glob->lru_lock);
	}

	return ret;
}
static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
		   struct nouveau_tile_reg **new_tile)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	uint64_t offset;
	int ret;

	if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) {
		/* Nothing to do. */
		*new_tile = NULL;
		return 0;
	}

	offset = new_mem->mm_node->start << PAGE_SHIFT;

	if (dev_priv->card_type == NV_50) {
		ret = nv50_mem_vm_bind_linear(dev,
					      offset + dev_priv->vm_vram_base,
					      new_mem->size, nvbo->tile_flags,
					      offset);
		if (ret)
			return ret;
	} else if (dev_priv->card_type >= NV_10) {
		*new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
						nvbo->tile_mode);
	}

	return 0;
}
static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
		      struct nouveau_tile_reg *new_tile,
		      struct nouveau_tile_reg **old_tile)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;

	if (dev_priv->card_type >= NV_10 &&
	    dev_priv->card_type < NV_50) {
		if (*old_tile)
			nv10_mem_expire_tiling(dev, *old_tile, bo->sync_obj);

		*old_tile = new_tile;
	}
}
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
		bool no_wait, struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_tile_reg *new_tile = NULL;
	int ret = 0;

	ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
	if (ret)
		return ret;

	/* Software copy if the card isn't up and running yet. */
	if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE ||
	    !dev_priv->channel) {
		ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
		goto out;
	}

	/* Fake bo copy. */
	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		BUG_ON(bo->mem.mm_node != NULL);
		bo->mem = *new_mem;
		new_mem->mm_node = NULL;
		goto out;
	}

	/* Hardware assisted copy. */
	if (new_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait, new_mem);
	else if (old_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flips(bo, evict, intr, no_wait, new_mem);
	else
		ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem);

	if (!ret)
		goto out;

	/* Fallback to software copy. */
	ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);

out:
	if (ret)
		nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
	else
		nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);

	return ret;
}
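/*
 * Summary of the move strategy above (descriptive only): the tile/VM state is
 * bound first; the copy is then done by memcpy while the card is still coming
 * up, by simply adopting the new mem_reg when there is no backing ttm yet, or
 * by the M2MF engine, bouncing through a GART placement when either end is
 * system memory.  Any hardware-path failure falls back to ttm_bo_move_memcpy.
 */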
static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	return 0;
}
struct ttm_bo_driver nouveau_bo_driver = {
	.create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
	.invalidate_caches = nouveau_bo_invalidate_caches,
	.init_mem_type = nouveau_bo_init_mem_type,
	.evict_flags = nouveau_bo_evict_flags,
	.move = nouveau_bo_move,
	.verify_access = nouveau_bo_verify_access,
	.sync_obj_signaled = nouveau_fence_signalled,
	.sync_obj_wait = nouveau_fence_wait,
	.sync_obj_flush = nouveau_fence_flush,
	.sync_obj_unref = nouveau_fence_unref,
	.sync_obj_ref = nouveau_fence_ref,
};