/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/reservation.h>
void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	ttm_bo_mem_put(bo, &bo->mem);
}

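/*
 * ttm_bo_move_ttm - move a buffer object by rebinding its TTM.
 *
 * The buffer is first brought back to the system domain (unbinding the
 * TTM and freeing the old node) and then bound to @new_mem.  This only
 * works when both placements are reachable through a struct ttm_tt,
 * i.e. not for fixed/iomem-only memory types.
 */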
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    bool evict,
		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

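/*
 * ttm_mem_io_lock / ttm_mem_io_unlock serialize the driver's
 * io_mem_reserve / io_mem_free callbacks for memory types that cannot
 * keep all regions reserved at once.  With io_reserve_fastpath set the
 * lock degenerates to a no-op.
 */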
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
	if (likely(man->io_reserve_fastpath))
		return 0;

	if (interruptible)
		return mutex_lock_interruptible(&man->io_reserve_mutex);

	mutex_lock(&man->io_reserve_mutex);
	return 0;
}
EXPORT_SYMBOL(ttm_mem_io_lock);

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
	if (likely(man->io_reserve_fastpath))
		return;

	mutex_unlock(&man->io_reserve_mutex);
}
EXPORT_SYMBOL(ttm_mem_io_unlock);

static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
	struct ttm_buffer_object *bo;

	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
		return -EAGAIN;

	bo = list_first_entry(&man->io_reserve_lru,
			      struct ttm_buffer_object,
			      io_reserve_lru);
	list_del_init(&bo->io_reserve_lru);
	ttm_bo_unmap_virtual_locked(bo);

	return 0;
}

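/*
 * ttm_mem_io_reserve - reserve the driver I/O region backing @mem.
 *
 * On the slow path the reservation is refcounted per region; when the
 * driver returns -EAGAIN, one buffer is evicted from the io_reserve_lru
 * and the reservation is retried.
 */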
int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
		       struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (!bdev->driver->io_mem_reserve)
		return 0;
	if (likely(man->io_reserve_fastpath))
		return bdev->driver->io_mem_reserve(bdev, mem);

	if (bdev->driver->io_mem_reserve &&
	    mem->bus.io_reserved_count++ == 0) {
retry:
		ret = bdev->driver->io_mem_reserve(bdev, mem);
		if (ret == -EAGAIN) {
			ret = ttm_mem_io_evict(man);
			if (ret == 0)
				goto retry;
		}
	}
	return ret;
}
EXPORT_SYMBOL(ttm_mem_io_reserve);

void ttm_mem_io_free(struct ttm_bo_device *bdev,
		     struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (likely(man->io_reserve_fastpath))
		return;

	if (bdev->driver->io_mem_reserve &&
	    --mem->bus.io_reserved_count == 0 &&
	    bdev->driver->io_mem_free)
		bdev->driver->io_mem_free(bdev, mem);
}
EXPORT_SYMBOL(ttm_mem_io_free);

int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;
	int ret;

	if (!mem->bus.io_reserved_vm) {
		struct ttm_mem_type_manager *man =
			&bo->bdev->man[mem->mem_type];

		ret = ttm_mem_io_reserve(bo->bdev, mem);
		if (unlikely(ret != 0))
			return ret;
		mem->bus.io_reserved_vm = true;
		if (man->use_io_reserve_lru)
			list_add_tail(&bo->io_reserve_lru,
				      &man->io_reserve_lru);
	}
	return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (mem->bus.io_reserved_vm) {
		mem->bus.io_reserved_vm = false;
		list_del_init(&bo->io_reserve_lru);
		ttm_mem_io_free(bo->bdev, mem);
	}
}

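/*
 * ttm_mem_reg_ioremap / ttm_mem_reg_iounmap map and unmap an entire
 * memory region for a CPU copy.  For regions that are not iomem,
 * *virtual is left NULL and the caller copies through the TTM pages
 * instead.
 */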
static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret;
	void *addr;

	*virtual = NULL;
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bdev, mem);
	ttm_mem_io_unlock(man);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
		if (mem->placement & TTM_PL_FLAG_WC)
			addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
		else
			addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
		if (!addr) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return -ENOMEM;
		}
	}
	*virtual = addr;
	return 0;
}

static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
				void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && mem->bus.addr == NULL)
		iounmap(virtual);
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bdev, mem);
	ttm_mem_io_unlock(man);
}

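/*
 * Per-page copy helpers for ttm_bo_move_memcpy: io-to-io, io-to-TTM and
 * TTM-to-io.  The TTM page is mapped with the page protection handed in
 * by the caller so the copy respects the placement's caching attributes.
 */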
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
	int i;

	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}

static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct page *d = ttm->pages[page];
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

#ifdef CONFIG_X86
	dst = kmap_atomic_prot(d, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		dst = vmap(&d, 1, 0, prot);
	else
		dst = kmap(d);
#endif
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(dst);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(dst);
	else
		kunmap(d);
#endif

	return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct page *s = ttm->pages[page];
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
#ifdef CONFIG_X86
	src = kmap_atomic_prot(s, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		src = vmap(&s, 1, 0, prot);
	else
		src = kmap(s);
#endif
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(src);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(src);
	else
		kunmap(s);
#endif

	return 0;
}

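/*
 * ttm_bo_move_memcpy - fallback move performed entirely by the CPU.
 *
 * Both regions are ioremapped where needed and copied page by page,
 * walking backwards when source and destination overlap within the same
 * memory type.  A minimal driver-side sketch; the move callback shape
 * and the hardware-copy helper are illustrative, not defined here:
 *
 *	static int mydrv_bo_move(struct ttm_buffer_object *bo, bool evict,
 *				 bool interruptible, bool no_wait_gpu,
 *				 struct ttm_mem_reg *new_mem)
 *	{
 *		if (mydrv_copy_with_engine(bo, new_mem) == 0)
 *			return 0;
 *		return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
 *	}
 */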
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       bool evict, bool no_wait_gpu,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	/*
	 * Single TTM move. NOP.
	 */
	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;

	/*
	 * Don't move nonexistent data. Clear destination instead.
	 */
	if (old_iomap == NULL &&
	    (ttm == NULL || (ttm->state == tt_unpopulated &&
			     !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
		memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
		goto out2;
	}

	/*
	 * TTM might be null for moves within the same region.
	 */
	if (ttm && ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			goto out1;
	}

	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(old_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(new_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		if (ret)
			goto out1;
	}
	mb();
out2:
	old_copy = *old_mem;
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
		ttm_tt_unbind(ttm);
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);

	/*
	 * On error, keep the mm node!
	 */
	if (!ret)
		ttm_bo_mem_put(bo, &old_copy);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 *
 * Returns:
 * !0: Failure.
 */
static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	int ret;

	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	*fbo = *bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	INIT_LIST_HEAD(&fbo->io_reserve_lru);
	drm_vma_node_reset(&fbo->vma_node);
	atomic_set(&fbo->cpu_writers, 0);

	kref_init(&fbo->list_kref);
	kref_init(&fbo->kref);
	fbo->destroy = &ttm_transfered_destroy;
	fbo->acc_size = 0;
	fbo->resv = &fbo->ttm_resv;
	reservation_object_init(fbo->resv);
	ret = ww_mutex_trylock(&fbo->resv->lock);
	WARN_ON(!ret);

	*new_obj = fbo;
	return 0;
}

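/*
 * ttm_io_prot - translate TTM caching flags into a page protection.
 *
 * Cached placements are returned unchanged; TTM_PL_FLAG_WC selects a
 * write-combined protection and anything else falls back to uncached,
 * subject to the per-architecture rules below.  Typical (illustrative)
 * use when building a CPU mapping:
 *
 *	vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
 *					vma->vm_page_prot);
 */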
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
	/* Cached mappings need no adjustment */
	if (caching_flags & TTM_PL_FLAG_CACHED)
		return tmp;

#if defined(__i386__) || defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else if (boot_cpu_data.x86 > 3)
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
    defined(__powerpc__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__) || defined(__mips__)
	tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->placement & TTM_PL_FLAG_WC)
			map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
						  size);
		else
			map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
						       size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem; pgprot_t prot;
	struct ttm_tt *ttm = bo->ttm;
	int ret;

	BUG_ON(!ttm);

	if (ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			return ret;
	}

	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

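/*
 * ttm_bo_kmap - map part of a buffer object into kernel address space.
 *
 * Depending on the placement this is a premapped bus address, an
 * ioremap, a single-page kmap or a vmap; ttm_bo_kunmap() undoes
 * whichever variant was chosen.  Minimal usage sketch (error handling
 * and iomem-aware access are the caller's responsibility):
 *
 *	struct ttm_bo_kmap_obj map;
 *
 *	if (ttm_bo_kmap(bo, 0, bo->num_pages, &map) == 0) {
 *		... access map.virtual, using _io accessors if iomem ...
 *		ttm_bo_kunmap(&map);
 *	}
 */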
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];
	unsigned long offset, size;
	int ret;

	BUG_ON(!list_empty(&bo->swap));
	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !capable(CAP_SYS_ADMIN))
		return -EPERM;
#endif
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	ttm_mem_io_unlock(man);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	struct ttm_buffer_object *bo = map->bo;
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];

	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
	ttm_mem_io_unlock(man);
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

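/*
 * ttm_bo_move_accel_cleanup - finish an accelerated (engine copy) move.
 *
 * The copy fence is attached to the buffer's reservation object.  On
 * eviction the buffer is waited on and the old node is released right
 * away; otherwise the old placement is handed to a "ghost" buffer
 * object created by ttm_buffer_object_transfer(), so it is only
 * released once the fence signals, keeping ordinary moves pipelined.
 */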
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      struct fence *fence,
			      bool evict,
			      bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	struct ttm_buffer_object *ghost_obj;

	reservation_object_add_excl_fence(bo->resv, fence);
	if (evict) {
		ret = ttm_bo_wait(bo, false, false, false);
		if (ret)
			return ret;

		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
		    (bo->ttm != NULL)) {
			ttm_tt_unbind(bo->ttm);
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	} else {
		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		reservation_object_add_excl_fence(ghost_obj->resv, fence);

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);