/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	ttm_bo_mem_put(bo, &bo->mem);
}
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    bool evict,
		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);
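
/*
 * Usage sketch (compiled out): how a hypothetical driver ->move() callback
 * might route moves through the helpers in this file. The function name and
 * the policy are illustrative assumptions, not part of the TTM API; real
 * drivers usually add an accelerated copy path as well.
 */
#if 0
static int example_bo_move(struct ttm_buffer_object *bo,
			   bool evict, bool interruptible,
			   bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	/* System <-> TT moves only rebind the TTM; no data copy is needed. */
	if ((bo->mem.mem_type == TTM_PL_SYSTEM &&
	     new_mem->mem_type == TTM_PL_TT) ||
	    (bo->mem.mem_type == TTM_PL_TT &&
	     new_mem->mem_type == TTM_PL_SYSTEM))
		return ttm_bo_move_ttm(bo, evict, no_wait_gpu, new_mem);

	/* Otherwise fall back to a CPU copy. */
	return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
}
#endif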
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
	if (likely(man->io_reserve_fastpath))
		return 0;

	if (interruptible)
		return mutex_lock_interruptible(&man->io_reserve_mutex);

	mutex_lock(&man->io_reserve_mutex);
	return 0;
}
EXPORT_SYMBOL(ttm_mem_io_lock);
void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
	if (likely(man->io_reserve_fastpath))
		return;

	mutex_unlock(&man->io_reserve_mutex);
}
EXPORT_SYMBOL(ttm_mem_io_unlock);
static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
	struct ttm_buffer_object *bo;

	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
		return -EAGAIN;

	bo = list_first_entry(&man->io_reserve_lru,
			      struct ttm_buffer_object,
			      io_reserve_lru);
	list_del_init(&bo->io_reserve_lru);
	ttm_bo_unmap_virtual_locked(bo);

	return 0;
}
int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
		       struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (!bdev->driver->io_mem_reserve)
		return 0;
	if (likely(man->io_reserve_fastpath))
		return bdev->driver->io_mem_reserve(bdev, mem);

	if (bdev->driver->io_mem_reserve &&
	    mem->bus.io_reserved_count++ == 0) {
retry:
		ret = bdev->driver->io_mem_reserve(bdev, mem);
		if (ret == -EAGAIN) {
			ret = ttm_mem_io_evict(man);
			if (ret == 0)
				goto retry;
		}
	}
	return ret;
}
EXPORT_SYMBOL(ttm_mem_io_reserve);
void ttm_mem_io_free(struct ttm_bo_device *bdev,
		     struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (likely(man->io_reserve_fastpath))
		return;

	if (bdev->driver->io_mem_reserve &&
	    --mem->bus.io_reserved_count == 0 &&
	    bdev->driver->io_mem_free)
		bdev->driver->io_mem_free(bdev, mem);
}
EXPORT_SYMBOL(ttm_mem_io_free);
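
/*
 * Usage sketch (compiled out): ttm_mem_io_reserve()/ttm_mem_io_free() are
 * refcounted and, when the fast path is disabled, must be called under the
 * manager's io_reserve lock, as ttm_bo_kmap() does below. The helper name
 * is an illustrative assumption.
 */
#if 0
static int example_reserve_io(struct ttm_bo_device *bdev,
			      struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret;

	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bdev, mem);
	ttm_mem_io_unlock(man);
	return ret;
}
#endif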
int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;
	int ret;

	if (!mem->bus.io_reserved_vm) {
		struct ttm_mem_type_manager *man =
			&bo->bdev->man[mem->mem_type];

		ret = ttm_mem_io_reserve(bo->bdev, mem);
		if (unlikely(ret != 0))
			return ret;
		mem->bus.io_reserved_vm = true;
		if (man->use_io_reserve_lru)
			list_add_tail(&bo->io_reserve_lru,
				      &man->io_reserve_lru);
	}
	return 0;
}
void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (mem->bus.io_reserved_vm) {
		mem->bus.io_reserved_vm = false;
		list_del_init(&bo->io_reserve_lru);
		ttm_mem_io_free(bo->bdev, mem);
	}
}
int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret;
	void *addr;

	*virtual = NULL;
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bdev, mem);
	ttm_mem_io_unlock(man);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
		if (mem->placement & TTM_PL_FLAG_WC)
			addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
		else
			addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
		if (!addr) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return -ENOMEM;
		}
	}
	*virtual = addr;
	return 0;
}
void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			 void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && mem->bus.addr == NULL)
		iounmap(virtual);
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bdev, mem);
	ttm_mem_io_unlock(man);
}
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

	int i;
	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}
static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct page *d = ttm->pages[page];
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

#ifdef CONFIG_X86
	dst = kmap_atomic_prot(d, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		dst = vmap(&d, 1, 0, prot);
	else
		dst = kmap(d);
#endif
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(dst);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(dst);
	else
		kunmap(d);
#endif

	return 0;
}
static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct page *s = ttm->pages[page];
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
#ifdef CONFIG_X86
	src = kmap_atomic_prot(s, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		src = vmap(&s, 1, 0, prot);
	else
		src = kmap(s);
#endif
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(src);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(src);
	else
		kunmap(s);
#endif

	return 0;
}
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       bool evict, bool no_wait_gpu,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	/*
	 * Single TTM move. NOP.
	 */
	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;

	/*
	 * Move nonexistent data. NOP.
	 */
	if (old_iomap == NULL && ttm == NULL)
		goto out2;

	/*
	 * TTM might be null for moves within the same region.
	 */
	if (ttm && ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			goto out1;
	}

	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(old_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(new_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		if (ret)
			goto out1;
	}
	mb();
out2:
	old_copy = *old_mem;
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
		ttm_tt_unbind(ttm);
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);

	/*
	 * On error, keep the mm node!
	 */
	if (!ret)
		ttm_bo_mem_put(bo, &old_copy);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);
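
/*
 * Worked example for the copy direction in ttm_bo_move_memcpy() above:
 * moving a 4-page buffer one page upwards within the same memory type
 * (old start 0, new start 1) satisfies new->start < old->start + old->size,
 * so dir == -1 and add == 3. The loop then copies pages 3, 2, 1, 0, writing
 * each destination page only after the overlapping source page has been
 * read, analogous to memmove().
 */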
static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}
/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 *
 * Returns:
 * !0: Failure.
 */
static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	int ret;

	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	*fbo = *bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	INIT_LIST_HEAD(&fbo->io_reserve_lru);
	drm_vma_node_reset(&fbo->vma_node);
	atomic_set(&fbo->cpu_writers, 0);

	spin_lock(&bdev->fence_lock);
	if (bo->sync_obj)
		fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
	else
		fbo->sync_obj = NULL;
	spin_unlock(&bdev->fence_lock);
	kref_init(&fbo->list_kref);
	kref_init(&fbo->kref);
	fbo->destroy = &ttm_transfered_destroy;
	fbo->acc_size = 0;
	fbo->resv = &fbo->ttm_resv;
	reservation_object_init(fbo->resv);
	ret = ww_mutex_trylock(&fbo->resv->lock);
	WARN_ON(!ret);

	*new_obj = fbo;
	return 0;
}
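
/*
 * Lifecycle note (see ttm_bo_move_accel_cleanup() below): the ghost object
 * created here inherits the old placement and a reference to the bo's sync
 * object, is unreserved and unreferenced right away, and is finally freed
 * through ttm_transfered_destroy() once its sync object signals, which is
 * what releases the space of the old placement.
 */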
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
#if defined(__i386__) || defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else if (boot_cpu_data.x86 > 3)
		tmp = pgprot_noncached(tmp);

#elif defined(__powerpc__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
		pgprot_val(tmp) |= _PAGE_NO_CACHE;
		if (caching_flags & TTM_PL_FLAG_UNCACHED)
			pgprot_val(tmp) |= _PAGE_GUARDED;
	}
#endif
#if defined(__ia64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__) || defined(__mips__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED))
		tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);
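
/*
 * Usage sketch (compiled out): deriving CPU mapping protection bits from a
 * bo's placement flags, as ttm_bo_kmap_ttm() does below. The helper name is
 * an illustrative assumption.
 */
#if 0
static void *example_vmap_pages(struct ttm_tt *ttm, unsigned long start,
				unsigned long num, uint32_t placement)
{
	/* Write-combined or uncached placements need matching PTE bits. */
	pgprot_t prot = ttm_io_prot(placement, PAGE_KERNEL);

	return vmap(ttm->pages + start, num, 0, prot);
}
#endif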
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->placement & TTM_PL_FLAG_WC)
			map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
						  size);
		else
			map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
						       size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}
static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem; pgprot_t prot;
	struct ttm_tt *ttm = bo->ttm;
	int ret;

	BUG_ON(!ttm);

	if (ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			return ret;
	}

	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
			PAGE_KERNEL :
			ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];
	unsigned long offset, size;
	int ret;

	BUG_ON(!list_empty(&bo->swap));
	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
		return -EPERM;
#endif
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	ttm_mem_io_unlock(man);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);
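
/*
 * Usage sketch (compiled out): a typical ttm_bo_kmap()/ttm_bo_kunmap()
 * pair using the ttm_kmap_obj_virtual() accessor from ttm_bo_api.h. The
 * helper name is an illustrative assumption.
 */
#if 0
static int example_clear_bo(struct ttm_buffer_object *bo)
{
	struct ttm_bo_kmap_obj map;
	bool is_iomem;
	void *virtual;
	int ret;

	ret = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
	if (ret)
		return ret;

	virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
	if (is_iomem)
		memset_io((void __iomem *)virtual, 0,
			  bo->num_pages << PAGE_SHIFT);
	else
		memset(virtual, 0, bo->num_pages << PAGE_SHIFT);

	ttm_bo_kunmap(&map);
	return 0;
}
#endif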
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	struct ttm_buffer_object *bo = map->bo;
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];

	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
	ttm_mem_io_unlock(man);
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      void *sync_obj,
			      bool evict,
			      bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	struct ttm_buffer_object *ghost_obj;
	void *tmp_obj = NULL;

	spin_lock(&bdev->fence_lock);
	if (bo->sync_obj) {
		tmp_obj = bo->sync_obj;
		bo->sync_obj = NULL;
	}
	bo->sync_obj = driver->sync_obj_ref(sync_obj);
	if (evict) {
		ret = ttm_bo_wait(bo, false, false, false);
		spin_unlock(&bdev->fence_lock);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);
		if (ret)
			return ret;

		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
		    (bo->ttm != NULL)) {
			ttm_tt_unbind(bo->ttm);
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	} else {
		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
		spin_unlock(&bdev->fence_lock);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
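
/*
 * Usage sketch (compiled out): a driver's accelerated ->move() typically
 * emits a GPU copy, fences it, and hands the fence (sync object) to
 * ttm_bo_move_accel_cleanup() so that the old placement is released only
 * once the copy has completed. example_emit_copy() is an illustrative
 * assumption standing in for driver-specific copy/fence code.
 */
#if 0
static int example_move_blit(struct ttm_buffer_object *bo,
			     bool evict, bool no_wait_gpu,
			     struct ttm_mem_reg *new_mem)
{
	void *fence;
	int ret;

	ret = example_emit_copy(bo, &bo->mem, new_mem, &fence);
	if (ret)
		return ret;

	/* On success this installs *new_mem in bo->mem and defers freeing
	 * the old node until the fence signals (via a ghost object). */
	return ttm_bo_move_accel_cleanup(bo, fence, evict,
					 no_wait_gpu, new_mem);
}
#endif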