/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	ttm_bo_mem_put(bo, &bo->mem);
}
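
/*
 * Move a buffer between system memory and a TTM-backed placement by
 * rebinding its ttm_tt rather than copying data: unbind from the old
 * placement (if any), adjust the caching state, then bind to the new
 * placement.
 */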
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    bool evict, bool no_wait_reserve,
		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);
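
/*
 * Serialization for the driver's io_mem_reserve()/io_mem_free() hooks.
 * Drivers that can handle concurrent reservations set io_reserve_fastpath
 * and skip the mutex entirely; otherwise io_reserve_mutex is taken,
 * optionally interruptibly.
 */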
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
	if (likely(man->io_reserve_fastpath))
		return 0;

	if (interruptible)
		return mutex_lock_interruptible(&man->io_reserve_mutex);

	mutex_lock(&man->io_reserve_mutex);
	return 0;
}
void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
	if (likely(man->io_reserve_fastpath))
		return;

	mutex_unlock(&man->io_reserve_mutex);
}
static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
	struct ttm_buffer_object *bo;

	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
		return -EAGAIN;

	bo = list_first_entry(&man->io_reserve_lru,
			      struct ttm_buffer_object,
			      io_reserve_lru);
	list_del_init(&bo->io_reserve_lru);
	ttm_bo_unmap_virtual_locked(bo);

	return 0;
}
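
/*
 * Reference-counted wrapper around the driver's io_mem_reserve() hook.
 * Only the first reservation of a ttm_mem_reg calls into the driver; if
 * the driver reports -EAGAIN (aperture space exhausted), an LRU entry is
 * evicted via ttm_mem_io_evict() and the reservation is retried.
 */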
static int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
			      struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (!bdev->driver->io_mem_reserve)
		return 0;
	if (likely(man->io_reserve_fastpath))
		return bdev->driver->io_mem_reserve(bdev, mem);

	if (bdev->driver->io_mem_reserve &&
	    mem->bus.io_reserved_count++ == 0) {
retry:
		ret = bdev->driver->io_mem_reserve(bdev, mem);
		if (ret == -EAGAIN) {
			ret = ttm_mem_io_evict(man);
			if (ret == 0)
				goto retry;
		}
	}
	return ret;
}
static void ttm_mem_io_free(struct ttm_bo_device *bdev,
			    struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (likely(man->io_reserve_fastpath))
		return;

	if (bdev->driver->io_mem_reserve &&
	    --mem->bus.io_reserved_count == 0 &&
	    bdev->driver->io_mem_free)
		bdev->driver->io_mem_free(bdev, mem);
}
int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;
	int ret;

	if (!mem->bus.io_reserved_vm) {
		struct ttm_mem_type_manager *man =
			&bo->bdev->man[mem->mem_type];

		ret = ttm_mem_io_reserve(bo->bdev, mem);
		if (unlikely(ret != 0))
			return ret;
		mem->bus.io_reserved_vm = true;
		if (man->use_io_reserve_lru)
			list_add_tail(&bo->io_reserve_lru,
				      &man->io_reserve_lru);
	}
	return 0;
}
void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (mem->bus.io_reserved_vm) {
		mem->bus.io_reserved_vm = false;
		list_del_init(&bo->io_reserve_lru);
		ttm_mem_io_free(bo->bdev, mem);
	}
}
int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret;
	void *addr;

	*virtual = NULL;
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bdev, mem);
	ttm_mem_io_unlock(man);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
		if (mem->placement & TTM_PL_FLAG_WC)
			addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
		else
			addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
		if (!addr) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return -ENOMEM;
		}
	}
	*virtual = addr;
	return 0;
}
void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			 void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && mem->bus.addr == NULL)
		iounmap(virtual);
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bdev, mem);
	ttm_mem_io_unlock(man);
}
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

	int i;
	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}
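
/*
 * The two helpers below copy a single page between an ioremapped region
 * and a ttm_tt page array, mapping the system page with the protection
 * requested by the corresponding placement. On x86 this can use
 * kmap_atomic_prot(); elsewhere vmap() is needed whenever a non-default
 * protection is required.
 */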
static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct page *d = ttm->pages[page];
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

#ifdef CONFIG_X86
	dst = kmap_atomic_prot(d, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		dst = vmap(&d, 1, 0, prot);
	else
		dst = kmap(d);
#endif
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(dst);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(dst);
	else
		kunmap(d);
#endif

	return 0;
}
static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct page *s = ttm->pages[page];
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));

#ifdef CONFIG_X86
	src = kmap_atomic_prot(s, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		src = vmap(&s, 1, 0, prot);
	else
		src = kmap(s);
#endif
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(src);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(src);
	else
		kunmap(s);
#endif

	return 0;
}
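
/*
 * CPU-copy fallback for moves that have no accelerated blit. Both the
 * old and the new placement are mapped, the data is copied page by page
 * (backwards when the two regions overlap), and the old node is
 * released. For fixed (non-system) destinations the ttm_tt is unbound
 * and destroyed, since its pages no longer back the buffer object.
 */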
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       bool evict, bool no_wait_reserve, bool no_wait_gpu,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;
	if (old_iomap == NULL && ttm == NULL)
		goto out2;

	if (ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			goto out1;
	}

	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(old_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(new_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		if (ret)
			goto out1;
	}
	mb();
out2:
	old_copy = *old_mem;
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
		ttm_tt_unbind(ttm);
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
	ttm_bo_mem_put(bo, &old_copy);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);
static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}
/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 *
 * Returns:
 * !0: Failure.
 */
static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;

	fbo = kzalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	*fbo = *bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	init_waitqueue_head(&fbo->event_queue);
	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	INIT_LIST_HEAD(&fbo->io_reserve_lru);
	fbo->vm_node = NULL;
	atomic_set(&fbo->cpu_writers, 0);

	fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
	kref_init(&fbo->list_kref);
	kref_init(&fbo->kref);
	fbo->destroy = &ttm_transfered_destroy;

	*new_obj = fbo;
	return 0;
}
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
#if defined(__i386__) || defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else if (boot_cpu_data.x86 > 3)
		tmp = pgprot_noncached(tmp);

#elif defined(__powerpc__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
		pgprot_val(tmp) |= _PAGE_NO_CACHE;
		if (caching_flags & TTM_PL_FLAG_UNCACHED)
			pgprot_val(tmp) |= _PAGE_GUARDED;
	}
#endif
#if defined(__ia64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED))
		tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);
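
/*
 * kmap helpers for ttm_bo_kmap(): ttm_bo_ioremap() maps io memory
 * (premapped, write-combined or uncached), while ttm_bo_kmap_ttm() maps
 * system pages with kmap() for a single cached page or vmap() otherwise.
 */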
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->placement & TTM_PL_FLAG_WC)
			map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
						  size);
		else
			map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
						       size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}
static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem; pgprot_t prot;
	struct ttm_tt *ttm = bo->ttm;
	int ret;

	BUG_ON(!ttm);

	if (ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			return ret;
	}

	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
			PAGE_KERNEL :
			ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}
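
/*
 * Map num_pages pages of a buffer object into kernel address space,
 * using ttm_bo_ioremap() for io memory and ttm_bo_kmap_ttm() for system
 * memory. A minimal usage sketch, assuming the bo is already reserved
 * and idle:
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *
 *	if (ttm_bo_kmap(bo, 0, bo->num_pages, &map) == 0) {
 *		void *virt = ttm_kmap_obj_virtual(&map, &is_iomem);
 *		if (!is_iomem)
 *			memset(virt, 0, bo->num_pages << PAGE_SHIFT);
 *		ttm_bo_kunmap(&map);
 *	}
 */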
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];
	unsigned long offset, size;
	int ret;

	BUG_ON(!list_empty(&bo->swap));
	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
		return -EPERM;
#endif
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	ttm_mem_io_unlock(man);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	struct ttm_buffer_object *bo = map->bo;
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];

	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
	ttm_mem_io_unlock(man);
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);
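
/*
 * Finish an accelerated (GPU) move that has been fenced with sync_obj.
 * On eviction the old placement is synchronously waited for and freed;
 * otherwise a "ghost" buffer object is created to hold the old placement
 * and the fence, so the old space can be released once the copy has
 * completed, without stalling the pipeline.
 */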
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      void *sync_obj,
			      void *sync_obj_arg,
			      bool evict, bool no_wait_reserve,
			      bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	struct ttm_buffer_object *ghost_obj;
	void *tmp_obj = NULL;

	spin_lock(&bdev->fence_lock);
	if (bo->sync_obj) {
		tmp_obj = bo->sync_obj;
		bo->sync_obj = NULL;
	}
	bo->sync_obj = driver->sync_obj_ref(sync_obj);
	bo->sync_obj_arg = sync_obj_arg;
	if (evict) {
		ret = ttm_bo_wait(bo, false, false, false);
		spin_unlock(&bdev->fence_lock);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);
		if (ret)
			return ret;

		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
		    (bo->ttm != NULL)) {
			ttm_tt_unbind(bo->ttm);
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	} else {
		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
		spin_unlock(&bdev->fence_lock);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);