/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
31 #include "ttm/ttm_bo_driver.h"
32 #include "ttm/ttm_placement.h"
34 #include <linux/highmem.h>
35 #include <linux/wait.h>
36 #include <linux/vmalloc.h>
37 #include <linux/module.h>

void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
        struct ttm_mem_reg *old_mem = &bo->mem;

        if (old_mem->mm_node) {
                spin_lock(&bo->glob->lru_lock);
                drm_mm_put_block(old_mem->mm_node);
                spin_unlock(&bo->glob->lru_lock);
                old_mem->mm_node = NULL;
        }
}

int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
                    bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
{
        struct ttm_tt *ttm = bo->ttm;
        struct ttm_mem_reg *old_mem = &bo->mem;
        uint32_t save_flags = old_mem->placement;
        int ret;

        if (old_mem->mem_type != TTM_PL_SYSTEM) {
                ttm_tt_unbind(ttm);
                ttm_bo_free_old_node(bo);
                ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
                                TTM_PL_MASK_MEM);
                old_mem->mem_type = TTM_PL_SYSTEM;
                save_flags = old_mem->placement;
        }

        ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
        if (unlikely(ret != 0))
                return ret;

        if (new_mem->mem_type != TTM_PL_SYSTEM) {
                ret = ttm_tt_bind(ttm, new_mem);
                if (unlikely(ret != 0))
                        return ret;
        }

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;
        ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);
        return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
                        void **virtual)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        unsigned long bus_offset;
        unsigned long bus_size;
        unsigned long bus_base;
        int ret;
        void *addr;

        *virtual = NULL;
        ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset, &bus_size);
        if (ret || bus_size == 0)
                return ret;

        if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
                addr = (void *)(((u8 *) man->io_addr) + bus_offset);
        else {
                if (mem->placement & TTM_PL_FLAG_WC)
                        addr = ioremap_wc(bus_base + bus_offset, bus_size);
                else
                        addr = ioremap_nocache(bus_base + bus_offset, bus_size);
                if (!addr)
                        return -ENOMEM;
        }
        *virtual = addr;
        return 0;
}

void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
                         void *virtual)
{
        struct ttm_mem_type_manager *man;

        man = &bdev->man[mem->mem_type];

        if (virtual && (man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
                iounmap(virtual);
}

static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
        uint32_t *dstP =
            (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
        uint32_t *srcP =
            (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

        int i;

        /* Copy one page, 32 bits at a time, through the I/O mappings. */
        for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
                iowrite32(ioread32(srcP++), dstP++);
        return 0;
}

static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
                                unsigned long page,
                                pgprot_t prot)
{
        struct page *d = ttm_tt_get_page(ttm, page);
        void *dst;

        if (!d)
                return -ENOMEM;

        src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

#ifdef CONFIG_X86
        dst = kmap_atomic_prot(d, KM_USER0, prot);
#else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                dst = vmap(&d, 1, 0, prot);
        else
                dst = kmap(d);
#endif
        if (!dst)
                return -ENOMEM;

        memcpy_fromio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
        kunmap_atomic(dst, KM_USER0);
#else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                vunmap(dst);
        else
                kunmap(d);
#endif

        return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
                                unsigned long page,
                                pgprot_t prot)
{
        struct page *s = ttm_tt_get_page(ttm, page);
        void *src;

        if (!s)
                return -ENOMEM;

        dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
#ifdef CONFIG_X86
        src = kmap_atomic_prot(s, KM_USER0, prot);
#else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                src = vmap(&s, 1, 0, prot);
        else
                src = kmap(s);
#endif
        if (!src)
                return -ENOMEM;

        memcpy_toio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
        kunmap_atomic(src, KM_USER0);
#else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                vunmap(src);
        else
                kunmap(s);
#endif

        return 0;
}

int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
                       bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
        struct ttm_tt *ttm = bo->ttm;
        struct ttm_mem_reg *old_mem = &bo->mem;
        struct ttm_mem_reg old_copy = *old_mem;
        void *old_iomap;
        void *new_iomap;
        int ret;
        uint32_t save_flags = old_mem->placement;
        unsigned long i;
        unsigned long page;
        unsigned long add = 0;
        int dir;

        ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
        if (ret)
                return ret;
        ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
        if (ret)
                goto out;

        if (old_iomap == NULL && new_iomap == NULL)
                goto out2;
        if (old_iomap == NULL && ttm == NULL)
                goto out2;

        add = 0;
        dir = 1;

        /* Copy backwards if the new range overlaps the tail of the old one. */
        if ((old_mem->mem_type == new_mem->mem_type) &&
            (new_mem->mm_node->start <
             old_mem->mm_node->start + old_mem->mm_node->size)) {
                dir = -1;
                add = new_mem->num_pages - 1;
        }

        for (i = 0; i < new_mem->num_pages; ++i) {
                page = i * dir + add;
                if (old_iomap == NULL) {
                        pgprot_t prot = ttm_io_prot(old_mem->placement,
                                                    PAGE_KERNEL);
                        ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
                                                   prot);
                } else if (new_iomap == NULL) {
                        pgprot_t prot = ttm_io_prot(new_mem->placement,
                                                    PAGE_KERNEL);
                        ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
                                                   prot);
                } else
                        ret = ttm_copy_io_page(new_iomap, old_iomap, page);
                if (ret)
                        goto out1;
        }
out2:
        ttm_bo_free_old_node(bo);

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;
        ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);

        if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
                ttm_tt_unbind(ttm);
                ttm_tt_destroy(ttm);
                bo->ttm = NULL;
        }

out1:
        ttm_mem_reg_iounmap(bdev, new_mem, new_iomap);
out:
        ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
        return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);
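
/*
 * Illustrative sketch (not part of the TTM API): a driver's move() callback
 * in struct ttm_bo_driver will typically try an accelerated blit first and
 * fall back to the helpers above. The example_* names are hypothetical; only
 * ttm_bo_move_ttm() and ttm_bo_move_memcpy() are real.
 *
 *	static int example_bo_move(struct ttm_buffer_object *bo, bool evict,
 *				   bool no_wait, struct ttm_mem_reg *new_mem)
 *	{
 *		struct ttm_mem_reg *old_mem = &bo->mem;
 *
 *		if ((old_mem->mem_type == TTM_PL_TT &&
 *		     new_mem->mem_type == TTM_PL_SYSTEM) ||
 *		    (old_mem->mem_type == TTM_PL_SYSTEM &&
 *		     new_mem->mem_type == TTM_PL_TT))
 *			return ttm_bo_move_ttm(bo, evict, no_wait, new_mem);
 *
 *		if (example_hw_copy(bo, new_mem) == 0)
 *			return 0;
 *
 *		return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
 *	}
 */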

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
        kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
                                      struct ttm_buffer_object **new_obj)
{
        struct ttm_buffer_object *fbo;
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_driver *driver = bdev->driver;

        fbo = kzalloc(sizeof(*fbo), GFP_KERNEL);
        if (unlikely(fbo == NULL))
                return -ENOMEM;

        *fbo = *bo;

        /*
         * Fix up members that we shouldn't copy directly:
         * TODO: Explicit member copy would probably be better here.
         */

        spin_lock_init(&fbo->lock);
        init_waitqueue_head(&fbo->event_queue);
        INIT_LIST_HEAD(&fbo->ddestroy);
        INIT_LIST_HEAD(&fbo->lru);
        INIT_LIST_HEAD(&fbo->swap);
        fbo->vm_node = NULL;

        fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
        if (fbo->mem.mm_node)
                fbo->mem.mm_node->private = (void *)fbo;
        kref_init(&fbo->list_kref);
        kref_init(&fbo->kref);
        fbo->destroy = &ttm_transfered_destroy;

        *new_obj = fbo;
        return 0;
}

pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
#if defined(__i386__) || defined(__x86_64__)
        if (caching_flags & TTM_PL_FLAG_WC)
                tmp = pgprot_writecombine(tmp);
        else if (boot_cpu_data.x86 > 3)
                tmp = pgprot_noncached(tmp);

#elif defined(__powerpc__)
        if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
                pgprot_val(tmp) |= _PAGE_NO_CACHE;
                if (caching_flags & TTM_PL_FLAG_UNCACHED)
                        pgprot_val(tmp) |= _PAGE_GUARDED;
        }
#endif
#if defined(__ia64__)
        if (caching_flags & TTM_PL_FLAG_WC)
                tmp = pgprot_writecombine(tmp);
        else
                tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__)
        if (!(caching_flags & TTM_PL_FLAG_CACHED))
                tmp = pgprot_noncached(tmp);
#endif
        return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);
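
/*
 * Illustrative use (a sketch mirroring what ttm_bo_kmap_ttm() below does):
 * pick a kernel page protection that matches a buffer's caching flags before
 * vmap()ing its pages. The locals "pages" and "npages" are hypothetical.
 *
 *	pgprot_t prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
 *		PAGE_KERNEL : ttm_io_prot(mem->placement, PAGE_KERNEL);
 *	void *vaddr = vmap(pages, npages, 0, prot);
 */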

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
                          unsigned long bus_base,
                          unsigned long bus_offset,
                          unsigned long bus_size,
                          struct ttm_bo_kmap_obj *map)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_reg *mem = &bo->mem;
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

        if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP)) {
                map->bo_kmap_type = ttm_bo_map_premapped;
                map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset);
        } else {
                map->bo_kmap_type = ttm_bo_map_iomap;
                if (mem->placement & TTM_PL_FLAG_WC)
                        map->virtual = ioremap_wc(bus_base + bus_offset,
                                                  bus_size);
                else
                        map->virtual = ioremap_nocache(bus_base + bus_offset,
                                                       bus_size);
        }
        return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
                           unsigned long start_page,
                           unsigned long num_pages,
                           struct ttm_bo_kmap_obj *map)
{
        struct ttm_mem_reg *mem = &bo->mem;
        pgprot_t prot;
        struct ttm_tt *ttm = bo->ttm;
        struct page *d;
        int i;

        BUG_ON(!ttm);
        if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
                /*
                 * We're mapping a single page, and the desired
                 * page protection is consistent with the bo.
                 */

                map->bo_kmap_type = ttm_bo_map_kmap;
                map->page = ttm_tt_get_page(ttm, start_page);
                map->virtual = kmap(map->page);
        } else {
                /*
                 * Populate the part we're mapping.
                 */
                for (i = start_page; i < start_page + num_pages; ++i) {
                        d = ttm_tt_get_page(ttm, i);
                        if (!d)
                                return -ENOMEM;
                }

                /*
                 * We need to use vmap to get the desired page protection
                 * or to make the buffer object look contiguous.
                 */
                prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
                        PAGE_KERNEL :
                        ttm_io_prot(mem->placement, PAGE_KERNEL);
                map->bo_kmap_type = ttm_bo_map_vmap;
                map->virtual = vmap(ttm->pages + start_page, num_pages,
                                    0, prot);
        }
        return (!map->virtual) ? -ENOMEM : 0;
}

int ttm_bo_kmap(struct ttm_buffer_object *bo,
                unsigned long start_page, unsigned long num_pages,
                struct ttm_bo_kmap_obj *map)
{
        int ret;
        unsigned long bus_base;
        unsigned long bus_offset;
        unsigned long bus_size;

        BUG_ON(!list_empty(&bo->swap));
        map->virtual = NULL;
        if (num_pages > bo->num_pages)
                return -EINVAL;
        if (start_page > bo->num_pages)
                return -EINVAL;
#if 0
        if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
                return -EPERM;
#endif
        ret = ttm_bo_pci_offset(bo->bdev, &bo->mem, &bus_base,
                                &bus_offset, &bus_size);
        if (ret)
                return ret;
        if (bus_size == 0) {
                return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
        } else {
                bus_offset += start_page << PAGE_SHIFT;
                bus_size = num_pages << PAGE_SHIFT;
                return ttm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map);
        }
}
EXPORT_SYMBOL(ttm_bo_kmap);
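
/*
 * Illustrative sketch of the usual ttm_bo_kmap()/ttm_bo_kunmap() cycle for a
 * short-lived CPU access; the buffer object is assumed to be reserved by the
 * caller, and "bo" and "size" are hypothetical.
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *	void *virtual;
 *	int ret;
 *
 *	ret = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
 *	if (ret)
 *		return ret;
 *	virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
 *	if (is_iomem)
 *		memset_io((void __iomem *)virtual, 0, size);
 *	else
 *		memset(virtual, 0, size);
 *	ttm_bo_kunmap(&map);
 */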

void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
        if (!map->virtual)
                return;
        switch (map->bo_kmap_type) {
        case ttm_bo_map_iomap:
                iounmap(map->virtual);
                break;
        case ttm_bo_map_vmap:
                vunmap(map->virtual);
                break;
        case ttm_bo_map_kmap:
                kunmap(map->page);
                break;
        case ttm_bo_map_premapped:
                break;
        default:
                BUG();
        }
        map->virtual = NULL;
        map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

int ttm_bo_pfn_prot(struct ttm_buffer_object *bo,
                    unsigned long dst_offset,
                    unsigned long *pfn, pgprot_t *prot)
{
        struct ttm_mem_reg *mem = &bo->mem;
        struct ttm_bo_device *bdev = bo->bdev;
        unsigned long bus_offset;
        unsigned long bus_size;
        unsigned long bus_base;
        int ret;

        ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset,
                                &bus_size);
        if (ret)
                return -EINVAL;

        if (bus_size != 0)
                *pfn = (bus_base + bus_offset + dst_offset) >> PAGE_SHIFT;
        else if (!bo->ttm)
                return -EINVAL;
        else
                *pfn = page_to_pfn(ttm_tt_get_page(bo->ttm,
                                                   dst_offset >> PAGE_SHIFT));

        *prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
                PAGE_KERNEL : ttm_io_prot(mem->placement, PAGE_KERNEL);

        return 0;
}

int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                              void *sync_obj,
                              void *sync_obj_arg,
                              bool evict, bool no_wait,
                              struct ttm_mem_reg *new_mem)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_driver *driver = bdev->driver;
        struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
        struct ttm_mem_reg *old_mem = &bo->mem;
        int ret;
        uint32_t save_flags = old_mem->placement;
        struct ttm_buffer_object *ghost_obj;
        void *tmp_obj = NULL;

        spin_lock(&bo->lock);
        if (bo->sync_obj) {
                tmp_obj = bo->sync_obj;
                bo->sync_obj = NULL;
        }
        bo->sync_obj = driver->sync_obj_ref(sync_obj);
        bo->sync_obj_arg = sync_obj_arg;
        if (evict) {
                /*
                 * On eviction, wait for the move to finish before the old
                 * node and, for fixed memory, the TTM object are torn down.
                 */
                ret = ttm_bo_wait(bo, false, false, false);
                spin_unlock(&bo->lock);
                if (tmp_obj)
                        driver->sync_obj_unref(&tmp_obj);
                if (ret)
                        return ret;

                ttm_bo_free_old_node(bo);
                if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
                    (bo->ttm != NULL)) {
                        ttm_tt_unbind(bo->ttm);
                        ttm_tt_destroy(bo->ttm);
                        bo->ttm = NULL;
                }
        } else {
                /*
                 * This should help pipeline ordinary buffer moves.
                 *
                 * Hang old buffer memory on a new buffer object,
                 * and leave it to be released when the GPU
                 * operation has completed.
                 */

                set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
                spin_unlock(&bo->lock);
                if (tmp_obj)
                        driver->sync_obj_unref(&tmp_obj);

                ret = ttm_buffer_object_transfer(bo, &ghost_obj);
                if (ret)
                        return ret;

                /*
                 * If we're not moving to fixed memory, the TTM object
                 * needs to stay alive. Otherwise hang it on the ghost
                 * bo to be unbound and destroyed.
                 */

                if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
                        ghost_obj->ttm = NULL;
                else
                        bo->ttm = NULL;

                ttm_bo_unreserve(ghost_obj);
                ttm_bo_unref(&ghost_obj);
        }

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;
        ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);
        return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
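
/*
 * Illustrative sketch (hypothetical driver code): after emitting a hardware
 * copy from the old placement to the new one, the driver hands the resulting
 * sync object to ttm_bo_move_accel_cleanup() so the old node is released only
 * once the copy has completed. example_emit_copy() is a made-up helper that
 * is assumed to return a driver sync object in "fence".
 *
 *	static int example_move_accel(struct ttm_buffer_object *bo, bool evict,
 *				      bool no_wait, struct ttm_mem_reg *new_mem)
 *	{
 *		void *fence;
 *		int ret;
 *
 *		ret = example_emit_copy(bo, &bo->mem, new_mem, &fence);
 *		if (ret)
 *			return ret;
 *
 *		return ttm_bo_move_accel_cleanup(bo, fence, NULL,
 *						 evict, no_wait, new_mem);
 *	}
 */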