/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	if (old_mem->mm_node) {
		spin_lock(&bo->glob->lru_lock);
		drm_mm_put_block(old_mem->mm_node);
		spin_unlock(&bo->glob->lru_lock);
	}
	old_mem->mm_node = NULL;
}
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		/* Unbind from the old placement and release its node. */
		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);
int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	unsigned long bus_offset;
	unsigned long bus_size;
	unsigned long bus_base;
	int ret;
	void *addr;

	*virtual = NULL;
	ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset, &bus_size);
	if (ret || bus_size == 0)
		return ret;

	if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
		addr = (void *)(((u8 *) man->io_addr) + bus_offset);
	else {
		if (mem->placement & TTM_PL_FLAG_WC)
			addr = ioremap_wc(bus_base + bus_offset, bus_size);
		else
			addr = ioremap_nocache(bus_base + bus_offset, bus_size);
		if (!addr)
			return -ENOMEM;
	}
	*virtual = addr;
	return 0;
}
void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			 void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && (man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
		iounmap(virtual);
}
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

	int i;
	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}
static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct page *d = ttm_tt_get_page(ttm, page);
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

#ifdef CONFIG_X86
	dst = kmap_atomic_prot(d, KM_USER0, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		dst = vmap(&d, 1, 0, prot);
	else
		dst = kmap(d);
#endif
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(dst, KM_USER0);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(dst);
	else
		kunmap(d);
#endif

	return 0;
}
static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct page *s = ttm_tt_get_page(ttm, page);
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));

#ifdef CONFIG_X86
	src = kmap_atomic_prot(s, KM_USER0, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		src = vmap(&s, 1, 0, prot);
	else
		src = kmap(s);
#endif
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(src, KM_USER0);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(src);
	else
		kunmap(s);
#endif

	return 0;
}
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;
	if (old_iomap == NULL && ttm == NULL)
		goto out2;

	/*
	 * Copy the highest page first if the new range starts below the
	 * old one within the same memory type.
	 */
	dir = 1;
	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->mm_node->start <
	     old_mem->mm_node->start + old_mem->mm_node->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(old_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(new_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		if (ret)
			goto out1;
	}
	mb();
out2:
	ttm_bo_free_old_node(bo);

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
		ttm_tt_unbind(ttm);
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, new_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);
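
/*
 * Illustrative sketch (not part of the original file): a driver's
 * ttm_bo_driver::move callback without a hardware copy engine can route
 * TT <-> SYSTEM moves through ttm_bo_move_ttm() and fall back to
 * ttm_bo_move_memcpy() for everything else. The function name and the
 * "no accelerated copy" assumption are hypothetical; real drivers try an
 * accelerated path before this fallback.
 */
static int __maybe_unused ttm_example_bo_move(struct ttm_buffer_object *bo,
					      bool evict, bool interruptible,
					      bool no_wait,
					      struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	/* TT <-> SYSTEM never touches device memory; rebinding is enough. */
	if ((old_mem->mem_type == TTM_PL_SYSTEM &&
	     new_mem->mem_type == TTM_PL_TT) ||
	    (old_mem->mem_type == TTM_PL_TT &&
	     new_mem->mem_type == TTM_PL_SYSTEM))
		return ttm_bo_move_ttm(bo, evict, no_wait, new_mem);

	/* No copy engine assumed: perform the move with the CPU. */
	return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
}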
static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}
/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 */
static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;

	fbo = kzalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	*fbo = *bo;

	/*
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	spin_lock_init(&fbo->lock);
	init_waitqueue_head(&fbo->event_queue);
	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	fbo->vm_node = NULL;

	fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
	if (fbo->mem.mm_node)
		fbo->mem.mm_node->private = (void *)fbo;
	kref_init(&fbo->list_kref);
	kref_init(&fbo->kref);
	fbo->destroy = &ttm_transfered_destroy;

	*new_obj = fbo;
	return 0;
}
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
#if defined(__i386__) || defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else if (boot_cpu_data.x86 > 3)
		tmp = pgprot_noncached(tmp);

#elif defined(__powerpc__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
		pgprot_val(tmp) |= _PAGE_NO_CACHE;
		if (caching_flags & TTM_PL_FLAG_UNCACHED)
			pgprot_val(tmp) |= _PAGE_GUARDED;
	}
#endif
#if defined(__ia64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED))
		tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long bus_base,
			  unsigned long bus_offset,
			  unsigned long bus_size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg *mem = &bo->mem;
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP)) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->placement & TTM_PL_FLAG_WC)
			map->virtual = ioremap_wc(bus_base + bus_offset,
						  bus_size);
		else
			map->virtual = ioremap_nocache(bus_base + bus_offset,
						       bus_size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}
static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem; pgprot_t prot;
	struct ttm_tt *ttm = bo->ttm;
	struct page *d;
	int i;

	BUG_ON(!ttm);

	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm_tt_get_page(ttm, start_page);
		map->virtual = kmap(map->page);
	} else {
		/*
		 * Populate the part we're mapping.
		 */
		for (i = start_page; i < start_page + num_pages; ++i) {
			d = ttm_tt_get_page(ttm, i);
			if (!d)
				return -ENOMEM;
		}

		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
			PAGE_KERNEL :
			ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	int ret;
	unsigned long bus_base;
	unsigned long bus_offset;
	unsigned long bus_size;

	BUG_ON(!list_empty(&bo->swap));
	map->virtual = NULL;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
		return -EPERM;
#endif
	ret = ttm_bo_pci_offset(bo->bdev, &bo->mem, &bus_base,
				&bus_offset, &bus_size);
	if (ret)
		return ret;
	if (bus_size == 0) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		bus_offset += start_page << PAGE_SHIFT;
		bus_size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	if (map->virtual == NULL)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);
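
/*
 * Usage sketch (illustrative addition, not from the original file): map a
 * single page of a reserved, populated buffer object, clear it with the CPU
 * and unmap it again. The helper name is made up; the caller is assumed to
 * hold a reservation on @bo so the pages cannot move under the mapping.
 */
static int __maybe_unused ttm_example_bo_clear_page(struct ttm_buffer_object *bo,
						    unsigned long page)
{
	struct ttm_bo_kmap_obj map;
	bool is_iomem;
	void *virtual;
	int ret;

	ret = ttm_bo_kmap(bo, page, 1, &map);
	if (unlikely(ret != 0))
		return ret;

	/* The mapping may be ordinary memory or ioremapped aperture space. */
	virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
	if (is_iomem)
		memset_io((void __iomem *)virtual, 0, PAGE_SIZE);
	else
		memset(virtual, 0, PAGE_SIZE);

	ttm_bo_kunmap(&map);
	return 0;
}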
int ttm_bo_pfn_prot(struct ttm_buffer_object *bo,
		    unsigned long dst_offset,
		    unsigned long *pfn, pgprot_t *prot)
{
	struct ttm_mem_reg *mem = &bo->mem;
	struct ttm_bo_device *bdev = bo->bdev;
	unsigned long bus_offset;
	unsigned long bus_size;
	unsigned long bus_base;
	int ret;

	ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset,
				&bus_size);
	if (ret)
		return -EINVAL;
	if (bus_size != 0)
		*pfn = (bus_base + bus_offset + dst_offset) >> PAGE_SHIFT;
	else
		if (!bo->ttm)
			return -EINVAL;
		else
			*pfn = page_to_pfn(ttm_tt_get_page(bo->ttm,
							   dst_offset >>
							   PAGE_SHIFT));
	*prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
		PAGE_KERNEL : ttm_io_prot(mem->placement, PAGE_KERNEL);

	return 0;
}
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      void *sync_obj,
			      void *sync_obj_arg,
			      bool evict, bool no_wait,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	struct ttm_buffer_object *ghost_obj;
	void *tmp_obj = NULL;

	spin_lock(&bo->lock);
	if (bo->sync_obj) {
		tmp_obj = bo->sync_obj;
		bo->sync_obj = NULL;
	}
	bo->sync_obj = driver->sync_obj_ref(sync_obj);
	bo->sync_obj_arg = sync_obj_arg;
	if (evict) {
		ret = ttm_bo_wait(bo, false, false, false);
		spin_unlock(&bo->lock);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);
		if (ret)
			return ret;

		ttm_bo_free_old_node(bo);
		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
		    (bo->ttm != NULL)) {
			ttm_tt_unbind(bo->ttm);
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
	} else {
		/*
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
		spin_unlock(&bo->lock);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		/*
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
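
/*
 * Illustrative sketch (not part of the original file): after a driver has
 * queued a hardware blit from the old to the new placement, it hands the
 * resulting sync object to ttm_bo_move_accel_cleanup(), which either waits
 * for it (on eviction) or parks the old pages on a ghost object until the
 * copy has finished. example_emit_copy() and its fence return value are
 * hypothetical, so the sketch is kept out of the build.
 */
#if 0	/* depends on the hypothetical example_emit_copy() */
static int example_bo_move_accel(struct ttm_buffer_object *bo,
				 bool evict, bool no_wait,
				 struct ttm_mem_reg *new_mem)
{
	void *fence;
	int ret;

	/* Queue the copy on the GPU; returns a driver sync object. */
	ret = example_emit_copy(bo, &bo->mem, new_mem, &fence);
	if (unlikely(ret != 0))
		return ret;

	/* Let TTM fence the buffer and retire the old placement later. */
	return ttm_bo_move_accel_cleanup(bo, fence, NULL,
					 evict, no_wait, new_mem);
}
#endif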