/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>
#include <linux/version.h>
#include <linux/module.h>

void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	if (old_mem->mm_node) {
		spin_lock(&bo->bdev->lru_lock);
		drm_mm_put_block(old_mem->mm_node);
		spin_unlock(&bo->bdev->lru_lock);
	}
	old_mem->mm_node = NULL;
}

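/**
 * ttm_bo_move_ttm - move a buffer object by rebinding its TTM
 *
 * If the old placement is not system memory, the TTM is unbound and the
 * old node is freed, leaving the buffer in TTM_PL_SYSTEM. The caching
 * state is then switched to match @new_mem and, if the new placement is
 * not system memory either, the TTM is bound at the new location.
 */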
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	uint32_t save_flags = old_mem->placement;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
		save_flags = old_mem->placement;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
	ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);
	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	unsigned long bus_offset;
	unsigned long bus_size;
	unsigned long bus_base;
	int ret;
	void *addr;

	*virtual = NULL;
	ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset, &bus_size);
	if (ret || bus_size == 0)
		return ret;

	if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
		addr = (void *)(((u8 *) man->io_addr) + bus_offset);
	else {
		if (mem->placement & TTM_PL_FLAG_WC)
			addr = ioremap_wc(bus_base + bus_offset, bus_size);
		else
			addr = ioremap_nocache(bus_base + bus_offset, bus_size);
		if (!addr)
			return -ENOMEM;
	}
	*virtual = addr;
	return 0;
}

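/*
 * Note that for a placement without a PCI aperture (bus_size == 0) the
 * helper above returns 0 and leaves *virtual NULL; ttm_bo_move_memcpy()
 * below relies on that to select the right per-page copy helper.
 */
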
void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			 void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && (man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
		iounmap(virtual);
}

static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
	int i;

	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}

static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page)
{
	struct page *d = ttm_tt_get_page(ttm, page);
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
	dst = kmap(d);
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);
	kunmap(d);
	return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page)
{
	struct page *s = ttm_tt_get_page(ttm, page);
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
	src = kmap(s);
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);
	kunmap(s);
	return 0;
}

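/*
 * The three helpers above cover the source/destination combinations
 * used by ttm_bo_move_memcpy() below: io->io, io->ttm and ttm->io.
 * A move where neither side is an io mapping skips the copy loop
 * entirely.
 */
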
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	uint32_t save_flags = old_mem->placement;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;
	if (old_iomap == NULL && ttm == NULL)
		goto out2;

	add = 0;
	dir = 1;

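	/*
	 * When source and destination ranges can overlap within the same
	 * memory type, copy backwards (memmove-style) so that pages are
	 * read before they are overwritten.
	 */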
	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->mm_node->start <
	     old_mem->mm_node->start + old_mem->mm_node->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL)
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page);
		else if (new_iomap == NULL)
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page);
		else
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		if (ret)
			goto out1;
	}
	mb();
out2:
	ttm_bo_free_old_node(bo);

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
	ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
		ttm_tt_unbind(ttm);
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, new_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

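/*
 * Example (sketch): a driver's bo move callback would typically try its
 * copy engine first and fall back to ttm_bo_move_memcpy(). The my_*
 * names below are hypothetical:
 *
 *	static int my_bo_move(struct ttm_buffer_object *bo, bool evict,
 *			      bool interruptible, bool no_wait,
 *			      struct ttm_mem_reg *new_mem)
 *	{
 *		if (my_copy_engine_usable(bo->bdev))
 *			return my_accel_copy(bo, evict, no_wait, new_mem);
 *		return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
 *	}
 */
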
static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;

	fbo = kzalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	*fbo = *bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	spin_lock_init(&fbo->lock);
	init_waitqueue_head(&fbo->event_queue);
	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);

	fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
	if (fbo->mem.mm_node)
		fbo->mem.mm_node->private = (void *)fbo;
	kref_init(&fbo->list_kref);
	kref_init(&fbo->kref);
	fbo->destroy = &ttm_transfered_destroy;

	*new_obj = fbo;
	return 0;
}

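/**
 * ttm_io_prot - derive a page protection from TTM caching flags
 *
 * @caching_flags: TTM_PL_FLAG_* caching bits of the placement.
 * @tmp: The base page protection to modify.
 *
 * Adjusts @tmp, per architecture, to give write-combined or uncached
 * mappings as requested by @caching_flags. Used by ttm_bo_kmap_ttm()
 * and ttm_bo_pfn_prot() below.
 */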
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
#if defined(__i386__) || defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else if (boot_cpu_data.x86 > 3)
		tmp = pgprot_noncached(tmp);

#elif defined(__powerpc__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
		pgprot_val(tmp) |= _PAGE_NO_CACHE;
		if (caching_flags & TTM_PL_FLAG_UNCACHED)
			pgprot_val(tmp) |= _PAGE_GUARDED;
	}
#endif
#if defined(__ia64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED))
		tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long bus_base,
			  unsigned long bus_offset,
			  unsigned long bus_size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg *mem = &bo->mem;
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP)) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->placement & TTM_PL_FLAG_WC)
			map->virtual = ioremap_wc(bus_base + bus_offset,
						  bus_size);
		else
			map->virtual = ioremap_nocache(bus_base + bus_offset,
						       bus_size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	struct ttm_tt *ttm = bo->ttm;
	struct page *d;
	pgprot_t prot;
	int i;

	BUG_ON(!ttm);
	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */
		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm_tt_get_page(ttm, start_page);
		map->virtual = kmap(map->page);
	} else {
		/*
		 * Populate the part we're mapping.
		 */
		for (i = start_page; i < start_page + num_pages; ++i) {
			d = ttm_tt_get_page(ttm, i);
			if (!d)
				return -ENOMEM;
		}

		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
			PAGE_KERNEL :
			ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

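/**
 * ttm_bo_kmap - map part of a buffer object into kernel address space
 *
 * @bo: The buffer object.
 * @start_page: First page of the region to map.
 * @num_pages: Number of pages to map.
 * @map: Cookie filled in with the mapping; undo with ttm_bo_kunmap().
 *
 * Chooses the mapping method based on where the memory lives: ioremap
 * for memory behind a PCI aperture, kmap or vmap for system pages.
 * Returns -EINVAL for out-of-range arguments and -ENOMEM if the
 * mapping fails.
 */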
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	int ret;
	unsigned long bus_base;
	unsigned long bus_offset;
	unsigned long bus_size;

	BUG_ON(!list_empty(&bo->swap));
	map->virtual = NULL;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
		return -EPERM;
#endif
	ret = ttm_bo_pci_offset(bo->bdev, &bo->mem, &bus_base,
				&bus_offset, &bus_size);
	if (ret)
		return ret;
	if (bus_size == 0) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		bus_offset += start_page << PAGE_SHIFT;
		bus_size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

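/*
 * Example (sketch): CPU access via the two helpers above, assuming a
 * cached system-memory bo that the caller has already reserved and
 * waited for (an io mapping would need memset_io() instead):
 *
 *	struct ttm_bo_kmap_obj map;
 *
 *	if (ttm_bo_kmap(bo, 0, bo->num_pages, &map) == 0) {
 *		memset(map.virtual, 0, bo->num_pages << PAGE_SHIFT);
 *		ttm_bo_kunmap(&map);
 *	}
 */
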
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	if (map->virtual == NULL)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

int ttm_bo_pfn_prot(struct ttm_buffer_object *bo,
		    unsigned long dst_offset,
		    unsigned long *pfn, pgprot_t *prot)
{
	struct ttm_mem_reg *mem = &bo->mem;
	struct ttm_bo_device *bdev = bo->bdev;
	unsigned long bus_offset;
	unsigned long bus_size;
	unsigned long bus_base;
	int ret;

	ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset,
				&bus_size);
	if (ret)
		return -EINVAL;
	if (bus_size != 0)
		*pfn = (bus_base + bus_offset + dst_offset) >> PAGE_SHIFT;
	else
		if (!bo->ttm)
			return -EINVAL;
		else
			*pfn = page_to_pfn(ttm_tt_get_page(bo->ttm,
							   dst_offset >>
							   PAGE_SHIFT));
	*prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
		PAGE_KERNEL : ttm_io_prot(mem->placement, PAGE_KERNEL);

	return 0;
}

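/**
 * ttm_bo_move_accel_cleanup - finish an accelerated (GPU) move
 *
 * @bo: The buffer object being moved.
 * @sync_obj: Driver sync object signaled when the copy completes.
 * @sync_obj_arg: Driver-private argument for the sync object.
 * @evict: This move is an eviction; wait for the copy to finish
 * instead of pipelining it.
 * @no_wait: Currently unused by this helper.
 * @new_mem: The new memory region.
 *
 * On eviction, the old node is freed once the hardware copy is done.
 * Otherwise the old placement is handed over to a ghost buffer object
 * (see ttm_buffer_object_transfer() above) that is released when the
 * GPU operation completes, so the move can be pipelined.
 */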
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      void *sync_obj,
			      void *sync_obj_arg,
			      bool evict, bool no_wait,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	uint32_t save_flags = old_mem->placement;
	struct ttm_buffer_object *ghost_obj;
	void *tmp_obj = NULL;

	spin_lock(&bo->lock);
	if (bo->sync_obj) {
		tmp_obj = bo->sync_obj;
		bo->sync_obj = NULL;
	}
	bo->sync_obj = driver->sync_obj_ref(sync_obj);
	bo->sync_obj_arg = sync_obj_arg;
	if (evict) {
		ret = ttm_bo_wait(bo, false, false, false);
		spin_unlock(&bo->lock);
		driver->sync_obj_unref(&bo->sync_obj);

		if (ret)
			return ret;

		ttm_bo_free_old_node(bo);
		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
		    (bo->ttm != NULL)) {
			ttm_tt_unbind(bo->ttm);
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
	} else {
		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
		spin_unlock(&bo->lock);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
	ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);
	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);