 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 *
 * Jerome Glisse <glisse@freedesktop.org>
 * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
#include <linux/list.h>
#include <linux/slab.h>
#include "radeon_drm.h"
#include "radeon.h"
#include "radeon_trace.h"
int radeon_ttm_init(struct radeon_device *rdev);
void radeon_ttm_fini(struct radeon_device *rdev);
static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);
 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 * functions call it.
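/*
 * Illustrative sketch (not part of the driver): under this convention every
 * access to bo state is bracketed by a reserve/unreserve pair, using the
 * radeon_bo_reserve()/radeon_bo_unreserve() helpers, e.g.:
 *
 *	r = radeon_bo_reserve(bo, false);
 *	if (unlikely(r != 0))
 *		return r;
 *	... touch bo fields, kmap, pin, etc. ...
 *	radeon_bo_unreserve(bo);
 */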
void radeon_bo_clear_va(struct radeon_bo *bo)
	struct radeon_bo_va *bo_va, *tmp;
	list_for_each_entry_safe(bo_va, tmp, &bo->va, bo_list) {
		/* remove from all vm address space */
		mutex_lock(&bo_va->vm->mutex);
		list_del(&bo_va->vm_list);
		mutex_unlock(&bo_va->vm->mutex);
		list_del(&bo_va->bo_list);
static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
	bo = container_of(tbo, struct radeon_bo, tbo);
	mutex_lock(&bo->rdev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&bo->rdev->gem.mutex);
	radeon_bo_clear_surface_reg(bo);
	radeon_bo_clear_va(bo);
	drm_gem_object_release(&bo->gem_base);
bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
	if (bo->destroy == &radeon_ttm_bo_destroy)
void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
	rbo->placement.fpfn = 0;
	rbo->placement.lpfn = 0;
	rbo->placement.placement = rbo->placements;
	rbo->placement.busy_placement = rbo->placements;
	if (domain & RADEON_GEM_DOMAIN_VRAM)
		rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
				       TTM_PL_FLAG_VRAM;
	if (domain & RADEON_GEM_DOMAIN_GTT)
		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	if (domain & RADEON_GEM_DOMAIN_CPU)
		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
	if (!c)
		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
	rbo->placement.num_placement = c;
	rbo->placement.num_busy_placement = c;
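/*
 * Illustrative sketch: callers normally rebuild the placement list from a
 * domain mask and then let TTM (re)validate the buffer, as the pin and CS
 * validation paths in this file do; "rbo" and "r" are hypothetical locals.
 *
 *	radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM |
 *					      RADEON_GEM_DOMAIN_GTT);
 *	r = ttm_bo_validate(&rbo->tbo, &rbo->placement, false, false, false);
 */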
int radeon_bo_create(struct radeon_device *rdev,
		     unsigned long size, int byte_align, bool kernel, u32 domain,
		     struct sg_table *sg, struct radeon_bo **bo_ptr)
	struct radeon_bo *bo;
	enum ttm_bo_type type;
	unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
	unsigned long max_size = 0;
	size = ALIGN(size, PAGE_SIZE);
	if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
		rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
		type = ttm_bo_type_kernel;
		type = ttm_bo_type_sg;
		type = ttm_bo_type_device;
	/* maximum bo size is the minimum of visible vram size and gtt size */
	max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size);
	if ((page_align << PAGE_SHIFT) >= max_size) {
		printk(KERN_WARNING "%s:%d alloc size %ldMB bigger than %ldMB limit\n",
		       __func__, __LINE__, page_align >> (20 - PAGE_SHIFT), max_size >> 20);
	acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
				       sizeof(struct radeon_bo));
	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
	r = drm_gem_object_init(rdev->ddev, &bo->gem_base, size);
	bo->gem_base.driver_private = NULL;
	bo->surface_reg = -1;
	INIT_LIST_HEAD(&bo->list);
	INIT_LIST_HEAD(&bo->va);
	radeon_ttm_placement_from_domain(bo, domain);
	/* Kernel allocations are uninterruptible */
	mutex_lock(&rdev->vram_mutex);
	r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, page_align, 0, !kernel, NULL,
			acc_size, sg, &radeon_ttm_bo_destroy);
	mutex_unlock(&rdev->vram_mutex);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS) {
			if (domain == RADEON_GEM_DOMAIN_VRAM) {
				domain |= RADEON_GEM_DOMAIN_GTT;
				"object_init failed for (%lu, 0x%08X)\n",
	trace_radeon_bo_create(bo);
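/*
 * Illustrative sketch: a typical kernel-internal allocation (e.g. a
 * GTT-backed scratch buffer). The bo comes back unpinned and unreserved;
 * "size" and the surrounding error handling here are hypothetical.
 *
 *	struct radeon_bo *bo;
 *	int r;
 *
 *	r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
 *			     RADEON_GEM_DOMAIN_GTT, NULL, &bo);
 *	if (r)
 *		return r;
 */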
int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	radeon_bo_check_tiling(bo, 0, 0);
void radeon_bo_kunmap(struct radeon_bo *bo)
	if (bo->kptr == NULL)
	radeon_bo_check_tiling(bo, 0, 0);
	ttm_bo_kunmap(&bo->kmap);
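/*
 * Illustrative sketch: CPU access through the kmap helpers. The bo must be
 * reserved (and normally pinned) by the caller; "ptr" is a hypothetical
 * local and radeon_bo_size() is assumed to be the usual size helper from
 * radeon_object.h.
 *
 *	void *ptr;
 *
 *	r = radeon_bo_kmap(bo, &ptr);
 *	if (r == 0) {
 *		memset(ptr, 0, radeon_bo_size(bo));
 *		radeon_bo_kunmap(bo);
 *	}
 */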
void radeon_bo_unref(struct radeon_bo **bo)
	struct ttm_buffer_object *tbo;
	struct radeon_device *rdev;
	mutex_lock(&rdev->vram_mutex);
	mutex_unlock(&rdev->vram_mutex);
int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
			*gpu_addr = radeon_bo_gpu_offset(bo);
		if (max_offset != 0) {
			if (domain == RADEON_GEM_DOMAIN_VRAM)
				domain_start = bo->rdev->mc.vram_start;
				domain_start = bo->rdev->mc.gtt_start;
			WARN_ON_ONCE(max_offset <
				     (radeon_bo_gpu_offset(bo) - domain_start));
	radeon_ttm_placement_from_domain(bo, domain);
	if (domain == RADEON_GEM_DOMAIN_VRAM) {
		/* force to pin into visible video ram */
		bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
		u64 lpfn = max_offset >> PAGE_SHIFT;
		if (!bo->placement.lpfn)
			bo->placement.lpfn = bo->rdev->mc.gtt_size >> PAGE_SHIFT;
		if (lpfn < bo->placement.lpfn)
			bo->placement.lpfn = lpfn;
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
	if (likely(r == 0)) {
		if (gpu_addr != NULL)
			*gpu_addr = radeon_bo_gpu_offset(bo);
	if (unlikely(r != 0))
		dev_err(bo->rdev->dev, "%p pin failed\n", bo);
int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
	return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr);
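/*
 * Illustrative sketch: pinning a reserved bo to obtain a stable GPU address,
 * e.g. for scanout; "gpu_addr" is a hypothetical local.
 *
 *	u64 gpu_addr;
 *
 *	r = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
 *	if (r == 0) {
 *		... program the hardware with gpu_addr ...
 *		radeon_bo_unpin(bo);
 *	}
 */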
int radeon_bo_unpin(struct radeon_bo *bo)
	if (!bo->pin_count) {
		dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
	if (unlikely(r != 0))
		dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
int radeon_bo_evict_vram(struct radeon_device *rdev)
	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
	if (0 && (rdev->flags & RADEON_IS_IGP)) {
		if (rdev->mc.igp_sideport_enabled == false)
			/* Useless to evict on IGP chips */
	return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
void radeon_bo_force_delete(struct radeon_device *rdev)
	struct radeon_bo *bo, *n;
	if (list_empty(&rdev->gem.objects)) {
	dev_err(rdev->dev, "Userspace still has active objects!\n");
	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
		mutex_lock(&rdev->ddev->struct_mutex);
		dev_err(rdev->dev, "%p %p %lu %lu force free\n",
			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
			*((unsigned long *)&bo->gem_base.refcount));
		mutex_lock(&bo->rdev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&bo->rdev->gem.mutex);
		/* this should unref the ttm bo */
		drm_gem_object_unreference(&bo->gem_base);
		mutex_unlock(&rdev->ddev->struct_mutex);
int radeon_bo_init(struct radeon_device *rdev)
	/* Add an MTRR for the VRAM */
	rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size,
			MTRR_TYPE_WRCOMB, 1);
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		rdev->mc.mc_vram_size >> 20,
		(unsigned long long)rdev->mc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %cDR\n",
			rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
	return radeon_ttm_init(rdev);
void radeon_bo_fini(struct radeon_device *rdev)
	radeon_ttm_fini(rdev);
void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
			       struct list_head *head)
		list_add(&lobj->tv.head, head);
		list_add_tail(&lobj->tv.head, head);
int radeon_bo_list_validate(struct list_head *head)
	struct radeon_bo_list *lobj;
	struct radeon_bo *bo;
	r = ttm_eu_reserve_buffers(head);
	if (unlikely(r != 0)) {
	list_for_each_entry(lobj, head, tv.head) {
		if (!bo->pin_count) {
			domain = lobj->wdomain ? lobj->wdomain : lobj->rdomain;
			radeon_ttm_placement_from_domain(bo, domain);
			r = ttm_bo_validate(&bo->tbo, &bo->placement,
			if (r != -ERESTARTSYS && domain == RADEON_GEM_DOMAIN_VRAM) {
				domain |= RADEON_GEM_DOMAIN_GTT;
		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
		lobj->tiling_flags = bo->tiling_flags;
int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
			 struct vm_area_struct *vma)
	return ttm_fbdev_mmap(vma, &bo->tbo);
int radeon_bo_get_surface_reg(struct radeon_bo *bo)
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;
	struct radeon_bo *old_object;
	BUG_ON(!atomic_read(&bo->tbo.reserved));
	if (!bo->tiling_flags)
	if (bo->surface_reg >= 0) {
		reg = &rdev->surface_regs[bo->surface_reg];
	for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
		reg = &rdev->surface_regs[i];
		old_object = reg->bo;
		if (old_object->pin_count == 0)
	/* if we are all out */
	if (i == RADEON_GEM_MAX_SURFACES) {
		/* find someone with a surface reg and nuke their BO */
		reg = &rdev->surface_regs[steal];
		old_object = reg->bo;
		/* blow away the mapping */
		DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
		ttm_bo_unmap_virtual(&old_object->tbo);
		old_object->surface_reg = -1;
	radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
			       bo->tbo.mem.start << PAGE_SHIFT,
			       bo->tbo.num_pages << PAGE_SHIFT);
static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;
	if (bo->surface_reg == -1)
	reg = &rdev->surface_regs[bo->surface_reg];
	radeon_clear_surface_reg(rdev, bo->surface_reg);
	bo->surface_reg = -1;
int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
			       uint32_t tiling_flags, uint32_t pitch)
	struct radeon_device *rdev = bo->rdev;
	if (rdev->family >= CHIP_CEDAR) {
		unsigned bankw, bankh, mtaspect, tilesplit, stilesplit;
		bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
		bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
		mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
		tilesplit = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
		stilesplit = (tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
		if (stilesplit > 6) {
	r = radeon_bo_reserve(bo, false);
	if (unlikely(r != 0))
	bo->tiling_flags = tiling_flags;
	radeon_bo_unreserve(bo);
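/*
 * Illustrative sketch: the evergreen fields decoded above are packed into
 * tiling_flags with the matching *_SHIFT/*_MASK pairs before being handed to
 * radeon_bo_set_tiling_flags(); "flags" and "bankw" are hypothetical locals.
 *
 *	flags |= (bankw & RADEON_TILING_EG_BANKW_MASK) <<
 *		 RADEON_TILING_EG_BANKW_SHIFT;
 */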
void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
				uint32_t *tiling_flags,
	BUG_ON(!atomic_read(&bo->tbo.reserved));
	*tiling_flags = bo->tiling_flags;
int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
	BUG_ON(!atomic_read(&bo->tbo.reserved));
	if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
		radeon_bo_clear_surface_reg(bo);
	if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
		if (bo->surface_reg >= 0)
			radeon_bo_clear_surface_reg(bo);
	if ((bo->surface_reg >= 0) && !has_moved)
	return radeon_bo_get_surface_reg(bo);
void radeon_bo_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *mem)
	struct radeon_bo *rbo;
	if (!radeon_ttm_bo_is_radeon_bo(bo))
	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 1);
	radeon_vm_bo_invalidate(rbo->rdev, rbo);
int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
	struct radeon_device *rdev;
	struct radeon_bo *rbo;
	unsigned long offset, size;
	if (!radeon_ttm_bo_is_radeon_bo(bo))
	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 0);
	if (bo->mem.mem_type == TTM_PL_VRAM) {
		size = bo->mem.num_pages << PAGE_SHIFT;
		offset = bo->mem.start << PAGE_SHIFT;
		if ((offset + size) > rdev->mc.visible_vram_size) {
			/* hurrah, the memory is not visible! */
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
			rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
			r = ttm_bo_validate(bo, &rbo->placement, false, true, false);
			if (unlikely(r != 0))
			offset = bo->mem.start << PAGE_SHIFT;
			/* this should not happen */
			if ((offset + size) > rdev->mc.visible_vram_size)
int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
	if (unlikely(r != 0))
	spin_lock(&bo->tbo.bdev->fence_lock);
	*mem_type = bo->tbo.mem.mem_type;
	if (bo->tbo.sync_obj)
		r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
	spin_unlock(&bo->tbo.bdev->fence_lock);
	ttm_bo_unreserve(&bo->tbo);
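/*
 * Illustrative sketch: waiting for a bo to go idle before CPU access;
 * "mem_type" is a hypothetical local and the blocking variant is chosen.
 *
 *	u32 mem_type;
 *
 *	r = radeon_bo_wait(bo, &mem_type, false);
 *	if (r)
 *		return r;
 */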
 * radeon_bo_reserve - reserve bo
 * @no_wait: don't sleep while trying to reserve (return -EBUSY)
 * -EBUSY: buffer is busy and @no_wait is true
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait)
	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
/* object has to be reserved */
struct radeon_bo_va *radeon_bo_va(struct radeon_bo *rbo, struct radeon_vm *vm)
	struct radeon_bo_va *bo_va;
	list_for_each_entry(bo_va, &rbo->va, bo_list) {
		if (bo_va->vm == vm) {