/*
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 */
#include <linux/list.h>
#include <linux/slab.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"
#include "radeon_trace.h"

int radeon_ttm_init(struct radeon_device *rdev);
void radeon_ttm_fini(struct radeon_device *rdev);
static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);

/*
 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 * of the functions below call it.
 */
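/*
 * Illustrative caller-side sketch of that convention (an editor's sketch,
 * not part of the driver): every access to a BO's state is bracketed by
 * radeon_bo_reserve()/radeon_bo_unreserve(), so no extra lock is needed:
 *
 *	r = radeon_bo_reserve(bo, false);
 *	if (unlikely(r != 0))
 *		return r;
 *	... exclusive access to the BO's state here ...
 *	radeon_bo_unreserve(bo);
 */
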
void radeon_bo_clear_va(struct radeon_bo *bo)
{
	struct radeon_bo_va *bo_va, *tmp;

	list_for_each_entry_safe(bo_va, tmp, &bo->va, bo_list) {
		/* remove from all vm address spaces */
		mutex_lock(&bo_va->vm->mutex);
		list_del(&bo_va->vm_list);
		mutex_unlock(&bo_va->vm->mutex);
		list_del(&bo_va->bo_list);
		kfree(bo_va);
	}
}

static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct radeon_bo *bo;

	bo = container_of(tbo, struct radeon_bo, tbo);
	mutex_lock(&bo->rdev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&bo->rdev->gem.mutex);
	radeon_bo_clear_surface_reg(bo);
	radeon_bo_clear_va(bo);
	drm_gem_object_release(&bo->gem_base);
	kfree(bo);
}

bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &radeon_ttm_bo_destroy)
		return true;
	return false;
}

void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
{
	u32 c = 0;

	rbo->placement.fpfn = 0;
	rbo->placement.lpfn = 0;
	rbo->placement.placement = rbo->placements;
	rbo->placement.busy_placement = rbo->placements;
	if (domain & RADEON_GEM_DOMAIN_VRAM)
		rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
					TTM_PL_FLAG_VRAM;
	if (domain & RADEON_GEM_DOMAIN_GTT)
		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	if (domain & RADEON_GEM_DOMAIN_CPU)
		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
	if (!c)
		/* fall back to system placement if no domain bit matched */
		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
	rbo->placement.num_placement = c;
	rbo->placement.num_busy_placement = c;
}
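
/*
 * Illustrative sketch (not driver code): requesting both VRAM and GTT
 * fills two placement entries, so TTM first tries write-combined VRAM
 * and then falls back to GTT:
 *
 *	radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM |
 *					       RADEON_GEM_DOMAIN_GTT);
 *	... rbo->placement.num_placement is now 2, with VRAM first ...
 */
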
int radeon_bo_create(struct radeon_device *rdev,
		     unsigned long size, int byte_align, bool kernel, u32 domain,
		     struct radeon_bo **bo_ptr)
{
	struct radeon_bo *bo;
	enum ttm_bo_type type;
	unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
	unsigned long max_size = 0;
	size_t acc_size;
	int r;

	size = ALIGN(size, PAGE_SIZE);

	if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
		rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
	}
	if (kernel) {
		type = ttm_bo_type_kernel;
	} else {
		type = ttm_bo_type_device;
	}
	*bo_ptr = NULL;

	/* maximum bo size is the minimum of visible vram size and gtt size */
	max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size);
	if ((page_align << PAGE_SHIFT) >= max_size) {
		printk(KERN_WARNING "%s:%d alloc size %ldMB bigger than %ldMB limit\n",
		       __func__, __LINE__, page_align >> (20 - PAGE_SHIFT), max_size >> 20);
		return -ENOMEM;
	}

	acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
				       sizeof(struct radeon_bo));

retry:
	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	r = drm_gem_object_init(rdev->ddev, &bo->gem_base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	bo->rdev = rdev;
	bo->gem_base.driver_private = NULL;
	bo->surface_reg = -1;
	INIT_LIST_HEAD(&bo->list);
	INIT_LIST_HEAD(&bo->va);
	radeon_ttm_placement_from_domain(bo, domain);
	/* Kernel allocations are uninterruptible */
	mutex_lock(&rdev->vram_mutex);
	r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, page_align, 0, !kernel, NULL,
			acc_size, &radeon_ttm_bo_destroy);
	mutex_unlock(&rdev->vram_mutex);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS) {
			if (domain == RADEON_GEM_DOMAIN_VRAM) {
				/* VRAM allocation failed, retry with GTT allowed */
				domain |= RADEON_GEM_DOMAIN_GTT;
				goto retry;
			}
			dev_err(rdev->dev,
				"object_init failed for (%lu, 0x%08X)\n",
				size, domain);
		}
		return r;
	}
	*bo_ptr = bo;

	trace_radeon_bo_create(bo);

	return 0;
}
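
/*
 * Typical lifecycle built from the helpers in this file (an editor's
 * sketch; assumes a valid rdev, error paths abbreviated):
 *
 *	struct radeon_bo *bo;
 *	u64 gpu_addr;
 *	void *cpu_ptr;
 *
 *	r = radeon_bo_create(rdev, 4096, PAGE_SIZE, true,
 *			     RADEON_GEM_DOMAIN_GTT, &bo);
 *	r = radeon_bo_reserve(bo, false);
 *	r = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
 *	r = radeon_bo_kmap(bo, &cpu_ptr);
 *	... use cpu_ptr on the CPU, gpu_addr on the GPU ...
 *	radeon_bo_kunmap(bo);
 *	radeon_bo_unpin(bo);
 *	radeon_bo_unreserve(bo);
 *	radeon_bo_unref(&bo);
 */
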
int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
{
	bool is_iomem;
	int r;

	if (bo->kptr) {
		if (ptr) {
			*ptr = bo->kptr;
		}
		return 0;
	}
	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r) {
		return r;
	}
	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr) {
		*ptr = bo->kptr;
	}
	radeon_bo_check_tiling(bo, 0, 0);
	return 0;
}

void radeon_bo_kunmap(struct radeon_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->kptr = NULL;
	radeon_bo_check_tiling(bo, 0, 0);
	ttm_bo_kunmap(&bo->kmap);
}

void radeon_bo_unref(struct radeon_bo **bo)
{
	struct ttm_buffer_object *tbo;
	struct radeon_device *rdev;

	if ((*bo) == NULL)
		return;
	rdev = (*bo)->rdev;
	tbo = &((*bo)->tbo);
	mutex_lock(&rdev->vram_mutex);
	ttm_bo_unref(&tbo);
	mutex_unlock(&rdev->vram_mutex);
	if (tbo == NULL)
		*bo = NULL;
}

int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
			     u64 *gpu_addr)
{
	int r, i;

	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = radeon_bo_gpu_offset(bo);
		WARN_ON_ONCE(max_offset != 0);
		return 0;
	}
	radeon_ttm_placement_from_domain(bo, domain);
	if (domain == RADEON_GEM_DOMAIN_VRAM) {
		/* force to pin into visible video ram */
		bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
	}
	if (max_offset) {
		u64 lpfn = max_offset >> PAGE_SHIFT;

		if (!bo->placement.lpfn)
			bo->placement.lpfn = bo->rdev->mc.gtt_size >> PAGE_SHIFT;

		if (lpfn < bo->placement.lpfn)
			bo->placement.lpfn = lpfn;
	}
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
	if (likely(r == 0)) {
		bo->pin_count = 1;
		if (gpu_addr != NULL)
			*gpu_addr = radeon_bo_gpu_offset(bo);
	}
	if (unlikely(r != 0))
		dev_err(bo->rdev->dev, "%p pin failed\n", bo);
	return r;
}
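
/*
 * Illustrative use of the restricted variant (an editor's sketch): pin a
 * scanout buffer below a hypothetical 256MB aperture limit; the BO must
 * already be reserved:
 *
 *	r = radeon_bo_pin_restricted(bo, RADEON_GEM_DOMAIN_VRAM,
 *				     256ULL << 20, &gpu_addr);
 */
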
int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
{
	return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr);
}

int radeon_bo_unpin(struct radeon_bo *bo)
{
	int r, i;

	if (!bo->pin_count) {
		dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
	if (unlikely(r != 0))
		dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
	return r;
}

int radeon_bo_evict_vram(struct radeon_device *rdev)
{
	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
	if (0 && (rdev->flags & RADEON_IS_IGP)) {
		if (rdev->mc.igp_sideport_enabled == false)
			/* Useless to evict on IGP chips */
			return 0;
	}
	return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}

void radeon_bo_force_delete(struct radeon_device *rdev)
{
	struct radeon_bo *bo, *n;

	if (list_empty(&rdev->gem.objects)) {
		return;
	}
	dev_err(rdev->dev, "Userspace still has active objects!\n");
	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
		mutex_lock(&rdev->ddev->struct_mutex);
		dev_err(rdev->dev, "%p %p %lu %lu force free\n",
			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
			*((unsigned long *)&bo->gem_base.refcount));
		mutex_lock(&bo->rdev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&bo->rdev->gem.mutex);
		/* this should unref the ttm bo */
		drm_gem_object_unreference(&bo->gem_base);
		mutex_unlock(&rdev->ddev->struct_mutex);
	}
}

int radeon_bo_init(struct radeon_device *rdev)
{
	/* Add an MTRR for the VRAM */
	rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size,
			MTRR_TYPE_WRCOMB, 1);
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		rdev->mc.mc_vram_size >> 20,
		(unsigned long long)rdev->mc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %cDR\n",
			rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
	return radeon_ttm_init(rdev);
}

void radeon_bo_fini(struct radeon_device *rdev)
{
	radeon_ttm_fini(rdev);
}

void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
			       struct list_head *head)
{
	if (lobj->wdomain) {
		/* BOs with a write domain go to the front of the list */
		list_add(&lobj->tv.head, head);
	} else {
		list_add_tail(&lobj->tv.head, head);
	}
}

int radeon_bo_list_validate(struct list_head *head)
{
	struct radeon_bo_list *lobj;
	struct radeon_bo *bo;
	u32 domain;
	int r;

	r = ttm_eu_reserve_buffers(head);
	if (unlikely(r != 0)) {
		return r;
	}
	list_for_each_entry(lobj, head, tv.head) {
		bo = lobj->bo;
		if (!bo->pin_count) {
			domain = lobj->wdomain ? lobj->wdomain : lobj->rdomain;

		retry:
			radeon_ttm_placement_from_domain(bo, domain);
			r = ttm_bo_validate(&bo->tbo, &bo->placement,
						true, false, false);
			if (unlikely(r)) {
				if (r != -ERESTARTSYS && domain == RADEON_GEM_DOMAIN_VRAM) {
					/* VRAM validation failed, retry with GTT allowed */
					domain |= RADEON_GEM_DOMAIN_GTT;
					goto retry;
				}
				return r;
			}
		}
		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
		lobj->tiling_flags = bo->tiling_flags;
	}
	return 0;
}
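
/*
 * Sketch of how a command-stream pass would drive the two list helpers
 * above (an editor's illustration; lobj setup and error handling elided):
 *
 *	struct list_head head;
 *
 *	INIT_LIST_HEAD(&head);
 *	... for each relocated BO: radeon_bo_list_add_object(lobj, &head); ...
 *	r = radeon_bo_list_validate(&head);
 *	... on success every lobj->gpu_offset is valid ...
 */
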
int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
			 struct vm_area_struct *vma)
{
	return ttm_fbdev_mmap(vma, &bo->tbo);
}

int radeon_bo_get_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;
	struct radeon_bo *old_object;
	int steal;
	int i;

	BUG_ON(!atomic_read(&bo->tbo.reserved));

	if (!bo->tiling_flags)
		return 0;

	if (bo->surface_reg >= 0) {
		reg = &rdev->surface_regs[bo->surface_reg];
		i = bo->surface_reg;
		goto out;
	}

	steal = -1;
	for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
		reg = &rdev->surface_regs[i];
		if (!reg->bo)
			break;

		old_object = reg->bo;
		if (old_object->pin_count == 0)
			steal = i;
	}

	/* if we are all out */
	if (i == RADEON_GEM_MAX_SURFACES) {
		if (steal == -1)
			return -ENOMEM;
		/* find someone with a surface reg and nuke their BO */
		reg = &rdev->surface_regs[steal];
		old_object = reg->bo;
		/* blow away the mapping */
		DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
		ttm_bo_unmap_virtual(&old_object->tbo);
		old_object->surface_reg = -1;
		i = steal;
	}

	bo->surface_reg = i;
	reg->bo = bo;

out:
	radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
			       bo->tbo.mem.start << PAGE_SHIFT,
			       bo->tbo.num_pages << PAGE_SHIFT);
	return 0;
}

static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;

	if (bo->surface_reg == -1)
		return;

	reg = &rdev->surface_regs[bo->surface_reg];
	radeon_clear_surface_reg(rdev, bo->surface_reg);

	reg->bo = NULL;
	bo->surface_reg = -1;
}

int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
			       uint32_t tiling_flags, uint32_t pitch)
{
	int r;

	r = radeon_bo_reserve(bo, false);
	if (unlikely(r != 0))
		return r;
	bo->tiling_flags = tiling_flags;
	bo->pitch = pitch;
	radeon_bo_unreserve(bo);
	return 0;
}

void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
				uint32_t *tiling_flags,
				uint32_t *pitch)
{
	BUG_ON(!atomic_read(&bo->tbo.reserved));
	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
	if (pitch)
		*pitch = bo->pitch;
}

int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
			   bool force_drop)
{
	BUG_ON(!atomic_read(&bo->tbo.reserved));

	if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
		return 0;

	if (force_drop) {
		radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
		if (!has_moved)
			return 0;

		if (bo->surface_reg >= 0)
			radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if ((bo->surface_reg >= 0) && !has_moved)
		return 0;

	return radeon_bo_get_surface_reg(bo);
}

void radeon_bo_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *mem)
{
	struct radeon_bo *rbo;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return;
	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 1);
	radeon_vm_bo_invalidate(rbo->rdev, rbo);
}

int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct radeon_device *rdev;
	struct radeon_bo *rbo;
	unsigned long offset, size;
	int r;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return 0;
	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 0);
	rdev = rbo->rdev;
	if (bo->mem.mem_type == TTM_PL_VRAM) {
		size = bo->mem.num_pages << PAGE_SHIFT;
		offset = bo->mem.start << PAGE_SHIFT;
		if ((offset + size) > rdev->mc.visible_vram_size) {
			/* the memory is not CPU-visible, move it into the visible window */
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
			rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
			r = ttm_bo_validate(bo, &rbo->placement, false, true, false);
			if (unlikely(r != 0))
				return r;
			offset = bo->mem.start << PAGE_SHIFT;
			/* this should not happen */
			if ((offset + size) > rdev->mc.visible_vram_size)
				return -EINVAL;
		}
	}
	return 0;
}

int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
{
	int r;

	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
	if (unlikely(r != 0))
		return r;
	spin_lock(&bo->tbo.bdev->fence_lock);
	if (mem_type)
		*mem_type = bo->tbo.mem.mem_type;
	if (bo->tbo.sync_obj)
		r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
	spin_unlock(&bo->tbo.bdev->fence_lock);
	ttm_bo_unreserve(&bo->tbo);
	return r;
}

/**
 * radeon_bo_reserve - reserve bo
 * @bo:		bo structure
 * @no_wait:	don't sleep while trying to reserve (return -EBUSY)
 *
 * Returns:
 * -EBUSY: buffer is busy and @no_wait is true
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 */
int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait)
{
	int r;

	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
		return r;
	}
	return 0;
}
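
/*
 * Sketch of handling the two documented failure modes (an editor's
 * illustration, not driver code):
 *
 *	r = radeon_bo_reserve(bo, true);	... no_wait poll ...
 *	if (r == -EBUSY)
 *		... BO is contended, try again later ...
 *	else if (r == -ERESTARTSYS)
 *		... drop all reservations and return to user-space ...
 */
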
/* object has to be reserved */
struct radeon_bo_va *radeon_bo_va(struct radeon_bo *rbo, struct radeon_vm *vm)
{
	struct radeon_bo_va *bo_va;

	list_for_each_entry(bo_va, &rbo->va, bo_list) {
		if (bo_va->vm == vm) {
			return bo_va;
		}
	}
	return NULL;
}