/*
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 */

#include <linux/list.h>
#include <drm/drmP.h>
#include "radeon_drm.h"
#include "radeon.h"

struct radeon_object {
        struct ttm_buffer_object        tobj;
        struct list_head                list;
        struct radeon_device            *rdev;
        struct drm_gem_object           *gobj;
        struct ttm_bo_kmap_obj          kmap;
        unsigned                        pin_count;
        uint64_t                        gpu_addr;
        void                            *kptr;
        bool                            is_iomem;
};

int radeon_ttm_init(struct radeon_device *rdev);
void radeon_ttm_fini(struct radeon_device *rdev);

/*
 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 * functions call it.
 */

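/*
 * Typical caller pattern, shown as an illustrative sketch only
 * (example_touch_bo is a hypothetical helper, not part of this file):
 *
 *      int example_touch_bo(struct radeon_object *robj)
 *      {
 *              int r = radeon_object_reserve(robj, true);
 *
 *              if (unlikely(r != 0))
 *                      return r;
 *              ... safely access robj->tobj placement/mapping state ...
 *              radeon_object_unreserve(robj);
 *              return 0;
 *      }
 */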
static int radeon_object_reserve(struct radeon_object *robj, bool interruptible)
{
        return ttm_bo_reserve(&robj->tobj, interruptible, false, false, 0);
}

static void radeon_object_unreserve(struct radeon_object *robj)
{
        ttm_bo_unreserve(&robj->tobj);
}

static void radeon_ttm_object_object_destroy(struct ttm_buffer_object *tobj)
{
        struct radeon_object *robj;

        robj = container_of(tobj, struct radeon_object, tobj);
        list_del_init(&robj->list);
        kfree(robj);
}

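/*
 * Compute the GPU address of a bound BO: the node offset within the
 * managed aperture, shifted to bytes, plus the aperture base in the MC
 * address space (mc.vram_location or mc.gtt_location). For example,
 * with 4KiB pages, a BO whose mm_node starts at page 16 in a VRAM
 * aperture based at 0x10000000 maps to 0x10000000 + (16 << 12) =
 * 0x10010000. Unbound or unknown placements get ~0ULL as a poison value.
 */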
static inline void radeon_object_gpu_addr(struct radeon_object *robj)
{
        /* Default gpu address */
        robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
        if (robj->tobj.mem.mm_node == NULL) {
                return;
        }
        robj->gpu_addr = ((u64)robj->tobj.mem.mm_node->start) << PAGE_SHIFT;
        switch (robj->tobj.mem.mem_type) {
        case TTM_PL_VRAM:
                robj->gpu_addr += (u64)robj->rdev->mc.vram_location;
                break;
        case TTM_PL_TT:
                robj->gpu_addr += (u64)robj->rdev->mc.gtt_location;
                break;
        default:
                DRM_ERROR("Unknown placement %d\n", robj->tobj.mem.mem_type);
                robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
                return;
        }
}

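/*
 * Translate a radeon GEM domain mask into TTM placement flags:
 *      RADEON_GEM_DOMAIN_VRAM -> TTM_PL_FLAG_VRAM
 *      RADEON_GEM_DOMAIN_GTT  -> TTM_PL_FLAG_TT
 *      RADEON_GEM_DOMAIN_CPU  -> TTM_PL_FLAG_SYSTEM
 * An empty domain mask falls back to TTM_PL_FLAG_SYSTEM.
 */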
static inline uint32_t radeon_object_flags_from_domain(uint32_t domain)
{
        uint32_t flags = 0;

        if (domain & RADEON_GEM_DOMAIN_VRAM) {
                flags |= TTM_PL_FLAG_VRAM;
        }
        if (domain & RADEON_GEM_DOMAIN_GTT) {
                flags |= TTM_PL_FLAG_TT;
        }
        if (domain & RADEON_GEM_DOMAIN_CPU) {
                flags |= TTM_PL_FLAG_SYSTEM;
        }
        if (!flags) {
                flags |= TTM_PL_FLAG_SYSTEM;
        }
        return flags;
}

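/*
 * Illustrative call sequence (a sketch only: the size and domain are
 * made-up example values, and the signature matches
 * radeon_object_create() below):
 *
 *      struct radeon_object *robj;
 *      uint64_t gpu_addr;
 *      int r;
 *
 *      r = radeon_object_create(rdev, NULL, 1024 * 1024, true,
 *                               RADEON_GEM_DOMAIN_VRAM, false, &robj);
 *      if (r == 0)
 *              r = radeon_object_pin(robj, RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
 */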
int radeon_object_create(struct radeon_device *rdev,
                         struct drm_gem_object *gobj,
                         unsigned long size,
                         bool kernel,
                         uint32_t domain,
                         bool interruptible,
                         struct radeon_object **robj_ptr)
{
        struct radeon_object *robj;
        enum ttm_bo_type type;
        uint32_t flags;
        int r;

        if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
                rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
        }
        if (kernel) {
                type = ttm_bo_type_kernel;
        } else {
                type = ttm_bo_type_device;
        }
        robj = kzalloc(sizeof(struct radeon_object), GFP_KERNEL);
        if (robj == NULL) {
                return -ENOMEM;
        }
        robj->rdev = rdev;
        robj->gobj = gobj;
        INIT_LIST_HEAD(&robj->list);

        flags = radeon_object_flags_from_domain(domain);
        r = ttm_buffer_object_init(&rdev->mman.bdev, &robj->tobj, size, type, flags,
                                   0, 0, false, NULL, size,
                                   &radeon_ttm_object_object_destroy);
        if (unlikely(r != 0)) {
                /* TTM calls radeon_ttm_object_object_destroy if an error happens */
                DRM_ERROR("Failed to allocate TTM object (%ld, 0x%08X, %u)\n",
                          size, flags, 0);
                return r;
        }
        *robj_ptr = robj;
        if (gobj != NULL) {
                list_add_tail(&robj->list, &rdev->gem.objects);
        }
        return 0;
}

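/*
 * Map a BO into kernel address space. The mapping is cached in
 * robj->kptr, so repeated calls return the same pointer;
 * radeon_object_kunmap() drops it.
 */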
int radeon_object_kmap(struct radeon_object *robj, void **ptr)
{
        int r;

        spin_lock(&robj->tobj.lock);
        if (robj->kptr) {
                if (ptr) {
                        *ptr = robj->kptr;
                }
                spin_unlock(&robj->tobj.lock);
                return 0;
        }
        spin_unlock(&robj->tobj.lock);
        r = ttm_bo_kmap(&robj->tobj, 0, robj->tobj.num_pages, &robj->kmap);
        if (r) {
                return r;
        }
        spin_lock(&robj->tobj.lock);
        robj->kptr = ttm_kmap_obj_virtual(&robj->kmap, &robj->is_iomem);
        spin_unlock(&robj->tobj.lock);
        if (ptr) {
                *ptr = robj->kptr;
        }
        return 0;
}

void radeon_object_kunmap(struct radeon_object *robj)
{
        spin_lock(&robj->tobj.lock);
        if (robj->kptr == NULL) {
                spin_unlock(&robj->tobj.lock);
                return;
        }
        robj->kptr = NULL;
        spin_unlock(&robj->tobj.lock);
        ttm_bo_kunmap(&robj->kmap);
}

void radeon_object_unref(struct radeon_object **robj)
{
        struct ttm_buffer_object *tobj;

        if ((*robj) == NULL) {
                return;
        }
        tobj = &((*robj)->tobj);
        ttm_bo_unref(&tobj);
        if (tobj == NULL) {
                *robj = NULL;
        }
}

int radeon_object_mmap(struct radeon_object *robj, uint64_t *offset)
{
        *offset = robj->tobj.addr_space_offset;
        return 0;
}

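/*
 * Pin a BO: validate it into the requested domain with
 * TTM_PL_FLAG_NO_EVICT set so it cannot be moved, and return its GPU
 * address. Pins are refcounted; only the first pin actually moves the
 * buffer.
 */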
int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
                      uint64_t *gpu_addr)
{
        uint32_t flags;
        uint32_t tmp;
        void *fbptr;
        int r;

        flags = radeon_object_flags_from_domain(domain);
        spin_lock(&robj->tobj.lock);
        if (robj->pin_count) {
                robj->pin_count++;
                if (gpu_addr != NULL) {
                        *gpu_addr = robj->gpu_addr;
                }
                spin_unlock(&robj->tobj.lock);
                return 0;
        }
        spin_unlock(&robj->tobj.lock);
        r = radeon_object_reserve(robj, false);
        if (unlikely(r != 0)) {
                DRM_ERROR("radeon: failed to reserve object for pinning it.\n");
                return r;
        }
        if (robj->rdev->fbdev_robj == robj) {
                mutex_lock(&robj->rdev->fbdev_info->lock);
                radeon_object_kunmap(robj);
        }
        tmp = robj->tobj.mem.placement;
        ttm_flag_masked(&tmp, flags, TTM_PL_MASK_MEM);
        robj->tobj.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT | TTM_PL_MASK_CACHING;
        r = ttm_buffer_object_validate(&robj->tobj,
                                       robj->tobj.proposed_placement,
                                       false, false);
        radeon_object_gpu_addr(robj);
        if (gpu_addr != NULL) {
                *gpu_addr = robj->gpu_addr;
        }
        robj->pin_count = 1;
        if (unlikely(r != 0)) {
                DRM_ERROR("radeon: failed to pin object.\n");
        }
        radeon_object_unreserve(robj);
        if (robj->rdev->fbdev_robj == robj) {
                if (!r) {
                        r = radeon_object_kmap(robj, &fbptr);
                }
                if (!r) {
                        robj->rdev->fbdev_info->screen_base = fbptr;
                        robj->rdev->fbdev_info->fix.smem_start = (unsigned long)fbptr;
                }
                mutex_unlock(&robj->rdev->fbdev_info->lock);
        }
        return r;
}

void radeon_object_unpin(struct radeon_object *robj)
{
        uint32_t flags;
        void *fbptr;
        int r;

        spin_lock(&robj->tobj.lock);
        if (!robj->pin_count) {
                spin_unlock(&robj->tobj.lock);
                printk(KERN_WARNING "Unpin not necessary for %p !\n", robj);
                return;
        }
        robj->pin_count--;
        if (robj->pin_count) {
                spin_unlock(&robj->tobj.lock);
                return;
        }
        spin_unlock(&robj->tobj.lock);
        r = radeon_object_reserve(robj, false);
        if (unlikely(r != 0)) {
                DRM_ERROR("radeon: failed to reserve object for unpinning it.\n");
                return;
        }
        if (robj->rdev->fbdev_robj == robj) {
                mutex_lock(&robj->rdev->fbdev_info->lock);
                radeon_object_kunmap(robj);
        }
        flags = robj->tobj.mem.placement;
        robj->tobj.proposed_placement = flags & ~TTM_PL_FLAG_NO_EVICT;
        r = ttm_buffer_object_validate(&robj->tobj,
                                       robj->tobj.proposed_placement,
                                       false, false);
        if (unlikely(r != 0)) {
                DRM_ERROR("radeon: failed to unpin buffer.\n");
        }
        radeon_object_unreserve(robj);
        if (robj->rdev->fbdev_robj == robj) {
                if (!r) {
                        r = radeon_object_kmap(robj, &fbptr);
                }
                if (!r) {
                        robj->rdev->fbdev_info->screen_base = fbptr;
                        robj->rdev->fbdev_info->fix.smem_start = (unsigned long)fbptr;
                }
                mutex_unlock(&robj->rdev->fbdev_info->lock);
        }
}

int radeon_object_wait(struct radeon_object *robj)
{
        int r;

        /* FIXME: should use block reservation instead */
        r = radeon_object_reserve(robj, true);
        if (unlikely(r != 0)) {
                DRM_ERROR("radeon: failed to reserve object for waiting.\n");
                return r;
        }
        spin_lock(&robj->tobj.lock);
        if (robj->tobj.sync_obj) {
                r = ttm_bo_wait(&robj->tobj, true, false, false);
        }
        spin_unlock(&robj->tobj.lock);
        radeon_object_unreserve(robj);
        return r;
}

int radeon_object_evict_vram(struct radeon_device *rdev)
{
        if (rdev->flags & RADEON_IS_IGP) {
                /* Useless to evict on IGP chips */
                return 0;
        }
        return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}

void radeon_object_force_delete(struct radeon_device *rdev)
{
        struct radeon_object *robj, *n;
        struct drm_gem_object *gobj;

        if (list_empty(&rdev->gem.objects)) {
                return;
        }
        DRM_ERROR("Userspace still has active objects !\n");
        list_for_each_entry_safe(robj, n, &rdev->gem.objects, list) {
                mutex_lock(&rdev->ddev->struct_mutex);
                gobj = robj->gobj;
                DRM_ERROR("Force free for (%p,%p,%lu,%lu)\n",
                          gobj, robj, (unsigned long)gobj->size,
                          *((unsigned long *)&gobj->refcount));
                list_del_init(&robj->list);
                radeon_object_unref(&robj);
                gobj->driver_private = NULL;
                drm_gem_object_unreference(gobj);
                mutex_unlock(&rdev->ddev->struct_mutex);
        }
}

int radeon_object_init(struct radeon_device *rdev)
{
        return radeon_ttm_init(rdev);
}

void radeon_object_fini(struct radeon_device *rdev)
{
        radeon_ttm_fini(rdev);
}

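/*
 * Queue a BO on a validation list: buffers with a write domain go to
 * the head of the list, read-only buffers to the tail.
 */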
void radeon_object_list_add_object(struct radeon_object_list *lobj,
                                   struct list_head *head)
{
        if (lobj->wdomain) {
                list_add(&lobj->list, head);
        } else {
                list_add_tail(&lobj->list, head);
        }
}

int radeon_object_list_reserve(struct list_head *head)
{
        struct radeon_object_list *lobj;
        struct list_head *i;
        int r;

        list_for_each(i, head) {
                lobj = list_entry(i, struct radeon_object_list, list);
                if (!lobj->robj->pin_count) {
                        r = radeon_object_reserve(lobj->robj, true);
                        if (unlikely(r != 0)) {
                                DRM_ERROR("radeon: failed to reserve object.\n");
                                return r;
                        }
                }
        }
        return 0;
}

void radeon_object_list_unreserve(struct list_head *head)
{
        struct radeon_object_list *lobj;
        struct list_head *i;

        list_for_each(i, head) {
                lobj = list_entry(i, struct radeon_object_list, list);
                if (!lobj->robj->pin_count) {
                        radeon_object_unreserve(lobj->robj);
                }
        }
}

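/*
 * Validate every BO on a list: reserve them all, move each unpinned BO
 * into a placement compatible with its read/write domains, record the
 * resulting GPU offset, and attach the given fence as each BO's sync
 * object, dropping any previous fence.
 */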
int radeon_object_list_validate(struct list_head *head, void *fence)
{
        struct radeon_object_list *lobj;
        struct radeon_object *robj;
        struct radeon_fence *old_fence = NULL;
        struct list_head *i;
        uint32_t flags;
        int r;

        r = radeon_object_list_reserve(head);
        if (unlikely(r != 0)) {
                radeon_object_list_unreserve(head);
                return r;
        }
        list_for_each(i, head) {
                lobj = list_entry(i, struct radeon_object_list, list);
                robj = lobj->robj;
                if (lobj->wdomain) {
                        flags = radeon_object_flags_from_domain(lobj->wdomain);
                        flags |= TTM_PL_FLAG_TT;
                } else {
                        flags = radeon_object_flags_from_domain(lobj->rdomain);
                        flags |= TTM_PL_FLAG_TT;
                        flags |= TTM_PL_FLAG_VRAM;
                }
                if (!robj->pin_count) {
                        robj->tobj.proposed_placement = flags | TTM_PL_MASK_CACHING;
                        r = ttm_buffer_object_validate(&robj->tobj,
                                                       robj->tobj.proposed_placement,
                                                       true, false);
                        if (unlikely(r)) {
                                radeon_object_list_unreserve(head);
                                DRM_ERROR("radeon: failed to validate.\n");
                                return r;
                        }
                        radeon_object_gpu_addr(robj);
                }
                lobj->gpu_offset = robj->gpu_addr;
                if (fence) {
                        old_fence = (struct radeon_fence *)robj->tobj.sync_obj;
                        robj->tobj.sync_obj = radeon_fence_ref(fence);
                        robj->tobj.sync_obj_arg = NULL;
                }
                if (old_fence) {
                        radeon_fence_unref(&old_fence);
                }
        }
        return 0;
}

void radeon_object_list_unvalidate(struct list_head *head)
{
        struct radeon_object_list *lobj;
        struct radeon_fence *old_fence = NULL;
        struct list_head *i;

        list_for_each(i, head) {
                lobj = list_entry(i, struct radeon_object_list, list);
                old_fence = (struct radeon_fence *)lobj->robj->tobj.sync_obj;
                lobj->robj->tobj.sync_obj = NULL;
                if (old_fence) {
                        radeon_fence_unref(&old_fence);
                }
        }
        radeon_object_list_unreserve(head);
}

void radeon_object_list_clean(struct list_head *head)
{
        radeon_object_list_unreserve(head);
}

int radeon_object_fbdev_mmap(struct radeon_object *robj,
                             struct vm_area_struct *vma)
{
        return ttm_fbdev_mmap(vma, &robj->tobj);
}

unsigned long radeon_object_size(struct radeon_object *robj)
{
        return robj->tobj.num_pages << PAGE_SHIFT;
}