/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <linux/pci.h>

#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/radeon_drm.h>

#include "radeon.h"
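/**
 * radeon_gem_object_free - free the radeon_bo backing a GEM object
 *
 * @gobj: GEM object being destroyed
 *
 * Drops the MMU notifier registration and the buffer object reference
 * once the GEM object's refcount reaches zero.
 */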
void radeon_gem_object_free(struct drm_gem_object *gobj)
{
	struct radeon_bo *robj = gem_to_radeon_bo(gobj);

	if (robj) {
		radeon_mn_unregister(robj);
		radeon_bo_unref(&robj);
	}
}
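/**
 * radeon_gem_object_create - allocate a radeon_bo and wrap it in a GEM object
 *
 * @rdev: radeon_device pointer
 * @size: requested size in bytes
 * @alignment: requested alignment, raised to at least PAGE_SIZE
 * @initial_domain: preferred placement (VRAM, GTT or CPU)
 * @flags: radeon BO creation flags
 * @kernel: true for kernel-internal allocations
 * @obj: where to store the new GEM object on success
 *
 * Retries with a VRAM|GTT placement if a pure VRAM allocation fails, and
 * tracks the new object on rdev->gem.objects for debugfs.
 */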
int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
			     int alignment, int initial_domain,
			     u32 flags, bool kernel,
			     struct drm_gem_object **obj)
{
	struct radeon_bo *robj;
	unsigned long max_size;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

	/* Maximum bo size is the unpinned gtt size since we use the gtt to
	 * handle vram to system pool migrations.
	 */
	max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
	if (size > max_size) {
		DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
			  size >> 20, max_size >> 20);
		return -ENOMEM;
	}

retry:
	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
			     flags, NULL, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
				initial_domain |= RADEON_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->tbo.base;
	robj->pid = task_pid_nr(current);

	mutex_lock(&rdev->gem.mutex);
	list_add_tail(&robj->list, &rdev->gem.objects);
	mutex_unlock(&rdev->gem.mutex);

	return 0;
}
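/**
 * radeon_gem_set_domain - handle a set_domain request on a BO
 *
 * @gobj: GEM object affected
 * @rdomain: requested read domains
 * @wdomain: requested write domain
 *
 * For CPU access this just waits for the BO to go idle; BOs shared via
 * dma-buf are refused a migration to VRAM.
 */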
static int radeon_gem_set_domain(struct drm_gem_object *gobj,
				 uint32_t rdomain, uint32_t wdomain)
{
	struct radeon_bo *robj;
	uint32_t domain;
	long r;

	/* FIXME: reimplement */
	robj = gem_to_radeon_bo(gobj);
	/* work out where to validate the buffer to */
	domain = wdomain;
	if (!domain)
		domain = rdomain;
	if (!domain) {
		/* Do nothing */
		pr_warn("Set domain without domain !\n");
		return 0;
	}
	if (domain == RADEON_GEM_DOMAIN_CPU) {
		/* Asking for cpu access wait for object idle */
		r = dma_resv_wait_timeout_rcu(robj->tbo.base.resv,
					      true, true, 30 * HZ);
		if (!r)
			r = -EBUSY;

		if (r < 0 && r != -EINTR) {
			pr_err("Failed to wait for object: %li\n", r);
			return r;
		}
	}
	if (domain == RADEON_GEM_DOMAIN_VRAM && robj->prime_shared_count) {
		/* A BO that is associated with a dma-buf cannot be sensibly migrated to VRAM */
		return -EINVAL;
	}
	return 0;
}
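/* Set up per-device GEM state: the list of all GEM objects. */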
int radeon_gem_init(struct radeon_device *rdev)
{
	INIT_LIST_HEAD(&rdev->gem.objects);
	return 0;
}
void radeon_gem_fini(struct radeon_device *rdev)
{
	radeon_bo_force_delete(rdev);
}
/*
 * Called from drm_gem_handle_create, which appears in both the new and
 * open ioctl paths.
 */
int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return 0;
	}

	r = radeon_bo_reserve(rbo, false);
	if (r) {
		return r;
	}

	bo_va = radeon_vm_bo_find(vm, rbo);
	if (!bo_va) {
		bo_va = radeon_vm_bo_add(rdev, vm, rbo);
	} else {
		++bo_va->ref_count;
	}
	radeon_bo_unreserve(rbo);

	return 0;
}
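/*
 * Called when a handle to the object is dropped; removes the per-VM
 * mapping once its reference count reaches zero.
 */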
void radeon_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return;
	}

	r = radeon_bo_reserve(rbo, true);
	if (r) {
		dev_err(rdev->dev, "leaking bo va because "
			"we fail to reserve bo (%d)\n", r);
		return;
	}
	bo_va = radeon_vm_bo_find(vm, rbo);
	if (bo_va) {
		if (--bo_va->ref_count == 0) {
			radeon_vm_bo_rmv(rdev, bo_va);
		}
	}
	radeon_bo_unreserve(rbo);
}
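/*
 * If an ioctl failed because of a GPU lockup (-EDEADLK), try a GPU reset
 * and ask userspace to retry by converting the error to -EAGAIN.
 */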
static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}
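/*
 * GEM ioctls.
 */

/*
 * Report total VRAM, CPU-visible VRAM and GART sizes to userspace, less
 * whatever is currently pinned.
 */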
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_info *args = data;
	struct ttm_mem_type_manager *man;

	man = &rdev->mman.bdev.man[TTM_PL_VRAM];

	args->vram_size = (u64)man->size << PAGE_SHIFT;
	args->vram_visible = rdev->mc.visible_vram_size;
	args->vram_visible -= rdev->vram_pin_size;
	args->gart_size = rdev->mc.gtt_size;
	args->gart_size -= rdev->gart_pin_size;

	return 0;
}
int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}
int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}
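/*
 * Allocate a new GEM object for userspace. Roughly, userspace drives this
 * through DRM_IOCTL_RADEON_GEM_CREATE with a drm_radeon_gem_create argument
 * (size/alignment/initial_domain in, handle out); the ioctl number and
 * struct are defined in include/uapi/drm/radeon_drm.h.
 */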
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_create *args = data;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	down_read(&rdev->exclusive_lock);
	/* create a gem object to contain this object in */
	args->size = roundup(args->size, PAGE_SIZE);
	r = radeon_gem_object_create(rdev, args->size, args->alignment,
				     args->initial_domain, args->flags,
				     false, &gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;
}
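/*
 * Turn a range of anonymous user memory into a GEM object. The flag checks
 * below enforce that writable mappings use anonymous memory with an MMU
 * notifier registered, so the pages can be safely invalidated later.
 */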
int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *bo;
	uint32_t handle;
	int r;

	args->addr = untagged_addr(args->addr);

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(RADEON_GEM_USERPTR_READONLY |
	    RADEON_GEM_USERPTR_ANONONLY | RADEON_GEM_USERPTR_VALIDATE |
	    RADEON_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (args->flags & RADEON_GEM_USERPTR_READONLY) {
		/* readonly pages not tested on older hardware */
		if (rdev->family < CHIP_R600)
			return -EINVAL;

	} else if (!(args->flags & RADEON_GEM_USERPTR_ANONONLY) ||
		   !(args->flags & RADEON_GEM_USERPTR_REGISTER)) {

		/* if we want to write to it we must require anonymous
		   memory and install a MMU notifier */
		return -EACCES;
	}

	down_read(&rdev->exclusive_lock);

	/* create a gem object to contain this object in */
	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_CPU, 0,
				     false, &gobj);
	if (r)
		goto handle_lockup;

	bo = gem_to_radeon_bo(gobj);
	r = radeon_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & RADEON_GEM_USERPTR_REGISTER) {
		r = radeon_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & RADEON_GEM_USERPTR_VALIDATE) {
		down_read(&current->mm->mmap_sem);
		r = radeon_bo_reserve(bo, true);
		if (r) {
			up_read(&current->mm->mmap_sem);
			goto release_object;
		}

		radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		radeon_bo_unreserve(bo);
		up_read(&current->mm->mmap_sem);
		if (r)
			goto release_object;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r)
		goto handle_lockup;

	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;

release_object:
	drm_gem_object_put_unlocked(gobj);

handle_lockup:
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(rdev, r);

	return r;
}
int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	/* transition the BO to a domain -
	 * just validate the BO into a certain domain */
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_set_domain *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	/* for now if someone requests domain CPU -
	 * just make sure the buffer is finished with */
	down_read(&rdev->exclusive_lock);

	/* just do a BO wait for now */
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		up_read(&rdev->exclusive_lock);
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

	drm_gem_object_put_unlocked(gobj);
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(robj->rdev, r);
	return r;
}
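/*
 * Look up the fake mmap offset for a BO so userspace can map it; userptr
 * BOs are refused since their pages already live in userspace.
 */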
int radeon_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) {
		drm_gem_object_put_unlocked(gobj);
		return -EPERM;
	}
	*offset_p = radeon_bo_mmap_offset(robj);
	drm_gem_object_put_unlocked(gobj);
	return 0;
}
int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_mmap *args = data;

	return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}
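/*
 * Report whether a BO is still busy (fences pending) and its current
 * memory domain, without blocking.
 */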
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_busy *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;
	uint32_t cur_placement = 0;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = dma_resv_test_signaled_rcu(robj->tbo.base.resv, true);
	if (r == 0)
		r = -EBUSY;
	else
		r = 0;

	cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
	args->domain = radeon_mem_type_to_domain(cur_placement);
	drm_gem_object_put_unlocked(gobj);
	return r;
}
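/*
 * Block (up to 30 seconds) until a BO is idle, flushing the HDP cache for
 * VRAM placements so CPU reads see coherent data afterwards.
 */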
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;
	uint32_t cur_placement = 0;
	long ret;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
	if (ret == 0)
		r = -EBUSY;
	else if (ret < 0)
		r = ret;

	/* Flush HDP cache via MMIO if necessary */
	cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
	if (rdev->asic->mmio_hdp_flush &&
	    radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
		robj->rdev->asic->mmio_hdp_flush(rdev);
	drm_gem_object_put_unlocked(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}
int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_set_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;

	DRM_DEBUG("%d \n", args->handle);
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
	drm_gem_object_put_unlocked(gobj);
	return r;
}
int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_get_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *rbo;
	int r = 0;

	DRM_DEBUG("\n");
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		goto out;
	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
	radeon_bo_unreserve(rbo);
out:
	drm_gem_object_put_unlocked(gobj);
	return r;
}
/**
 * radeon_gem_va_update_vm - update the bo_va in its VM
 *
 * @rdev: radeon_device pointer
 * @bo_va: bo_va to update
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void radeon_gem_va_update_vm(struct radeon_device *rdev,
				    struct radeon_bo_va *bo_va)
{
	struct ttm_validate_buffer tv, *entry;
	struct radeon_bo_list *vm_bos;
	struct ww_acquire_ctx ticket;
	struct list_head list;
	unsigned domain;
	int r;

	INIT_LIST_HEAD(&list);

	tv.bo = &bo_va->bo->tbo;
	tv.num_shared = 1;
	list_add(&tv.head, &list);

	vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list);
	if (!vm_bos)
		return;

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r)
		goto error_free;

	list_for_each_entry(entry, &list, head) {
		domain = radeon_mem_type_to_domain(entry->bo->mem.mem_type);
		/* if anything is swapped out don't swap it in here,
		   just abort and wait for the next CS */
		if (domain == RADEON_GEM_DOMAIN_CPU)
			goto error_unreserve;
	}

	mutex_lock(&bo_va->vm->mutex);
	r = radeon_vm_clear_freed(rdev, bo_va->vm);
	if (r)
		goto error_unlock;

	if (bo_va->it.start)
		r = radeon_vm_bo_update(rdev, bo_va, &bo_va->bo->tbo.mem);

error_unlock:
	mutex_unlock(&bo_va->vm->mutex);

error_unreserve:
	ttm_eu_backoff_reservation(&ticket, &list);

error_free:
	kvfree(vm_bos);

	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fpriv *fpriv = filp->driver_priv;
	struct radeon_bo *rbo;
	struct radeon_bo_va *bo_va;
	u32 invalid_flags;
	int r = 0;

	if (!rdev->vm_manager.enabled) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOTTY;
	}

	/* !! DONT REMOVE !!
	 * We don't support vm_id yet, to be sure we don't have broken
	 * userspace, reject anyone trying to use non 0 value thus moving
	 * forward we can use those fields without breaking existing userspace.
	 */
	if (args->vm_id) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	if (args->offset < RADEON_VA_RESERVED_SIZE) {
		dev_err(&dev->pdev->dev,
			"offset 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->offset,
			RADEON_VA_RESERVED_SIZE);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	/* don't remove, we need to enforce userspace to set the snooped flag
	 * otherwise we will end up with broken userspace and we won't be able
	 * to enable this feature without adding new interface
	 */
	invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
	if ((args->flags & invalid_flags)) {
		dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->flags, invalid_flags);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
	case RADEON_VA_UNMAP:
		break;
	default:
		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOENT;
	}
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_put_unlocked(gobj);
		return r;
	}
	bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
	if (!bo_va) {
		args->operation = RADEON_VA_RESULT_ERROR;
		radeon_bo_unreserve(rbo);
		drm_gem_object_put_unlocked(gobj);
		return -ENOENT;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
		if (bo_va->it.start) {
			args->operation = RADEON_VA_RESULT_VA_EXIST;
			args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE;
			radeon_bo_unreserve(rbo);
			goto out;
		}
		r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
		break;
	case RADEON_VA_UNMAP:
		r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
		break;
	default:
		break;
	}
	if (!r)
		radeon_gem_va_update_vm(rdev, bo_va);
	args->operation = RADEON_VA_RESULT_OK;
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
	}
out:
	drm_gem_object_put_unlocked(gobj);
	return r;
}
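/*
 * Get or set a BO's initial placement domain; userptr BOs are excluded.
 */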
int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = -EPERM;
	if (radeon_ttm_tt_has_userptr(robj->tbo.ttm))
		goto out;

	r = radeon_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case RADEON_GEM_OP_GET_INITIAL_DOMAIN:
		args->value = robj->initial_domain;
		break;
	case RADEON_GEM_OP_SET_INITIAL_DOMAIN:
		robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM |
						      RADEON_GEM_DOMAIN_GTT |
						      RADEON_GEM_DOMAIN_CPU);
		break;
	default:
		r = -EINVAL;
	}

	radeon_bo_unreserve(robj);
out:
	drm_gem_object_put_unlocked(gobj);
	return r;
}
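/*
 * Create a dumb scanout buffer: pitch is the aligned row size in bytes
 * (e.g. a 1024-wide, 32bpp buffer gives pitch = align(1024 * 4)), and the
 * total size is pitch * height rounded up to a whole page.
 */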
int radeon_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = radeon_align_pitch(rdev, args->width,
					 DIV_ROUND_UP(args->bpp, 8), 0);
	args->size = args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_VRAM, 0,
				     false, &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_bo *rbo;
	unsigned i = 0;

	mutex_lock(&rdev->gem.mutex);
	list_for_each_entry(rbo, &rdev->gem.objects, list) {
		unsigned domain;
		const char *placement;

		domain = radeon_mem_type_to_domain(rbo->tbo.mem.mem_type);
		switch (domain) {
		case RADEON_GEM_DOMAIN_VRAM:
			placement = "VRAM";
			break;
		case RADEON_GEM_DOMAIN_GTT:
			placement = " GTT";
			break;
		case RADEON_GEM_DOMAIN_CPU:
		default:
			placement = " CPU";
			break;
		}
		seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
			   i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20,
			   placement, (unsigned long)rbo->pid);
		i++;
	}
	mutex_unlock(&rdev->gem.mutex);
	return 0;
}

static struct drm_info_list radeon_debugfs_gem_list[] = {
	{"radeon_gem_info", &radeon_debugfs_gem_info, 0, NULL},
};
#endif

int radeon_gem_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_gem_list, 1);
#endif
	return 0;
}