/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */

#include <linux/pci.h>

#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/radeon_drm.h>

#include "radeon.h"
#include "radeon_prime.h"

struct dma_buf *radeon_gem_prime_export(struct drm_gem_object *gobj,
					int flags);
struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj);
int radeon_gem_prime_pin(struct drm_gem_object *obj);
void radeon_gem_prime_unpin(struct drm_gem_object *obj);

static const struct drm_gem_object_funcs radeon_gem_object_funcs;
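
/* Tear down a GEM object: unregister any MMU notifier attached to the BO
 * and drop the final buffer object reference.
 */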
static void radeon_gem_object_free(struct drm_gem_object *gobj)
{
	struct radeon_bo *robj = gem_to_radeon_bo(gobj);

	if (robj) {
		radeon_mn_unregister(robj);
		radeon_bo_unref(&robj);
	}
}
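
/* Allocate a BO and wrap it in a GEM object. If a pure VRAM allocation
 * fails, the allocation is retried with GTT added to the domain mask so
 * the object can still be placed in system memory.
 */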
int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
			     int alignment, int initial_domain,
			     u32 flags, bool kernel,
			     struct drm_gem_object **obj)
{
	struct radeon_bo *robj;
	unsigned long max_size;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

	/* Maximum bo size is the unpinned gtt size since we use the gtt to
	 * handle vram to system pool migrations.
	 */
	max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
	if (size > max_size) {
		DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
			  size >> 20, max_size >> 20);
		return -ENOMEM;
	}

retry:
	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
			     flags, NULL, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
				initial_domain |= RADEON_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->tbo.base;
	(*obj)->funcs = &radeon_gem_object_funcs;
	robj->pid = task_pid_nr(current);

	mutex_lock(&rdev->gem.mutex);
	list_add_tail(&robj->list, &rdev->gem.objects);
	mutex_unlock(&rdev->gem.mutex);

	return 0;
}
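
/* Validate a requested read/write domain combination. For CPU access this
 * only waits for the BO to become idle; dma-buf backed BOs are refused a
 * move to VRAM because other devices may still depend on their current
 * placement.
 */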
static int radeon_gem_set_domain(struct drm_gem_object *gobj,
				 uint32_t rdomain, uint32_t wdomain)
{
	struct radeon_bo *robj;
	uint32_t domain;
	long r;

	/* FIXME: reimplement */
	robj = gem_to_radeon_bo(gobj);
	/* work out where to validate the buffer to */
	domain = wdomain;
	if (!domain) {
		domain = rdomain;
	}
	if (!domain) {
		/* Do nothing */
		pr_warn("Set domain without domain !\n");
		return 0;
	}
	if (domain == RADEON_GEM_DOMAIN_CPU) {
		/* Asking for cpu access wait for object idle */
		r = dma_resv_wait_timeout_rcu(robj->tbo.base.resv,
					      true, true, 30 * HZ);
		if (!r)
			r = -EBUSY;

		if (r < 0 && r != -EINTR) {
			pr_err("Failed to wait for object: %li\n", r);
			return r;
		}
	}
	if (domain == RADEON_GEM_DOMAIN_VRAM && robj->prime_shared_count) {
		/* A BO that is associated with a dma-buf cannot be sensibly migrated to VRAM */
		return -EINVAL;
	}
	return 0;
}

int radeon_gem_init(struct radeon_device *rdev)
{
	INIT_LIST_HEAD(&rdev->gem.objects);
	return 0;
}

void radeon_gem_fini(struct radeon_device *rdev)
{
	radeon_bo_force_delete(rdev);
}

/*
 * Called from drm_gem_handle_create(), which is used by both the GEM
 * create and open ioctls.
 */
static int radeon_gem_object_open(struct drm_gem_object *obj,
				  struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return 0;
	}

	r = radeon_bo_reserve(rbo, false);
	if (r) {
		return r;
	}

	bo_va = radeon_vm_bo_find(vm, rbo);
	if (!bo_va) {
		bo_va = radeon_vm_bo_add(rdev, vm, rbo);
	} else {
		++bo_va->ref_count;
	}
	radeon_bo_unreserve(rbo);

	return 0;
}

static void radeon_gem_object_close(struct drm_gem_object *obj,
				    struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return;
	}

	r = radeon_bo_reserve(rbo, true);
	if (r) {
		dev_err(rdev->dev, "leaking bo va because we fail to reserve bo (%d)\n", r);
		return;
	}
	bo_va = radeon_vm_bo_find(vm, rbo);
	if (bo_va) {
		if (--bo_va->ref_count == 0) {
			radeon_vm_bo_rmv(rdev, bo_va);
		}
	}
	radeon_bo_unreserve(rbo);
}
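
/* If an operation failed because the GPU locked up (-EDEADLK), trigger a
 * GPU reset and ask userspace to retry the submission with -EAGAIN.
 */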
static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}

static const struct drm_gem_object_funcs radeon_gem_object_funcs = {
	.free = radeon_gem_object_free,
	.open = radeon_gem_object_open,
	.close = radeon_gem_object_close,
	.export = radeon_gem_prime_export,
	.pin = radeon_gem_prime_pin,
	.unpin = radeon_gem_prime_unpin,
	.get_sg_table = radeon_gem_prime_get_sg_table,
	.vmap = drm_gem_ttm_vmap,
	.vunmap = drm_gem_ttm_vunmap,
};
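
/*
 * GEM ioctls.
 */

/* Report the VRAM and GART sizes to userspace. The currently pinned
 * amounts are subtracted from the visible VRAM and GART figures, so the
 * numbers reflect what is still available for new allocations.
 */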
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_info *args = data;
	struct ttm_resource_manager *man;

	man = ttm_manager_type(&rdev->mman.bdev, TTM_PL_VRAM);

	args->vram_size = (u64)man->size << PAGE_SHIFT;
	args->vram_visible = rdev->mc.visible_vram_size;
	args->vram_visible -= rdev->vram_pin_size;
	args->gart_size = rdev->mc.gtt_size;
	args->gart_size -= rdev->gart_pin_size;

	return 0;
}

int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}
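
/* Allocate a BO on behalf of userspace and return a GEM handle for it.
 * The requested size is rounded up to a whole page before allocation.
 *
 * A minimal sketch of the matching userspace call, assuming a libdrm
 * file descriptor "fd" (illustration only, not part of this file):
 *
 *	struct drm_radeon_gem_create req = {
 *		.size = 4096,
 *		.alignment = 4096,
 *		.initial_domain = RADEON_GEM_DOMAIN_GTT,
 *	};
 *	drmCommandWriteRead(fd, DRM_RADEON_GEM_CREATE, &req, sizeof(req));
 *	// on success, req.handle names the new BO
 */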
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_create *args = data;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	down_read(&rdev->exclusive_lock);
	/* create a gem object to contain this object in */
	args->size = roundup(args->size, PAGE_SIZE);
	r = radeon_gem_object_create(rdev, args->size, args->alignment,
				     args->initial_domain, args->flags,
				     false, &gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;
}
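
/* Turn an anonymous userspace allocation into a GEM object. Writable
 * mappings are only allowed when the caller accepts the ANONONLY and
 * REGISTER restrictions: writes to file-backed pages could be silently
 * dropped, and an MMU notifier is needed to catch unmaps.
 */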
int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *bo;
	uint32_t handle;
	int r;

	args->addr = untagged_addr(args->addr);

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(RADEON_GEM_USERPTR_READONLY |
	    RADEON_GEM_USERPTR_ANONONLY | RADEON_GEM_USERPTR_VALIDATE |
	    RADEON_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (args->flags & RADEON_GEM_USERPTR_READONLY) {
		/* readonly pages not tested on older hardware */
		if (rdev->family < CHIP_R600)
			return -EINVAL;

	} else if (!(args->flags & RADEON_GEM_USERPTR_ANONONLY) ||
		   !(args->flags & RADEON_GEM_USERPTR_REGISTER)) {

		/* if we want to write to it we must require anonymous
		 * memory and install a MMU notifier */
		return -EACCES;
	}

	down_read(&rdev->exclusive_lock);

	/* create a gem object to contain this object in */
	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_CPU, 0,
				     false, &gobj);
	if (r)
		goto handle_lockup;

	bo = gem_to_radeon_bo(gobj);
	r = radeon_ttm_tt_set_userptr(rdev, bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & RADEON_GEM_USERPTR_REGISTER) {
		r = radeon_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & RADEON_GEM_USERPTR_VALIDATE) {
		mmap_read_lock(current->mm);
		r = radeon_bo_reserve(bo, true);
		if (r) {
			mmap_read_unlock(current->mm);
			goto release_object;
		}

		radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		radeon_bo_unreserve(bo);
		mmap_read_unlock(current->mm);
		if (r)
			goto release_object;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(gobj);
	if (r)
		goto handle_lockup;

	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;

release_object:
	drm_gem_object_put(gobj);

handle_lockup:
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(rdev, r);

	return r;
}
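
/* The set_domain ioctl is reduced to a simple wait: for CPU access the BO
 * only has to become idle, no actual migration is performed here.
 */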
int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	/* transition the BO to a domain -
	 * just validate the BO into a certain domain */
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_set_domain *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	/* for now if someone requests domain CPU -
	 * just make sure the buffer is finished with */
	down_read(&rdev->exclusive_lock);

	/* just do a BO wait for now */
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		up_read(&rdev->exclusive_lock);
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

	drm_gem_object_put(gobj);
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(robj->rdev, r);
	return r;
}
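
/* Look up the fake mmap offset for a BO. Userptr BOs are refused with
 * -EPERM because their pages must not be mapped through the GEM offset.
 */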
int radeon_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	if (radeon_ttm_tt_has_userptr(robj->rdev, robj->tbo.ttm)) {
		drm_gem_object_put(gobj);
		return -EPERM;
	}
	*offset_p = radeon_bo_mmap_offset(robj);
	drm_gem_object_put(gobj);
	return 0;
}

int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_mmap *args = data;

	return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}
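
/* Non-blocking check whether a BO is still in use by the GPU; also
 * reports the BO's current memory domain back to userspace.
 */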
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_busy *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;
	uint32_t cur_placement = 0;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = dma_resv_test_signaled_rcu(robj->tbo.base.resv, true);
	if (r == 0)
		r = -EBUSY;
	else
		r = 0;

	cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
	args->domain = radeon_mem_type_to_domain(cur_placement);
	drm_gem_object_put(gobj);
	return r;
}
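
/* Block (up to 30 seconds) until a BO is idle. If the BO lives in VRAM,
 * the HDP cache is flushed afterwards so CPU reads see coherent data.
 */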
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;
	uint32_t cur_placement = 0;
	long ret;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
	if (ret == 0)
		r = -EBUSY;
	else if (ret < 0)
		r = ret;

	/* Flush HDP cache via MMIO if necessary */
	cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
	if (rdev->asic->mmio_hdp_flush &&
	    radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
		robj->rdev->asic->mmio_hdp_flush(rdev);
	drm_gem_object_put(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_set_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
	drm_gem_object_put(gobj);
	return r;
}

int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_get_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *rbo;
	int r = 0;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		goto out;
	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
	radeon_bo_unreserve(rbo);
out:
	drm_gem_object_put(gobj);
	return r;
}

/**
 * radeon_gem_va_update_vm - update the bo_va in its VM
 *
 * @rdev: radeon_device pointer
 * @bo_va: bo_va to update
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void radeon_gem_va_update_vm(struct radeon_device *rdev,
				    struct radeon_bo_va *bo_va)
{
	struct ttm_validate_buffer tv, *entry;
	struct radeon_bo_list *vm_bos;
	struct ww_acquire_ctx ticket;
	struct list_head list;
	unsigned domain;
	int r;

	INIT_LIST_HEAD(&list);

	tv.bo = &bo_va->bo->tbo;
	tv.num_shared = 1;
	list_add(&tv.head, &list);

	vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list);
	if (!vm_bos)
		return;

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r)
		goto error_free;

	list_for_each_entry(entry, &list, head) {
		domain = radeon_mem_type_to_domain(entry->bo->mem.mem_type);
		/* if anything is swapped out don't swap it in here,
		 * just abort and wait for the next CS */
		if (domain == RADEON_GEM_DOMAIN_CPU)
			goto error_unreserve;
	}

	mutex_lock(&bo_va->vm->mutex);
	r = radeon_vm_clear_freed(rdev, bo_va->vm);
	if (r)
		goto error_unlock;

	if (bo_va->it.start)
		r = radeon_vm_bo_update(rdev, bo_va, &bo_va->bo->tbo.mem);

error_unlock:
	mutex_unlock(&bo_va->vm->mutex);

error_unreserve:
	ttm_eu_backoff_reservation(&ticket, &list);

error_free:
	kvfree(vm_bos);

	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}
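
/* Map or unmap a BO in a per-file GPU virtual address space. The request
 * is validated first: vm_id must be 0, the offset must lie outside the
 * reserved area, and the VALID/SYSTEM page flags are kernel-managed and
 * therefore rejected.
 */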
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fpriv *fpriv = filp->driver_priv;
	struct radeon_bo *rbo;
	struct radeon_bo_va *bo_va;
	u32 invalid_flags;
	int r = 0;

	if (!rdev->vm_manager.enabled) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOTTY;
	}

	/* !! DONT REMOVE !!
	 * We don't support vm_id yet. To be sure we don't have broken
	 * userspace, reject anyone trying to use a non 0 value; thus moving
	 * forward we can use those fields without breaking existing userspace.
	 */
	if (args->vm_id) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	if (args->offset < RADEON_VA_RESERVED_SIZE) {
		dev_err(&dev->pdev->dev,
			"offset 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->offset,
			RADEON_VA_RESERVED_SIZE);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	/* don't remove, we need to enforce userspace to set the snooped flag
	 * otherwise we will end up with broken userspace and we won't be able
	 * to enable this feature without adding a new interface
	 */
	invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
	if ((args->flags & invalid_flags)) {
		dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->flags, invalid_flags);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
	case RADEON_VA_UNMAP:
		break;
	default:
		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOENT;
	}
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_put(gobj);
		return r;
	}
	bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
	if (!bo_va) {
		args->operation = RADEON_VA_RESULT_ERROR;
		radeon_bo_unreserve(rbo);
		drm_gem_object_put(gobj);
		return -ENOENT;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
		if (bo_va->it.start) {
			args->operation = RADEON_VA_RESULT_VA_EXIST;
			args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE;
			radeon_bo_unreserve(rbo);
			goto out;
		}
		r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
		break;
	case RADEON_VA_UNMAP:
		r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
		break;
	default:
		break;
	}
	if (!r)
		radeon_gem_va_update_vm(rdev, bo_va);
	args->operation = RADEON_VA_RESULT_OK;
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
	}
out:
	drm_gem_object_put(gobj);
	return r;
}
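
/* Get or set per-BO state. Currently only the initial placement domain is
 * exposed; userptr BOs are rejected since their placement is fixed.
 */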
int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = -EPERM;
	if (radeon_ttm_tt_has_userptr(robj->rdev, robj->tbo.ttm))
		goto out;

	r = radeon_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case RADEON_GEM_OP_GET_INITIAL_DOMAIN:
		args->value = robj->initial_domain;
		break;
	case RADEON_GEM_OP_SET_INITIAL_DOMAIN:
		robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM |
						      RADEON_GEM_DOMAIN_GTT |
						      RADEON_GEM_DOMAIN_CPU);
		break;
	default:
		r = -EINVAL;
	}

	radeon_bo_unreserve(robj);
out:
	drm_gem_object_put(gobj);
	return r;
}
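
/* Create a dumb (unaccelerated scanout) buffer: compute a hardware
 * friendly pitch, round the size up to a whole page, and back it with a
 * VRAM BO.
 */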
int radeon_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = radeon_align_pitch(rdev, args->width,
					 DIV_ROUND_UP(args->bpp, 8), 0);
	args->size = args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_VRAM, 0,
				     false, &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_bo *rbo;
	unsigned i = 0;

	mutex_lock(&rdev->gem.mutex);
	list_for_each_entry(rbo, &rdev->gem.objects, list) {
		unsigned domain;
		const char *placement;

		domain = radeon_mem_type_to_domain(rbo->tbo.mem.mem_type);
		switch (domain) {
		case RADEON_GEM_DOMAIN_VRAM:
			placement = "VRAM";
			break;
		case RADEON_GEM_DOMAIN_GTT:
			placement = " GTT";
			break;
		case RADEON_GEM_DOMAIN_CPU:
		default:
			placement = " CPU";
			break;
		}
		seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
			   i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20,
			   placement, (unsigned long)rbo->pid);
		i++;
	}
	mutex_unlock(&rdev->gem.mutex);
	return 0;
}

static struct drm_info_list radeon_debugfs_gem_list[] = {
	{"radeon_gem_info", &radeon_debugfs_gem_info, 0, NULL},
};
#endif

int radeon_gem_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_gem_list, 1);
#endif
	return 0;
}