/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <linux/ktime.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
        struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);

        if (robj) {
                if (robj->gem_base.import_attach)
                        drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
                amdgpu_mn_unregister(robj);
                amdgpu_bo_unref(&robj);
        }
}
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
                             int alignment, u32 initial_domain,
                             u64 flags, bool kernel,
                             struct drm_gem_object **obj)
{
        struct amdgpu_bo *robj;
        unsigned long max_size;
        int r;

        *obj = NULL;
        /* At least align on page size */
        if (alignment < PAGE_SIZE) {
                alignment = PAGE_SIZE;
        }

        if (!(initial_domain & (AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA))) {
                /* Maximum bo size is the unpinned gtt size since we use the gtt to
                 * handle vram to system pool migrations.
                 */
                max_size = adev->mc.gtt_size - adev->gart_pin_size;
                if (size > max_size) {
                        DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
                                  size >> 20, max_size >> 20);
                        return -ENOMEM;
                }
        }
retry:
        r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain,
                             flags, NULL, NULL, &robj);
        if (r) {
                if (r != -ERESTARTSYS) {
                        /* fall back to GTT if a pure VRAM allocation failed */
                        if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
                                initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
                                goto retry;
                        }
                        DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
                                  size, initial_domain, alignment, r);
                }
                return r;
        }
        *obj = &robj->gem_base;
        robj->pid = task_pid_nr(current);

        mutex_lock(&adev->gem.mutex);
        list_add_tail(&robj->list, &adev->gem.objects);
        mutex_unlock(&adev->gem.mutex);

        return 0;
}
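/*
 * Informal usage sketch, not part of the original file: a driver-internal
 * caller could allocate a page-aligned GTT buffer with this helper roughly
 * as follows (the sizes below are illustrative only):
 *
 *	struct drm_gem_object *gobj;
 *	int r = amdgpu_gem_object_create(adev, 64 * 1024, PAGE_SIZE,
 *					 AMDGPU_GEM_DOMAIN_GTT, 0,
 *					 false, &gobj);
 *	if (!r)
 *		drm_gem_object_unreference_unlocked(gobj);
 */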
int amdgpu_gem_init(struct amdgpu_device *adev)
{
        INIT_LIST_HEAD(&adev->gem.objects);
        return 0;
}
void amdgpu_gem_fini(struct amdgpu_device *adev)
{
        amdgpu_bo_force_delete(adev);
}
/*
 * Called from drm_gem_handle_create(), which is used by both the new and
 * the open ioctl paths.
 */
int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
        struct amdgpu_bo *rbo = gem_to_amdgpu_bo(obj);
        struct amdgpu_device *adev = rbo->adev;
        struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
        struct amdgpu_vm *vm = &fpriv->vm;
        struct amdgpu_bo_va *bo_va;
        int r;

        r = amdgpu_bo_reserve(rbo, false);
        if (r)
                return r;

        /* the bo_va for this BO is reference counted per file/VM */
        bo_va = amdgpu_vm_bo_find(vm, rbo);
        if (!bo_va)
                bo_va = amdgpu_vm_bo_add(adev, vm, rbo);
        else
                ++bo_va->ref_count;
        amdgpu_bo_unreserve(rbo);
        return 0;
}
void amdgpu_gem_object_close(struct drm_gem_object *obj,
                             struct drm_file *file_priv)
{
        struct amdgpu_bo *rbo = gem_to_amdgpu_bo(obj);
        struct amdgpu_device *adev = rbo->adev;
        struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
        struct amdgpu_vm *vm = &fpriv->vm;
        struct amdgpu_bo_va *bo_va;
        int r;

        r = amdgpu_bo_reserve(rbo, true);
        if (r) {
                dev_err(adev->dev, "leaking bo va because "
                        "we fail to reserve bo (%d)\n", r);
                return;
        }
        bo_va = amdgpu_vm_bo_find(vm, rbo);
        if (bo_va) {
                if (--bo_va->ref_count == 0)
                        amdgpu_vm_bo_rmv(adev, bo_va);
        }
        amdgpu_bo_unreserve(rbo);
}
static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r)
{
        if (r == -EDEADLK) {
                r = amdgpu_gpu_reset(adev);
                if (!r)
                        r = -EAGAIN;
        }
        return r;
}
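/*
 * Note added for clarity: as reconstructed above, a -EDEADLK error is taken
 * as a sign of a GPU lockup, so the ioctl paths funnel their return codes
 * through this helper to trigger a reset and hand -EAGAIN back to userspace
 * so the operation can be retried.
 */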
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *filp)
{
        struct amdgpu_device *adev = dev->dev_private;
        union drm_amdgpu_gem_create *args = data;
        uint64_t size = args->in.bo_size;
        struct drm_gem_object *gobj;
        uint32_t handle;
        bool kernel = false;
        int r;

        /* create a gem object to contain this object in */
        if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
            AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
                kernel = true;
                if (args->in.domains == AMDGPU_GEM_DOMAIN_GDS)
                        size = size << AMDGPU_GDS_SHIFT;
                else if (args->in.domains == AMDGPU_GEM_DOMAIN_GWS)
                        size = size << AMDGPU_GWS_SHIFT;
                else if (args->in.domains == AMDGPU_GEM_DOMAIN_OA)
                        size = size << AMDGPU_OA_SHIFT;
                else
                        return -EINVAL;
        }
        size = roundup(size, PAGE_SIZE);

        r = amdgpu_gem_object_create(adev, size, args->in.alignment,
                                     (u32)(0xffffffff & args->in.domains),
                                     args->in.domain_flags,
                                     kernel, &gobj);
        if (r)
                goto error_unlock;

        r = drm_gem_handle_create(filp, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(gobj);
        if (r)
                goto error_unlock;

        memset(args, 0, sizeof(*args));
        args->out.handle = handle;
        return 0;

error_unlock:
        r = amdgpu_gem_handle_lockup(adev, r);
        return r;
}
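/*
 * Informal userspace sketch, not part of the original file (assumes libdrm
 * and an open device fd), shown only to illustrate how this ioctl is driven:
 *
 *	union drm_amdgpu_gem_create req = {
 *		.in.bo_size   = 1 << 20,
 *		.in.alignment = 4096,
 *		.in.domains   = AMDGPU_GEM_DOMAIN_VRAM,
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_AMDGPU_GEM_CREATE, &req) == 0)
 *		req.out.handle is the new GEM handle
 */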
int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *filp)
{
        struct amdgpu_device *adev = dev->dev_private;
        struct drm_amdgpu_gem_userptr *args = data;
        struct drm_gem_object *gobj;
        struct amdgpu_bo *bo;
        uint32_t handle;
        int r;

        if (offset_in_page(args->addr | args->size))
                return -EINVAL;

        /* reject unknown flag values */
        if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
            AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
            AMDGPU_GEM_USERPTR_REGISTER))
                return -EINVAL;

        if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) && (
             !(args->flags & AMDGPU_GEM_USERPTR_ANONONLY) ||
             !(args->flags & AMDGPU_GEM_USERPTR_REGISTER))) {

                /* if we want to write to it we must require anonymous
                   memory and install an MMU notifier */
                return -EACCES;
        }

        /* create a gem object to contain this object in */
        r = amdgpu_gem_object_create(adev, args->size, 0,
                                     AMDGPU_GEM_DOMAIN_CPU, 0,
                                     0, &gobj);
        if (r)
                goto handle_lockup;

        bo = gem_to_amdgpu_bo(gobj);
        r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
        if (r)
                goto release_object;

        if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
                r = amdgpu_mn_register(bo, args->addr);
                if (r)
                        goto release_object;
        }

        if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
                down_read(&current->mm->mmap_sem);
                r = amdgpu_bo_reserve(bo, true);
                if (r) {
                        up_read(&current->mm->mmap_sem);
                        goto release_object;
                }

                amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
                r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
                amdgpu_bo_unreserve(bo);
                up_read(&current->mm->mmap_sem);
                if (r)
                        goto release_object;
        }

        r = drm_gem_handle_create(filp, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(gobj);
        if (r)
                goto handle_lockup;

        args->handle = handle;
        return 0;

release_object:
        drm_gem_object_unreference_unlocked(gobj);

handle_lockup:
        r = amdgpu_gem_handle_lockup(adev, r);

        return r;
}
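/*
 * Informal userspace sketch, not part of the original file: importing an
 * anonymous, page-aligned buffer as a writable userptr BO (fd and size are
 * illustrative):
 *
 *	void *ptr = aligned_alloc(4096, size);
 *	struct drm_amdgpu_gem_userptr req = {
 *		.addr  = (uintptr_t)ptr,
 *		.size  = size,
 *		.flags = AMDGPU_GEM_USERPTR_ANONONLY |
 *			 AMDGPU_GEM_USERPTR_REGISTER |
 *			 AMDGPU_GEM_USERPTR_VALIDATE,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_AMDGPU_GEM_USERPTR, &req);
 */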
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
                          struct drm_device *dev,
                          uint32_t handle, uint64_t *offset_p)
{
        struct drm_gem_object *gobj;
        struct amdgpu_bo *robj;

        gobj = drm_gem_object_lookup(dev, filp, handle);
        if (gobj == NULL)
                return -ENOENT;

        robj = gem_to_amdgpu_bo(gobj);
        if (amdgpu_ttm_tt_has_userptr(robj->tbo.ttm) ||
            (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
                drm_gem_object_unreference_unlocked(gobj);
                return -EPERM;
        }
        *offset_p = amdgpu_bo_mmap_offset(robj);
        drm_gem_object_unreference_unlocked(gobj);
        return 0;
}
int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
{
        union drm_amdgpu_gem_mmap *args = data;
        uint32_t handle = args->in.handle;

        memset(args, 0, sizeof(*args));
        return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
}
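/*
 * Informal userspace sketch, not part of the original file: the returned
 * fake offset is passed to mmap() on the same DRM fd to map the BO
 * (handle and size are illustrative):
 *
 *	union drm_amdgpu_gem_mmap req = { .in.handle = handle };
 *	drmIoctl(fd, DRM_IOCTL_AMDGPU_GEM_MMAP, &req);
 *	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 fd, req.out.addr_ptr);
 */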
/**
 * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
 *
 * @timeout_ns: timeout in ns
 *
 * Calculate the timeout in jiffies from an absolute timeout in ns.
 */
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
{
        unsigned long timeout_jiffies;
        ktime_t timeout;

        /* clamp timeout if it's too large */
        if (((int64_t)timeout_ns) < 0)
                return MAX_SCHEDULE_TIMEOUT;

        timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
        if (ktime_to_ns(timeout) < 0)
                return 0;

        timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
        /* clamp timeout to avoid unsigned -> signed overflow */
        if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
                return MAX_SCHEDULE_TIMEOUT - 1;

        return timeout_jiffies;
}
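/*
 * Worked example (added for illustration, not in the original): to wait up
 * to two seconds, the caller passes an absolute timeout of roughly
 * ktime_get() + 2 * NSEC_PER_SEC. Subtracting the current time above leaves
 * about 2,000,000,000 ns, which nsecs_to_jiffies() turns into roughly
 * 2000 jiffies when CONFIG_HZ is 1000.
 */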
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
                               struct drm_file *filp)
{
        struct amdgpu_device *adev = dev->dev_private;
        union drm_amdgpu_gem_wait_idle *args = data;
        struct drm_gem_object *gobj;
        struct amdgpu_bo *robj;
        uint32_t handle = args->in.handle;
        unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
        int r = 0;
        long ret;

        gobj = drm_gem_object_lookup(dev, filp, handle);
        if (gobj == NULL)
                return -ENOENT;

        robj = gem_to_amdgpu_bo(gobj);
        if (timeout == 0)
                ret = reservation_object_test_signaled_rcu(robj->tbo.resv, true);
        else
                ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, timeout);

        /* ret == 0 means not signaled,
         * ret > 0 means signaled
         * ret < 0 means interrupted before timeout
         */
        if (ret >= 0) {
                memset(args, 0, sizeof(*args));
                args->out.status = (ret == 0);
        } else
                r = ret;

        drm_gem_object_unreference_unlocked(gobj);
        r = amdgpu_gem_handle_lockup(adev, r);
        return r;
}
int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *filp)
{
        struct drm_amdgpu_gem_metadata *args = data;
        struct drm_gem_object *gobj;
        struct amdgpu_bo *robj;
        int r = -1;

        DRM_DEBUG("%d\n", args->handle);
        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL)
                return -ENOENT;
        robj = gem_to_amdgpu_bo(gobj);

        r = amdgpu_bo_reserve(robj, false);
        if (unlikely(r != 0))
                goto out;

        if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
                amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
                r = amdgpu_bo_get_metadata(robj, args->data.data,
                                           sizeof(args->data.data),
                                           &args->data.data_size_bytes,
                                           &args->data.flags);
        } else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
                if (args->data.data_size_bytes > sizeof(args->data.data)) {
                        r = -EINVAL;
                        goto unreserve;
                }
                r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
                if (!r)
                        r = amdgpu_bo_set_metadata(robj, args->data.data,
                                                   args->data.data_size_bytes,
                                                   args->data.flags);
        }

unreserve:
        amdgpu_bo_unreserve(robj);
out:
        drm_gem_object_unreference_unlocked(gobj);
        return r;
}
/**
 * amdgpu_gem_va_update_vm - update the bo_va in its VM
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to update
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
                                    struct amdgpu_bo_va *bo_va, uint32_t operation)
{
        struct ttm_validate_buffer tv, *entry;
        struct amdgpu_bo_list_entry *vm_bos;
        struct ww_acquire_ctx ticket;
        struct list_head list, duplicates;
        unsigned domain;
        int r;

        INIT_LIST_HEAD(&list);
        INIT_LIST_HEAD(&duplicates);

        tv.bo = &bo_va->bo->tbo;
        tv.shared = true;
        list_add(&tv.head, &list);

        vm_bos = amdgpu_vm_get_bos(adev, bo_va->vm, &list);
        if (!vm_bos)
                return;

        /* Provide duplicates to avoid -EALREADY */
        r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
        if (r)
                goto error_free;

        list_for_each_entry(entry, &list, head) {
                domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
                /* if anything is swapped out don't swap it in here,
                   just abort and wait for the next CS */
                if (domain == AMDGPU_GEM_DOMAIN_CPU)
                        goto error_unreserve;
        }
        list_for_each_entry(entry, &duplicates, head) {
                domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
                /* if anything is swapped out don't swap it in here,
                   just abort and wait for the next CS */
                if (domain == AMDGPU_GEM_DOMAIN_CPU)
                        goto error_unreserve;
        }

        r = amdgpu_vm_update_page_directory(adev, bo_va->vm);
        if (r)
                goto error_unreserve;

        r = amdgpu_vm_clear_freed(adev, bo_va->vm);
        if (r)
                goto error_unreserve;

        if (operation == AMDGPU_VA_OP_MAP)
                r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem);

error_unreserve:
        ttm_eu_backoff_reservation(&ticket, &list);

error_free:
        drm_free_large(vm_bos);

        if (r && r != -ERESTARTSYS)
                DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}
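/*
 * Note added for clarity: the reserve/backoff pairing above is the usual TTM
 * execbuf-util pattern. ttm_eu_reserve_buffers() takes the reservations for
 * every BO on the list (buffers that appear more than once land on the
 * "duplicates" list instead of failing with -EALREADY), and
 * ttm_eu_backoff_reservation() drops them again on every exit path.
 */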
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *filp)
{
        struct drm_amdgpu_gem_va *args = data;
        struct drm_gem_object *gobj;
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_fpriv *fpriv = filp->driver_priv;
        struct amdgpu_bo *rbo;
        struct amdgpu_bo_va *bo_va;
        struct ttm_validate_buffer tv, tv_pd;
        struct ww_acquire_ctx ticket;
        struct list_head list, duplicates;
        uint32_t invalid_flags, va_flags = 0;
        int r = 0;

        if (!adev->vm_manager.enabled)
                return -ENOTTY;

        if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
                dev_err(&dev->pdev->dev,
                        "va_address 0x%lX is in reserved area 0x%X\n",
                        (unsigned long)args->va_address,
                        AMDGPU_VA_RESERVED_SIZE);
                return -EINVAL;
        }

        invalid_flags = ~(AMDGPU_VM_DELAY_UPDATE | AMDGPU_VM_PAGE_READABLE |
                          AMDGPU_VM_PAGE_WRITEABLE | AMDGPU_VM_PAGE_EXECUTABLE);
        if ((args->flags & invalid_flags)) {
                dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
                        args->flags, invalid_flags);
                return -EINVAL;
        }

        switch (args->operation) {
        case AMDGPU_VA_OP_MAP:
        case AMDGPU_VA_OP_UNMAP:
                break;
        default:
                dev_err(&dev->pdev->dev, "unsupported operation %d\n",
                        args->operation);
                return -EINVAL;
        }

        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL)
                return -ENOENT;

        rbo = gem_to_amdgpu_bo(gobj);
        INIT_LIST_HEAD(&list);
        INIT_LIST_HEAD(&duplicates);
        tv.bo = &rbo->tbo;
        tv.shared = true;
        list_add(&tv.head, &list);

        if (args->operation == AMDGPU_VA_OP_MAP) {
                tv_pd.bo = &fpriv->vm.page_directory->tbo;
                tv_pd.shared = true;
                list_add(&tv_pd.head, &list);
        }
        r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
        if (r) {
                drm_gem_object_unreference_unlocked(gobj);
                return r;
        }

        bo_va = amdgpu_vm_bo_find(&fpriv->vm, rbo);
        if (!bo_va) {
                ttm_eu_backoff_reservation(&ticket, &list);
                drm_gem_object_unreference_unlocked(gobj);
                return -ENOENT;
        }

        switch (args->operation) {
        case AMDGPU_VA_OP_MAP:
                if (args->flags & AMDGPU_VM_PAGE_READABLE)
                        va_flags |= AMDGPU_PTE_READABLE;
                if (args->flags & AMDGPU_VM_PAGE_WRITEABLE)
                        va_flags |= AMDGPU_PTE_WRITEABLE;
                if (args->flags & AMDGPU_VM_PAGE_EXECUTABLE)
                        va_flags |= AMDGPU_PTE_EXECUTABLE;
                r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
                                     args->offset_in_bo, args->map_size,
                                     va_flags);
                break;
        case AMDGPU_VA_OP_UNMAP:
                r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
                break;
        default:
                break;
        }

        ttm_eu_backoff_reservation(&ticket, &list);
        if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE))
                amdgpu_gem_va_update_vm(adev, bo_va, args->operation);

        drm_gem_object_unreference_unlocked(gobj);
        return r;
}
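/*
 * Informal userspace sketch, not part of the original file: mapping a BO at
 * a GPU virtual address through this ioctl (handle, fd, bo_size and the
 * address are illustrative):
 *
 *	struct drm_amdgpu_gem_va req = {
 *		.handle       = handle,
 *		.operation    = AMDGPU_VA_OP_MAP,
 *		.flags        = AMDGPU_VM_PAGE_READABLE |
 *				AMDGPU_VM_PAGE_WRITEABLE,
 *		.va_address   = 0x400000,
 *		.offset_in_bo = 0,
 *		.map_size     = bo_size,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_AMDGPU_GEM_VA, &req);
 */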
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *filp)
{
        struct drm_amdgpu_gem_op *args = data;
        struct drm_gem_object *gobj;
        struct amdgpu_bo *robj;
        int r;

        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL)
                return -ENOENT;

        robj = gem_to_amdgpu_bo(gobj);

        r = amdgpu_bo_reserve(robj, false);
        if (unlikely(r))
                goto out;

        switch (args->op) {
        case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
                struct drm_amdgpu_gem_create_in info;
                void __user *out = (void __user *)(long)args->value;

                info.bo_size = robj->gem_base.size;
                info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
                info.domains = robj->initial_domain;
                info.domain_flags = robj->flags;
                amdgpu_bo_unreserve(robj);
                if (copy_to_user(out, &info, sizeof(info)))
                        r = -EFAULT;
                break;
        }
        case AMDGPU_GEM_OP_SET_PLACEMENT:
                /* changing the placement of a userptr BO is not allowed */
                if (amdgpu_ttm_tt_has_userptr(robj->tbo.ttm)) {
                        r = -EPERM;
                        amdgpu_bo_unreserve(robj);
                        break;
                }
                robj->initial_domain = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
                                                      AMDGPU_GEM_DOMAIN_GTT |
                                                      AMDGPU_GEM_DOMAIN_CPU);
                amdgpu_bo_unreserve(robj);
                break;
        default:
                amdgpu_bo_unreserve(robj);
                r = -EINVAL;
        }

out:
        drm_gem_object_unreference_unlocked(gobj);
        return r;
}
int amdgpu_mode_dumb_create(struct drm_file *file_priv,
                            struct drm_device *dev,
                            struct drm_mode_create_dumb *args)
{
        struct amdgpu_device *adev = dev->dev_private;
        struct drm_gem_object *gobj;
        uint32_t handle;
        int r;

        args->pitch = amdgpu_align_pitch(adev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
        args->size = (u64)args->pitch * args->height;
        args->size = ALIGN(args->size, PAGE_SIZE);

        r = amdgpu_gem_object_create(adev, args->size, 0,
                                     AMDGPU_GEM_DOMAIN_VRAM,
                                     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
                                     false, &gobj);
        if (r)
                return -ENOMEM;

        r = drm_gem_handle_create(file_priv, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(gobj);
        if (r)
                return r;

        args->handle = handle;
        return 0;
}
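/*
 * Worked example (added for illustration, not in the original): for a
 * 1920x1080 dumb buffer at bpp = 32, (bpp + 1) / 8 = 4 bytes per pixel, so
 * if the aligned width stays at 1920 the pitch is 1920 * 4 = 7680 bytes and
 * size = 7680 * 1080 = 8294400 bytes, which is already a multiple of a
 * 4096-byte PAGE_SIZE, so ALIGN() leaves it unchanged.
 */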
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *)m->private;
        struct drm_device *dev = node->minor->dev;
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_bo *rbo;
        unsigned i = 0;

        mutex_lock(&adev->gem.mutex);
        list_for_each_entry(rbo, &adev->gem.objects, list) {
                unsigned domain;
                const char *placement;

                domain = amdgpu_mem_type_to_domain(rbo->tbo.mem.mem_type);
                switch (domain) {
                case AMDGPU_GEM_DOMAIN_VRAM:
                        placement = "VRAM";
                        break;
                case AMDGPU_GEM_DOMAIN_GTT:
                        placement = " GTT";
                        break;
                case AMDGPU_GEM_DOMAIN_CPU:
                default:
                        placement = " CPU";
                        break;
                }
                seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
                           i, amdgpu_bo_size(rbo) >> 10, amdgpu_bo_size(rbo) >> 20,
                           placement, (unsigned long)rbo->pid);
                i++;
        }
        mutex_unlock(&adev->gem.mutex);
        return 0;
}
static struct drm_info_list amdgpu_debugfs_gem_list[] = {
        {"amdgpu_gem_info", &amdgpu_debugfs_gem_info, 0, NULL},
};
#endif
int amdgpu_gem_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
        return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list, 1);
#endif
        return 0;
}