/*
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 */
#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include <linux/seq_file.h>
#include "radeon_reg.h"
#include "radeon.h"
#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)

static int radeon_ttm_debugfs_init(struct radeon_device *rdev);
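
/*
 * Walk back from the TTM device to the owning radeon_device.
 * struct radeon_mman embeds the ttm_bo_device and is itself embedded
 * in struct radeon_device, so two container_of() steps recover the
 * driver-private structure without any lookup table.
 */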
static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
{
	struct radeon_mman *mman;
	struct radeon_device *rdev;

	mman = container_of(bdev, struct radeon_mman, bdev);
	rdev = container_of(mman, struct radeon_device, mman);
	return rdev;
}
static int radeon_ttm_mem_global_init(struct ttm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void radeon_ttm_mem_global_release(struct ttm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}
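
/*
 * The TTM memory-accounting and buffer-object globals are reference
 * counted and shared between drivers.  Take a reference on each here;
 * mem_global_referenced records whether the references were taken so
 * radeon_ttm_global_fini() drops them exactly once.
 */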
static int radeon_ttm_global_init(struct radeon_device *rdev)
{
	struct ttm_global_reference *global_ref;
	int r;

	rdev->mman.mem_global_referenced = false;
	global_ref = &rdev->mman.mem_global_ref;
	global_ref->global_type = TTM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &radeon_ttm_mem_global_init;
	global_ref->release = &radeon_ttm_mem_global_release;
	r = ttm_global_item_ref(global_ref);
	if (r != 0) {
		DRM_ERROR("Failed setting up TTM memory accounting "
			  "subsystem.\n");
		return r;
	}

	rdev->mman.bo_global_ref.mem_glob =
		rdev->mman.mem_global_ref.object;
	global_ref = &rdev->mman.bo_global_ref.ref;
	global_ref->global_type = TTM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;
	r = ttm_global_item_ref(global_ref);
	if (r != 0) {
		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
		ttm_global_item_unref(&rdev->mman.mem_global_ref);
		return r;
	}

	rdev->mman.mem_global_referenced = true;
	return 0;
}
static void radeon_ttm_global_fini(struct radeon_device *rdev)
{
	if (rdev->mman.mem_global_referenced) {
		ttm_global_item_unref(&rdev->mman.bo_global_ref.ref);
		ttm_global_item_unref(&rdev->mman.mem_global_ref);
		rdev->mman.mem_global_referenced = false;
	}
}
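
/*
 * Pick a TTM backend for this device: the generic AGP backend when the
 * GPU sits behind an AGP bridge, otherwise the radeon GART backend
 * implemented at the bottom of this file.
 */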
struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev);

static struct ttm_backend *
radeon_create_ttm_backend_entry(struct ttm_bo_device *bdev)
{
	struct radeon_device *rdev;

	rdev = radeon_get_rdev(bdev);
#if __OS_HAS_AGP
	if (rdev->flags & RADEON_IS_AGP) {
		return ttm_agp_backend_init(bdev, rdev->ddev->agp->bridge);
	}
#endif
	return radeon_ttm_backend_create(rdev);
}
static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	return 0;
}
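
/*
 * Describe each memory domain to TTM: how it is mapped, which caching
 * modes it supports, and where its I/O aperture lives.  System memory
 * needs no aperture; GTT goes through the AGP aperture when one is
 * present; VRAM is a fixed, ioremapped range behind the PCI aperture.
 */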
static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				struct ttm_mem_type_manager *man)
{
	struct radeon_device *rdev;

	rdev = radeon_get_rdev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_TT:
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
#if __OS_HAS_AGP
		if (rdev->flags & RADEON_IS_AGP) {
			if (!(drm_core_has_AGP(rdev->ddev) && rdev->ddev->agp)) {
				DRM_ERROR("AGP is not enabled for memory type %u\n",
					  (unsigned)type);
				return -EINVAL;
			}
			man->io_offset = rdev->mc.agp_base;
			man->io_size = rdev->mc.gtt_size;
			if (!rdev->ddev->agp->cant_use_aperture)
				man->flags = TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
					     TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
						 TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
		}
#endif
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		man->io_offset = rdev->mc.aper_base;
		man->io_size = rdev->mc.aper_size;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}
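
/*
 * On eviction, send buffers to cached system memory wherever they
 * currently live: strip the memory-type and caching bits from the
 * current placement and force TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED,
 * keeping the caller's remaining placement flags intact.
 */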
static uint32_t radeon_evict_flags(struct ttm_buffer_object *bo)
{
	uint32_t cur_placement = bo->mem.placement & ~TTM_PL_MASK_MEMTYPE;

	switch (bo->mem.mem_type) {
	default:
		return (cur_placement & ~TTM_PL_MASK_CACHING) |
			TTM_PL_FLAG_SYSTEM |
			TTM_PL_FLAG_CACHED;
	}
}
static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	return 0;
}
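
/*
 * A "null" move: no data is copied because the pages themselves do not
 * change, only the memory-region bookkeeping.  Transfer new_mem into
 * the object and take ownership of its mm_node.
 */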
static void radeon_move_null(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	BUG_ON(old_mem->mm_node != NULL);
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
}
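
/*
 * Move a buffer with the CP blitter.  Translate both mm_node offsets
 * into GPU addresses by adding the VRAM/GTT base, kick off the copy,
 * and let ttm_bo_move_accel_cleanup() fence the transition.
 */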
static int radeon_move_blit(struct ttm_buffer_object *bo,
			    bool evict, int no_wait,
			    struct ttm_mem_reg *new_mem,
			    struct ttm_mem_reg *old_mem)
{
	struct radeon_device *rdev;
	uint64_t old_start, new_start;
	struct radeon_fence *fence;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	r = radeon_fence_create(rdev, &fence);
	if (unlikely(r)) {
		return r;
	}
	old_start = old_mem->mm_node->start << PAGE_SHIFT;
	new_start = new_mem->mm_node->start << PAGE_SHIFT;

	switch (old_mem->mem_type) {
	case TTM_PL_VRAM:
		old_start += rdev->mc.vram_location;
		break;
	case TTM_PL_TT:
		old_start += rdev->mc.gtt_location;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
		return -EINVAL;
	}
	switch (new_mem->mem_type) {
	case TTM_PL_VRAM:
		new_start += rdev->mc.vram_location;
		break;
	case TTM_PL_TT:
		new_start += rdev->mc.gtt_location;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", new_mem->mem_type);
		return -EINVAL;
	}
	if (!rdev->cp.ready) {
		DRM_ERROR("Trying to move memory with CP turned off.\n");
		return -EINVAL;
	}
	r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages, fence);
	/* FIXME: handle copy error */
	r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
				      evict, no_wait, new_mem);
	radeon_fence_unref(&fence);
	return r;
}
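
/*
 * VRAM -> system moves bounce through GTT: allocate a temporary GTT
 * placement, bind the pages, blit VRAM into it, then let TTM finish
 * the TT -> system transition with ttm_bo_move_ttm().
 */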
static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
				bool evict, bool interruptible, bool no_wait,
				struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	uint32_t proposed_placement;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	proposed_placement = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	r = ttm_bo_mem_space(bo, proposed_placement, &tmp_mem,
			     interruptible, no_wait);
	if (unlikely(r)) {
		return r;
	}
	r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = radeon_move_blit(bo, true, no_wait, &tmp_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = ttm_bo_move_ttm(bo, true, no_wait, new_mem);
out_cleanup:
	if (tmp_mem.mm_node) {
		struct ttm_bo_global *glob = rdev->mman.bdev.glob;

		spin_lock(&glob->lru_lock);
		drm_mm_put_block(tmp_mem.mm_node);
		spin_unlock(&glob->lru_lock);
	}
	return r;
}
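
/*
 * System -> VRAM is the mirror image: move the pages into a temporary
 * GTT placement first, then blit from GTT into the final VRAM spot.
 */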
static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
				bool evict, bool interruptible, bool no_wait,
				struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	uint32_t proposed_flags;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	proposed_flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	r = ttm_bo_mem_space(bo, proposed_flags, &tmp_mem,
			     interruptible, no_wait);
	if (unlikely(r)) {
		return r;
	}
	r = ttm_bo_move_ttm(bo, true, no_wait, &tmp_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = radeon_move_blit(bo, true, no_wait, new_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
out_cleanup:
	if (tmp_mem.mm_node) {
		struct ttm_bo_global *glob = rdev->mman.bdev.glob;

		spin_lock(&glob->lru_lock);
		drm_mm_put_block(tmp_mem.mm_node);
		spin_unlock(&glob->lru_lock);
	}
	return r;
}
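
/*
 * Top-level move dispatch.  System <-> TT transitions only rebind the
 * GART, VRAM transfers bounce through GTT, and anything the CP cannot
 * handle (or any blit failure) falls back to a CPU memcpy.
 */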
static int radeon_bo_move(struct ttm_buffer_object *bo,
			  bool evict, bool interruptible, bool no_wait,
			  struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		radeon_move_null(bo, new_mem);
		return 0;
	}
	if ((old_mem->mem_type == TTM_PL_TT &&
	     new_mem->mem_type == TTM_PL_SYSTEM) ||
	    (old_mem->mem_type == TTM_PL_SYSTEM &&
	     new_mem->mem_type == TTM_PL_TT)) {
		/* bind is enough */
		radeon_move_null(bo, new_mem);
		return 0;
	}
	if (!rdev->cp.ready || rdev->asic->copy == NULL) {
		/* use memcpy */
		goto memcpy;
	}

	if (old_mem->mem_type == TTM_PL_VRAM &&
	    new_mem->mem_type == TTM_PL_SYSTEM) {
		r = radeon_move_vram_ram(bo, evict, interruptible,
					 no_wait, new_mem);
	} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
		   new_mem->mem_type == TTM_PL_VRAM) {
		r = radeon_move_ram_vram(bo, evict, interruptible,
					 no_wait, new_mem);
	} else {
		r = radeon_move_blit(bo, evict, no_wait, new_mem, old_mem);
	}

	if (r) {
memcpy:
		r = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
	}
	return r;
}
const uint32_t radeon_mem_prios[] = {
	TTM_PL_VRAM,
	TTM_PL_TT,
	TTM_PL_SYSTEM,
	TTM_PL_PRIV0
};

const uint32_t radeon_busy_prios[] = {
	TTM_PL_TT,
	TTM_PL_VRAM,
	TTM_PL_SYSTEM,
	TTM_PL_PRIV0
};
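
/*
 * TTM synchronization-object hooks.  TTM only sees opaque void
 * pointers; these thin wrappers cast back to struct radeon_fence and
 * forward to the fence API.
 */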
static int radeon_sync_obj_wait(void *sync_obj, void *sync_arg,
				bool lazy, bool interruptible)
{
	return radeon_fence_wait((struct radeon_fence *)sync_obj, interruptible);
}

static int radeon_sync_obj_flush(void *sync_obj, void *sync_arg)
{
	return 0;
}

static void radeon_sync_obj_unref(void **sync_obj)
{
	radeon_fence_unref((struct radeon_fence **)sync_obj);
}

static void *radeon_sync_obj_ref(void *sync_obj)
{
	return radeon_fence_ref((struct radeon_fence *)sync_obj);
}

static bool radeon_sync_obj_signaled(void *sync_obj, void *sync_arg)
{
	return radeon_fence_signaled((struct radeon_fence *)sync_obj);
}
static struct ttm_bo_driver radeon_bo_driver = {
	.mem_type_prio = radeon_mem_prios,
	.mem_busy_prio = radeon_busy_prios,
	.num_mem_type_prio = ARRAY_SIZE(radeon_mem_prios),
	.num_mem_busy_prio = ARRAY_SIZE(radeon_busy_prios),
	.create_ttm_backend_entry = &radeon_create_ttm_backend_entry,
	.invalidate_caches = &radeon_invalidate_caches,
	.init_mem_type = &radeon_init_mem_type,
	.evict_flags = &radeon_evict_flags,
	.move = &radeon_bo_move,
	.verify_access = &radeon_verify_access,
	.sync_obj_signaled = &radeon_sync_obj_signaled,
	.sync_obj_wait = &radeon_sync_obj_wait,
	.sync_obj_flush = &radeon_sync_obj_flush,
	.sync_obj_unref = &radeon_sync_obj_unref,
	.sync_obj_ref = &radeon_sync_obj_ref,
	.move_notify = &radeon_bo_move_notify,
	.fault_reserve_notify = &radeon_bo_fault_reserve_notify,
};
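
/*
 * Bring up TTM for this device: global references, the BO device, the
 * VRAM and GTT heaps sized from the memory controller, a small pinned
 * VRAM buffer standing in for the stolen VGA region, and the debugfs
 * files.
 */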
int radeon_ttm_init(struct radeon_device *rdev)
{
	int r;

	r = radeon_ttm_global_init(rdev);
	if (r) {
		return r;
	}
	/* No other user of the address space, so set the offset to 0 */
	r = ttm_bo_device_init(&rdev->mman.bdev,
			       rdev->mman.bo_global_ref.ref.object,
			       &radeon_bo_driver, DRM_FILE_PAGE_OFFSET,
			       rdev->need_dma32);
	if (r) {
		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
		return r;
	}
	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM, 0,
			   rdev->mc.real_vram_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing VRAM heap.\n");
		return r;
	}
	r = radeon_object_create(rdev, NULL, 256 * 1024, true,
				 RADEON_GEM_DOMAIN_VRAM, false,
				 &rdev->stollen_vga_memory);
	if (r) {
		return r;
	}
	r = radeon_object_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
	if (r) {
		radeon_object_unref(&rdev->stollen_vga_memory);
		return r;
	}
	DRM_INFO("radeon: %uM of VRAM memory ready\n",
		 (unsigned)rdev->mc.real_vram_size / (1024 * 1024));
	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT, 0,
			   rdev->mc.gtt_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing GTT heap.\n");
		return r;
	}
	DRM_INFO("radeon: %uM of GTT memory ready.\n",
		 (unsigned)(rdev->mc.gtt_size / (1024 * 1024)));
	if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
		rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
	}
	r = radeon_ttm_debugfs_init(rdev);
	if (r) {
		DRM_ERROR("Failed to init debugfs\n");
		return r;
	}
	return 0;
}
void radeon_ttm_fini(struct radeon_device *rdev)
{
	if (rdev->stollen_vga_memory) {
		radeon_object_unpin(rdev->stollen_vga_memory);
		radeon_object_unref(&rdev->stollen_vga_memory);
	}
	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT);
	ttm_bo_device_release(&rdev->mman.bdev);
	radeon_gart_fini(rdev);
	radeon_ttm_global_fini(rdev);
	DRM_INFO("radeon: ttm finalized\n");
}
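
/*
 * mmap fault handling.  TTM installs its own vm_ops at mmap time; we
 * copy that table once, override .fault so a NULL private_data bails
 * out with VM_FAULT_NOPAGE, and then delegate to TTM's handler.
 */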
static struct vm_operations_struct radeon_ttm_vm_ops;
static const struct vm_operations_struct *ttm_vm_ops = NULL;

static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo;
	int r;

	bo = (struct ttm_buffer_object *)vma->vm_private_data;
	if (bo == NULL) {
		return VM_FAULT_NOPAGE;
	}
	r = ttm_vm_ops->fault(vma, vmf);
	return r;
}
int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv;
	struct radeon_device *rdev;
	int r;

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
		return drm_mmap(filp, vma);
	}

	file_priv = (struct drm_file *)filp->private_data;
	rdev = file_priv->minor->dev->dev_private;
	if (rdev == NULL) {
		return -EINVAL;
	}
	r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
	if (unlikely(r != 0)) {
		return r;
	}
	if (unlikely(ttm_vm_ops == NULL)) {
		ttm_vm_ops = vma->vm_ops;
		radeon_ttm_vm_ops = *ttm_vm_ops;
		radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
	}
	vma->vm_ops = &radeon_ttm_vm_ops;
	return 0;
}
/*
 * TTM backend functions.
 */
struct radeon_ttm_backend {
	struct ttm_backend		backend;
	struct radeon_device		*rdev;
	unsigned long			num_pages;
	struct page			**pages;
	struct page			*dummy_read_page;
	bool				populated;
	bool				bound;
	unsigned			offset;
};
static int radeon_ttm_backend_populate(struct ttm_backend *backend,
				       unsigned long num_pages,
				       struct page **pages,
				       struct page *dummy_read_page)
{
	struct radeon_ttm_backend *gtt;

	gtt = container_of(backend, struct radeon_ttm_backend, backend);
	gtt->pages = pages;
	gtt->num_pages = num_pages;
	gtt->dummy_read_page = dummy_read_page;
	gtt->populated = true;
	return 0;
}
static void radeon_ttm_backend_clear(struct ttm_backend *backend)
{
	struct radeon_ttm_backend *gtt;

	gtt = container_of(backend, struct radeon_ttm_backend, backend);
	gtt->pages = NULL;
	gtt->num_pages = 0;
	gtt->dummy_read_page = NULL;
	gtt->populated = false;
	gtt->bound = false;
}
static int radeon_ttm_backend_bind(struct ttm_backend *backend,
				   struct ttm_mem_reg *bo_mem)
{
	struct radeon_ttm_backend *gtt;
	int r;

	gtt = container_of(backend, struct radeon_ttm_backend, backend);
	gtt->offset = bo_mem->mm_node->start << PAGE_SHIFT;
	if (!gtt->num_pages) {
		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
		     gtt->num_pages, bo_mem, backend);
	}
	r = radeon_gart_bind(gtt->rdev, gtt->offset,
			     gtt->num_pages, gtt->pages);
	if (r) {
		DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
			  gtt->num_pages, gtt->offset);
		return r;
	}
	gtt->bound = true;
	return 0;
}
static int radeon_ttm_backend_unbind(struct ttm_backend *backend)
{
	struct radeon_ttm_backend *gtt;

	gtt = container_of(backend, struct radeon_ttm_backend, backend);
	radeon_gart_unbind(gtt->rdev, gtt->offset, gtt->num_pages);
	gtt->bound = false;
	return 0;
}
static void radeon_ttm_backend_destroy(struct ttm_backend *backend)
{
	struct radeon_ttm_backend *gtt;

	gtt = container_of(backend, struct radeon_ttm_backend, backend);
	if (gtt->bound) {
		radeon_ttm_backend_unbind(backend);
	}
	kfree(gtt);
}
static struct ttm_backend_func radeon_backend_func = {
	.populate = &radeon_ttm_backend_populate,
	.clear = &radeon_ttm_backend_clear,
	.bind = &radeon_ttm_backend_bind,
	.unbind = &radeon_ttm_backend_unbind,
	.destroy = &radeon_ttm_backend_destroy,
};
struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev)
{
	struct radeon_ttm_backend *gtt;

	gtt = kzalloc(sizeof(struct radeon_ttm_backend), GFP_KERNEL);
	if (gtt == NULL) {
		return NULL;
	}
	gtt->backend.bdev = &rdev->mman.bdev;
	gtt->backend.flags = 0;
	gtt->backend.func = &radeon_backend_func;
	gtt->rdev = rdev;
	gtt->pages = NULL;
	gtt->num_pages = 0;
	gtt->dummy_read_page = NULL;
	gtt->populated = false;
	gtt->bound = false;
	return &gtt->backend;
}
#define RADEON_DEBUGFS_MEM_TYPES 2

#if defined(CONFIG_DEBUG_FS)
static int radeon_mm_dump_table(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_mm *mm = (struct drm_mm *)node->info_ent->data;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	int ret;
	struct ttm_bo_global *glob = rdev->mman.bdev.glob;

	spin_lock(&glob->lru_lock);
	ret = drm_mm_dump_table(m, mm);
	spin_unlock(&glob->lru_lock);
	return ret;
}
#endif

static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES];
	static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES][32];
	unsigned i;

	for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
		if (i == 0)
			sprintf(radeon_mem_types_names[i], "radeon_vram_mm");
		else
			sprintf(radeon_mem_types_names[i], "radeon_gtt_mm");
		radeon_mem_types_list[i].name = radeon_mem_types_names[i];
		radeon_mem_types_list[i].show = &radeon_mm_dump_table;
		radeon_mem_types_list[i].driver_features = 0;
		if (i == 0)
			radeon_mem_types_list[i].data =
				&rdev->mman.bdev.man[TTM_PL_VRAM].manager;
		else
			radeon_mem_types_list[i].data =
				&rdev->mman.bdev.man[TTM_PL_TT].manager;
	}
	return radeon_debugfs_add_files(rdev, radeon_mem_types_list,
					RADEON_DEBUGFS_MEM_TYPES);
#endif
	return 0;
}