/*
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 */
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/swiotlb.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include "radeon_reg.h"
#include "radeon.h"

#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
static int radeon_ttm_debugfs_init(struct radeon_device *rdev);
static void radeon_ttm_debugfs_fini(struct radeon_device *rdev);
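/*
 * radeon_get_rdev - recover the radeon_device that embeds a ttm_bo_device.
 *
 * TTM callbacks only receive the ttm_bo_device; two container_of() steps
 * (bdev is embedded in radeon_mman, which is embedded in radeon_device)
 * walk back up to the driver-private device structure.
 */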
static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
{
	struct radeon_mman *mman;
	struct radeon_device *rdev;

	mman = container_of(bdev, struct radeon_mman, bdev);
	rdev = container_of(mman, struct radeon_device, mman);
	return rdev;
}
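/*
 * Global memory: the TTM memory-accounting and BO-management globals are
 * shared by all devices, so they are reference counted here rather than
 * allocated per device.
 */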
static int radeon_ttm_mem_global_init(struct drm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void radeon_ttm_mem_global_release(struct drm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}
static int radeon_ttm_global_init(struct radeon_device *rdev)
{
	struct drm_global_reference *global_ref;
	int r;

	rdev->mman.mem_global_referenced = false;
	global_ref = &rdev->mman.mem_global_ref;
	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &radeon_ttm_mem_global_init;
	global_ref->release = &radeon_ttm_mem_global_release;
	r = drm_global_item_ref(global_ref);
	if (r != 0) {
		DRM_ERROR("Failed setting up TTM memory accounting "
			  "subsystem.\n");
		return r;
	}

	rdev->mman.bo_global_ref.mem_glob =
		rdev->mman.mem_global_ref.object;
	global_ref = &rdev->mman.bo_global_ref.ref;
	global_ref->global_type = DRM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;
	r = drm_global_item_ref(global_ref);
	if (r != 0) {
		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
		drm_global_item_unref(&rdev->mman.mem_global_ref);
		return r;
	}

	rdev->mman.mem_global_referenced = true;
	return 0;
}
static void radeon_ttm_global_fini(struct radeon_device *rdev)
{
	if (rdev->mman.mem_global_referenced) {
		drm_global_item_unref(&rdev->mman.bo_global_ref.ref);
		drm_global_item_unref(&rdev->mman.mem_global_ref);
		rdev->mman.mem_global_referenced = false;
	}
}
static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	return 0;
}
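/*
 * radeon_init_mem_type - describe each memory pool to TTM.
 *
 * SYSTEM is plain cached system memory, TT is GART-mapped system memory
 * (write-combined when an AGP aperture is in use), and VRAM is the fixed,
 * iomem-backed on-card pool starting at mc.vram_start.
 */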
static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				struct ttm_mem_type_manager *man)
{
	struct radeon_device *rdev;

	rdev = radeon_get_rdev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_TT:
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = rdev->mc.gtt_start;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
#if IS_ENABLED(CONFIG_AGP)
		if (rdev->flags & RADEON_IS_AGP) {
			if (!rdev->ddev->agp) {
				DRM_ERROR("AGP is not enabled for memory type %u\n",
					  (unsigned)type);
				return -EINVAL;
			}
			if (!rdev->ddev->agp->cant_use_aperture)
				man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
						 TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
		}
#endif
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = rdev->mc.vram_start;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}
static void radeon_evict_flags(struct ttm_buffer_object *bo,
				struct ttm_placement *placement)
{
	static const struct ttm_place placements = {
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
	};

	struct radeon_bo *rbo;

	if (!radeon_ttm_bo_is_radeon_bo(bo)) {
		placement->placement = &placements;
		placement->busy_placement = &placements;
		placement->num_placement = 1;
		placement->num_busy_placement = 1;
		return;
	}
	rbo = container_of(bo, struct radeon_bo, tbo);
	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		if (rbo->rdev->ring[radeon_copy_ring_index(rbo->rdev)].ready == false)
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
		else if (rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size &&
			 bo->mem.start < (rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT)) {
			unsigned fpfn = rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
			int i;

			/* Try evicting to the CPU inaccessible part of VRAM
			 * first, but only set GTT as busy placement, so this
			 * BO will be evicted to GTT rather than causing other
			 * BOs to be evicted from VRAM
			 */
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM |
							 RADEON_GEM_DOMAIN_GTT);
			rbo->placement.num_busy_placement = 0;
			for (i = 0; i < rbo->placement.num_placement; i++) {
				if (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) {
					if (rbo->placements[i].fpfn < fpfn)
						rbo->placements[i].fpfn = fpfn;
				} else {
					rbo->placement.busy_placement =
						&rbo->placements[i];
					rbo->placement.num_busy_placement = 1;
				}
			}
		} else
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
		break;
	case TTM_PL_TT:
	default:
		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
	}
	*placement = rbo->placement;
}
static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);

	if (radeon_ttm_tt_has_userptr(bo->ttm))
		return -EPERM;
	return drm_vma_node_verify_access(&rbo->gem_base.vma_node,
					  filp->private_data);
}
static void radeon_move_null(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	BUG_ON(old_mem->mm_node != NULL);
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
}
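/*
 * radeon_move_blit - move a buffer with the GPU copy ring.
 *
 * Both placements are translated into GPU addresses (VRAM or GART based),
 * then an asynchronous copy is queued and the resulting fence handed to
 * ttm_bo_move_accel_cleanup() to retire the old placement.
 */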
static int radeon_move_blit(struct ttm_buffer_object *bo,
			bool evict, bool no_wait_gpu,
			struct ttm_mem_reg *new_mem,
			struct ttm_mem_reg *old_mem)
{
	struct radeon_device *rdev;
	uint64_t old_start, new_start;
	struct radeon_fence *fence;
	unsigned num_pages;
	int r, ridx;

	rdev = radeon_get_rdev(bo->bdev);
	ridx = radeon_copy_ring_index(rdev);
	old_start = (u64)old_mem->start << PAGE_SHIFT;
	new_start = (u64)new_mem->start << PAGE_SHIFT;

	switch (old_mem->mem_type) {
	case TTM_PL_VRAM:
		old_start += rdev->mc.vram_start;
		break;
	case TTM_PL_TT:
		old_start += rdev->mc.gtt_start;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
		return -EINVAL;
	}
	switch (new_mem->mem_type) {
	case TTM_PL_VRAM:
		new_start += rdev->mc.vram_start;
		break;
	case TTM_PL_TT:
		new_start += rdev->mc.gtt_start;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
		return -EINVAL;
	}
	if (!rdev->ring[ridx].ready) {
		DRM_ERROR("Trying to move memory with ring turned off.\n");
		return -EINVAL;
	}

	BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);

	num_pages = new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
	fence = radeon_copy(rdev, old_start, new_start, num_pages, bo->resv);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	r = ttm_bo_move_accel_cleanup(bo, &fence->base, evict, new_mem);
	radeon_fence_unref(&fence);
	return r;
}
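/*
 * radeon_move_vram_ram - two-step VRAM -> system move.
 *
 * The GPU cannot blit straight into unbound system pages, so a temporary
 * GTT placement is found first, the buffer is blitted into it, and TTM
 * then finishes the move to system memory on the CPU side.
 */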
static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
				bool evict, bool interruptible,
				bool no_wait_gpu,
				struct ttm_mem_reg *new_mem)
{
	struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu };
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	struct ttm_place placements;
	struct ttm_placement placement;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = 0;
	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, &ctx);
	if (unlikely(r)) {
		return r;
	}

	r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
	if (unlikely(r)) {
		goto out_cleanup;
	}

	r = ttm_tt_bind(bo->ttm, &tmp_mem, &ctx);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = radeon_move_blit(bo, true, no_wait_gpu, &tmp_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = ttm_bo_move_ttm(bo, &ctx, new_mem);
out_cleanup:
	ttm_bo_mem_put(bo, &tmp_mem);
	return r;
}
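/*
 * radeon_move_ram_vram - the mirror of the move above: bind the pages
 * into a temporary GTT placement first, then blit them into VRAM.
 */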
static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
				bool evict, bool interruptible,
				bool no_wait_gpu,
				struct ttm_mem_reg *new_mem)
{
	struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu };
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	struct ttm_placement placement;
	struct ttm_place placements;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = 0;
	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, &ctx);
	if (unlikely(r)) {
		return r;
	}
	r = ttm_bo_move_ttm(bo, &ctx, &tmp_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = radeon_move_blit(bo, true, no_wait_gpu, new_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
out_cleanup:
	ttm_bo_mem_put(bo, &tmp_mem);
	return r;
}
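/*
 * radeon_bo_move - the TTM move callback.
 *
 * TT <-> SYSTEM transitions and buffers without backing pages need no
 * copy at all; everything else prefers the GPU blit paths and falls back
 * to ttm_bo_move_memcpy() when no copy ring is available.
 */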
static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
			  struct ttm_operation_ctx *ctx,
			  struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct radeon_bo *rbo;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int r;

	r = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
	if (r)
		return r;

	/* Can't move a pinned BO */
	rbo = container_of(bo, struct radeon_bo, tbo);
	if (WARN_ON_ONCE(rbo->pin_count > 0))
		return -EINVAL;

	rdev = radeon_get_rdev(bo->bdev);
	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		radeon_move_null(bo, new_mem);
		return 0;
	}
	if ((old_mem->mem_type == TTM_PL_TT &&
	     new_mem->mem_type == TTM_PL_SYSTEM) ||
	    (old_mem->mem_type == TTM_PL_SYSTEM &&
	     new_mem->mem_type == TTM_PL_TT)) {
		/* bind is enough */
		radeon_move_null(bo, new_mem);
		return 0;
	}
	if (!rdev->ring[radeon_copy_ring_index(rdev)].ready ||
	    rdev->asic->copy.copy == NULL) {
		/* use memcpy */
		goto memcpy;
	}

	if (old_mem->mem_type == TTM_PL_VRAM &&
	    new_mem->mem_type == TTM_PL_SYSTEM) {
		r = radeon_move_vram_ram(bo, evict, ctx->interruptible,
					ctx->no_wait_gpu, new_mem);
	} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
		   new_mem->mem_type == TTM_PL_VRAM) {
		r = radeon_move_ram_vram(bo, evict, ctx->interruptible,
					    ctx->no_wait_gpu, new_mem);
	} else {
		r = radeon_move_blit(bo, evict, ctx->no_wait_gpu,
				     new_mem, old_mem);
	}

	if (r) {
memcpy:
		r = ttm_bo_move_memcpy(bo, ctx, new_mem);
		if (r) {
			return r;
		}
	}

	/* update statistics */
	atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &rdev->num_bytes_moved);
	return 0;
}
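/*
 * radeon_ttm_io_mem_reserve - fill in the bus placement so TTM can
 * CPU-map a buffer: the AGP aperture for TT, the PCI aperture for VRAM
 * (after checking the offset against mc.visible_vram_size).
 */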
static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct radeon_device *rdev = radeon_get_rdev(bdev);

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* system memory */
		return 0;
	case TTM_PL_TT:
#if IS_ENABLED(CONFIG_AGP)
		if (rdev->flags & RADEON_IS_AGP) {
			/* RADEON_IS_AGP is set only if AGP is active */
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = rdev->mc.agp_base;
			mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture;
		}
#endif
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		/* check if it's visible */
		if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size)
			return -EINVAL;
		mem->bus.base = rdev->mc.aper_base;
		mem->bus.is_iomem = true;
#ifdef __alpha__
		/*
		 * Alpha: use bus.addr to hold the ioremap() return,
		 * so we can modify bus.base below.
		 */
		if (mem->placement & TTM_PL_FLAG_WC)
			mem->bus.addr =
				ioremap_wc(mem->bus.base + mem->bus.offset,
					   mem->bus.size);
		else
			mem->bus.addr =
				ioremap_nocache(mem->bus.base + mem->bus.offset,
						mem->bus.size);
		if (!mem->bus.addr)
			return -ENOMEM;

		/*
		 * Alpha: Use just the bus offset plus
		 * the hose/domain memory base for bus.base.
		 * It then can be used to build PTEs for VRAM
		 * access, as done in ttm_bo_vm_fault().
		 */
		mem->bus.base = (mem->bus.base & 0x0ffffffffUL) +
			rdev->ddev->hose->dense_mem_base;
#endif
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

/*
 * TTM backend functions.
 */
struct radeon_ttm_tt {
	struct ttm_dma_tt		ttm;
	struct radeon_device		*rdev;
	u64				offset;

	uint64_t			userptr;
	struct mm_struct		*usermm;
	uint32_t			userflags;
};
/* prepare the sg table with the user pages */
static int radeon_ttm_tt_pin_userptr(struct ttm_tt *ttm)
{
	struct radeon_device *rdev = radeon_get_rdev(ttm->bdev);
	struct radeon_ttm_tt *gtt = (void *)ttm;
	unsigned pinned = 0, nents;
	int r;

	int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	if (current->mm != gtt->usermm)
		return -EPERM;

	if (gtt->userflags & RADEON_GEM_USERPTR_ANONONLY) {
		/* check that we only pin down anonymous memory
		   to prevent problems with writeback */
		unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
		struct vm_area_struct *vma;
		vma = find_vma(gtt->usermm, gtt->userptr);
		if (!vma || vma->vm_file || vma->vm_end < end)
			return -EPERM;
	}

	do {
		unsigned num_pages = ttm->num_pages - pinned;
		uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
		struct page **pages = ttm->pages + pinned;

		r = get_user_pages(userptr, num_pages, write ? FOLL_WRITE : 0,
				   pages, NULL);
		if (r < 0)
			goto release_pages;

		pinned += r;

	} while (pinned < ttm->num_pages);

	r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
				      ttm->num_pages << PAGE_SHIFT,
				      GFP_KERNEL);
	if (r)
		goto release_sg;

	r = -ENOMEM;
	nents = dma_map_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
	if (nents != ttm->sg->nents)
		goto release_sg;

	drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
					 gtt->ttm.dma_address, ttm->num_pages);

	return 0;

release_sg:
	kfree(ttm->sg);

release_pages:
	release_pages(ttm->pages, pinned);
	return r;
}
static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
{
	struct radeon_device *rdev = radeon_get_rdev(ttm->bdev);
	struct radeon_ttm_tt *gtt = (void *)ttm;
	struct sg_page_iter sg_iter;

	int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	/* double check that we don't free the table twice */
	if (!ttm->sg->sgl)
		return;

	/* free the sg table and pages again */
	dma_unmap_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction);

	for_each_sg_page(ttm->sg->sgl, &sg_iter, ttm->sg->nents, 0) {
		struct page *page = sg_page_iter_page(&sg_iter);
		if (!(gtt->userflags & RADEON_GEM_USERPTR_READONLY))
			set_page_dirty(page);

		mark_page_accessed(page);
		put_page(page);
	}

	sg_free_table(ttm->sg);
}
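/*
 * radeon_ttm_backend_bind - write the backing pages into the GART.
 *
 * Userptr pages are pinned first and their GART entries lose the write
 * bit; cached ttms get the SNOOP bit so GPU accesses snoop the CPU
 * caches instead of requiring an uncached mapping.
 */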
static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
				   struct ttm_mem_reg *bo_mem)
{
	struct radeon_ttm_tt *gtt = (void*)ttm;
	uint32_t flags = RADEON_GART_PAGE_VALID | RADEON_GART_PAGE_READ |
		RADEON_GART_PAGE_WRITE;
	int r;

	if (gtt->userptr) {
		radeon_ttm_tt_pin_userptr(ttm);
		flags &= ~RADEON_GART_PAGE_WRITE;
	}

	gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
	if (!ttm->num_pages) {
		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
		     ttm->num_pages, bo_mem, ttm);
	}
	if (ttm->caching_state == tt_cached)
		flags |= RADEON_GART_PAGE_SNOOP;
	r = radeon_gart_bind(gtt->rdev, gtt->offset, ttm->num_pages,
			     ttm->pages, gtt->ttm.dma_address, flags);
	if (r) {
		DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
			  ttm->num_pages, (unsigned)gtt->offset);
		return r;
	}
	return 0;
}
static int radeon_ttm_backend_unbind(struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = (void *)ttm;

	radeon_gart_unbind(gtt->rdev, gtt->offset, ttm->num_pages);

	if (gtt->userptr)
		radeon_ttm_tt_unpin_userptr(ttm);

	return 0;
}
static void radeon_ttm_backend_destroy(struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = (void *)ttm;

	ttm_dma_tt_fini(&gtt->ttm);
	kfree(gtt);
}

static struct ttm_backend_func radeon_backend_func = {
	.bind = &radeon_ttm_backend_bind,
	.unbind = &radeon_ttm_backend_unbind,
	.destroy = &radeon_ttm_backend_destroy,
};
static struct ttm_tt *radeon_ttm_tt_create(struct ttm_buffer_object *bo,
					   uint32_t page_flags)
{
	struct radeon_device *rdev;
	struct radeon_ttm_tt *gtt;

	rdev = radeon_get_rdev(bo->bdev);
#if IS_ENABLED(CONFIG_AGP)
	if (rdev->flags & RADEON_IS_AGP) {
		return ttm_agp_tt_create(bo, rdev->ddev->agp->bridge,
					 page_flags);
	}
#endif

	gtt = kzalloc(sizeof(struct radeon_ttm_tt), GFP_KERNEL);
	if (gtt == NULL) {
		return NULL;
	}
	gtt->ttm.ttm.func = &radeon_backend_func;
	gtt->rdev = rdev;
	if (ttm_dma_tt_init(&gtt->ttm, bo, page_flags)) {
		kfree(gtt);
		return NULL;
	}
	return &gtt->ttm.ttm;
}
static struct radeon_ttm_tt *radeon_ttm_tt_to_gtt(struct ttm_tt *ttm)
{
	if (!ttm || ttm->func != &radeon_backend_func)
		return NULL;
	return (struct radeon_ttm_tt *)ttm;
}
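/*
 * radeon_ttm_tt_populate - allocate or attach the backing pages.
 *
 * Userptr ttms only need an empty sg_table here (the pages themselves
 * are pinned at bind time), dma-buf imports already carry an sg_table,
 * and everything else goes through the AGP, swiotlb-aware DMA or regular
 * TTM page pools.
 */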
static int radeon_ttm_tt_populate(struct ttm_tt *ttm,
			struct ttm_operation_ctx *ctx)
{
	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);
	struct radeon_device *rdev;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (gtt && gtt->userptr) {
		ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
		if (!ttm->sg)
			return -ENOMEM;

		ttm->page_flags |= TTM_PAGE_FLAG_SG;
		ttm->state = tt_unbound;
		return 0;
	}

	if (slave && ttm->sg) {
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 gtt->ttm.dma_address, ttm->num_pages);
		ttm->state = tt_unbound;
		return 0;
	}

	rdev = radeon_get_rdev(ttm->bdev);
#if IS_ENABLED(CONFIG_AGP)
	if (rdev->flags & RADEON_IS_AGP) {
		return ttm_agp_tt_populate(ttm, ctx);
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (rdev->need_swiotlb && swiotlb_nr_tbl()) {
		return ttm_dma_populate(&gtt->ttm, rdev->dev, ctx);
	}
#endif

	return ttm_populate_and_map_pages(rdev->dev, &gtt->ttm, ctx);
}
static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct radeon_device *rdev;
	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (gtt && gtt->userptr) {
		kfree(ttm->sg);
		ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
		return;
	}

	if (slave)
		return;

	rdev = radeon_get_rdev(ttm->bdev);
#if IS_ENABLED(CONFIG_AGP)
	if (rdev->flags & RADEON_IS_AGP) {
		ttm_agp_tt_unpopulate(ttm);
		return;
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (rdev->need_swiotlb && swiotlb_nr_tbl()) {
		ttm_dma_unpopulate(&gtt->ttm, rdev->dev);
		return;
	}
#endif

	ttm_unmap_and_unpopulate_pages(rdev->dev, &gtt->ttm);
}
int radeon_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
			      uint32_t flags)
{
	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);

	if (gtt == NULL)
		return -EINVAL;

	gtt->userptr = addr;
	gtt->usermm = current->mm;
	gtt->userflags = flags;
	return 0;
}

bool radeon_ttm_tt_has_userptr(struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);

	if (gtt == NULL)
		return false;

	return !!gtt->userptr;
}

bool radeon_ttm_tt_is_readonly(struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);

	if (gtt == NULL)
		return false;

	return !!(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
}
static struct ttm_bo_driver radeon_bo_driver = {
	.ttm_tt_create = &radeon_ttm_tt_create,
	.ttm_tt_populate = &radeon_ttm_tt_populate,
	.ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate,
	.invalidate_caches = &radeon_invalidate_caches,
	.init_mem_type = &radeon_init_mem_type,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.evict_flags = &radeon_evict_flags,
	.move = &radeon_bo_move,
	.verify_access = &radeon_verify_access,
	.move_notify = &radeon_bo_move_notify,
	.fault_reserve_notify = &radeon_bo_fault_reserve_notify,
	.io_mem_reserve = &radeon_ttm_io_mem_reserve,
	.io_mem_free = &radeon_ttm_io_mem_free,
};
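/*
 * radeon_ttm_init - bring up the TTM device, create the VRAM and GTT
 * pools, and pin the 256KB rdev->stolen_vga_memory buffer in VRAM.
 */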
int radeon_ttm_init(struct radeon_device *rdev)
{
	int r;

	r = radeon_ttm_global_init(rdev);
	if (r) {
		return r;
	}
	/* No others user of address space so set it to 0 */
	r = ttm_bo_device_init(&rdev->mman.bdev,
			       rdev->mman.bo_global_ref.ref.object,
			       &radeon_bo_driver,
			       rdev->ddev->anon_inode->i_mapping,
			       DRM_FILE_PAGE_OFFSET,
			       rdev->need_dma32);
	if (r) {
		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
		return r;
	}
	rdev->mman.initialized = true;
	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM,
				rdev->mc.real_vram_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing VRAM heap.\n");
		return r;
	}
	/* Change the size here instead of the init above so only lpfn is affected */
	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);

	r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
			     RADEON_GEM_DOMAIN_VRAM, 0, NULL,
			     NULL, &rdev->stolen_vga_memory);
	if (r) {
		return r;
	}
	r = radeon_bo_reserve(rdev->stolen_vga_memory, false);
	if (r)
		return r;
	r = radeon_bo_pin(rdev->stolen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
	radeon_bo_unreserve(rdev->stolen_vga_memory);
	if (r) {
		radeon_bo_unref(&rdev->stolen_vga_memory);
		return r;
	}
	DRM_INFO("radeon: %uM of VRAM memory ready\n",
		 (unsigned) (rdev->mc.real_vram_size / (1024 * 1024)));
	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT,
				rdev->mc.gtt_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing GTT heap.\n");
		return r;
	}
	DRM_INFO("radeon: %uM of GTT memory ready.\n",
		 (unsigned)(rdev->mc.gtt_size / (1024 * 1024)));

	r = radeon_ttm_debugfs_init(rdev);
	if (r) {
		DRM_ERROR("Failed to init debugfs\n");
		return r;
	}
	return 0;
}
void radeon_ttm_fini(struct radeon_device *rdev)
{
	int r;

	if (!rdev->mman.initialized)
		return;
	radeon_ttm_debugfs_fini(rdev);
	if (rdev->stolen_vga_memory) {
		r = radeon_bo_reserve(rdev->stolen_vga_memory, false);
		if (r == 0) {
			radeon_bo_unpin(rdev->stolen_vga_memory);
			radeon_bo_unreserve(rdev->stolen_vga_memory);
		}
		radeon_bo_unref(&rdev->stolen_vga_memory);
	}
	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT);
	ttm_bo_device_release(&rdev->mman.bdev);
	radeon_gart_fini(rdev);
	radeon_ttm_global_fini(rdev);
	rdev->mman.initialized = false;
	DRM_INFO("radeon: ttm finalized\n");
}
/* this should only be called at bootup or when userspace
 * isn't running */
void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
{
	struct ttm_mem_type_manager *man;

	if (!rdev->mman.initialized)
		return;

	man = &rdev->mman.bdev.man[TTM_PL_VRAM];
	/* this just adjusts TTM size idea, which sets lpfn to the correct value */
	man->size = size >> PAGE_SHIFT;
}
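/*
 * The fault handler below wraps the generic TTM fault handler and takes
 * pm.mclk_lock for read around it, so memory reclocking and CPU access
 * to VRAM cannot race.
 */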
static struct vm_operations_struct radeon_ttm_vm_ops;
static const struct vm_operations_struct *ttm_vm_ops = NULL;

static vm_fault_t radeon_ttm_fault(struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo;
	struct radeon_device *rdev;
	vm_fault_t ret;

	bo = (struct ttm_buffer_object *)vmf->vma->vm_private_data;
	if (bo == NULL)
		return VM_FAULT_NOPAGE;

	rdev = radeon_get_rdev(bo->bdev);
	down_read(&rdev->pm.mclk_lock);
	ret = ttm_vm_ops->fault(vmf);
	up_read(&rdev->pm.mclk_lock);
	return ret;
}
int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv;
	struct radeon_device *rdev;
	int r;

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
		return -EINVAL;
	}

	file_priv = filp->private_data;
	rdev = file_priv->minor->dev->dev_private;
	if (rdev == NULL) {
		return -EINVAL;
	}

	r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
	if (unlikely(r != 0)) {
		return r;
	}

	if (unlikely(ttm_vm_ops == NULL)) {
		ttm_vm_ops = vma->vm_ops;
		radeon_ttm_vm_ops = *ttm_vm_ops;
		radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
	}
	vma->vm_ops = &radeon_ttm_vm_ops;
	return 0;
}
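/*
 * Debugfs support: dump the VRAM and GTT managers, the TTM page pools,
 * and expose raw read-only views of VRAM and the GART.
 */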
#if defined(CONFIG_DEBUG_FS)

static int radeon_mm_dump_table(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	unsigned ttm_pl = *(int *)node->info_ent->data;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct ttm_mem_type_manager *man = &rdev->mman.bdev.man[ttm_pl];
	struct drm_printer p = drm_seq_file_printer(m);

	man->func->debug(man, &p);
	return 0;
}

static int ttm_pl_vram = TTM_PL_VRAM;
static int ttm_pl_tt = TTM_PL_TT;

static struct drm_info_list radeon_ttm_debugfs_list[] = {
	{"radeon_vram_mm", radeon_mm_dump_table, 0, &ttm_pl_vram},
	{"radeon_gtt_mm", radeon_mm_dump_table, 0, &ttm_pl_tt},
	{"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
#ifdef CONFIG_SWIOTLB
	{"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
#endif
};
static int radeon_ttm_vram_open(struct inode *inode, struct file *filep)
{
	struct radeon_device *rdev = inode->i_private;
	i_size_write(inode, rdev->mc.mc_vram_size);
	filep->private_data = inode->i_private;
	return 0;
}
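/*
 * VRAM is read one dword at a time through the indirect MM_INDEX /
 * MM_DATA register window, so even VRAM beyond the CPU-visible aperture
 * can be dumped; CHIP_CEDAR and newer parts program the high address
 * bits through EVERGREEN_MM_INDEX_HI.
 */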
static ssize_t radeon_ttm_vram_read(struct file *f, char __user *buf,
				    size_t size, loff_t *pos)
{
	struct radeon_device *rdev = f->private_data;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		unsigned long flags;
		uint32_t value;

		if (*pos >= rdev->mc.mc_vram_size)
			return result;

		spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
		WREG32(RADEON_MM_INDEX, ((uint32_t)*pos) | 0x80000000);
		if (rdev->family >= CHIP_CEDAR)
			WREG32(EVERGREEN_MM_INDEX_HI, *pos >> 31);
		value = RREG32(RADEON_MM_DATA);
		spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);

		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static const struct file_operations radeon_ttm_vram_fops = {
	.owner = THIS_MODULE,
	.open = radeon_ttm_vram_open,
	.read = radeon_ttm_vram_read,
	.llseek = default_llseek
};
static int radeon_ttm_gtt_open(struct inode *inode, struct file *filep)
{
	struct radeon_device *rdev = inode->i_private;
	i_size_write(inode, rdev->mc.gtt_size);
	filep->private_data = inode->i_private;
	return 0;
}
static ssize_t radeon_ttm_gtt_read(struct file *f, char __user *buf,
				   size_t size, loff_t *pos)
{
	struct radeon_device *rdev = f->private_data;
	ssize_t result = 0;
	int r;

	while (size) {
		loff_t p = *pos / PAGE_SIZE;
		unsigned off = *pos & ~PAGE_MASK;
		size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
		struct page *page;
		void *ptr;

		if (p >= rdev->gart.num_cpu_pages)
			return result;

		page = rdev->gart.pages[p];
		if (page) {
			ptr = kmap(page);
			ptr += off;

			r = copy_to_user(buf, ptr, cur_size);
			kunmap(rdev->gart.pages[p]);
		} else
			r = clear_user(buf, cur_size);
		if (r)
			return -EFAULT;

		result += cur_size;
		buf += cur_size;
		*pos += cur_size;
		size -= cur_size;
	}

	return result;
}

static const struct file_operations radeon_ttm_gtt_fops = {
	.owner = THIS_MODULE,
	.open = radeon_ttm_gtt_open,
	.read = radeon_ttm_gtt_read,
	.llseek = default_llseek
};

#endif
static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned count;

	struct drm_minor *minor = rdev->ddev->primary;
	struct dentry *ent, *root = minor->debugfs_root;

	ent = debugfs_create_file("radeon_vram", S_IFREG | S_IRUGO, root,
				  rdev, &radeon_ttm_vram_fops);
	if (IS_ERR(ent))
		return PTR_ERR(ent);
	rdev->mman.vram = ent;

	ent = debugfs_create_file("radeon_gtt", S_IFREG | S_IRUGO, root,
				  rdev, &radeon_ttm_gtt_fops);
	if (IS_ERR(ent))
		return PTR_ERR(ent);
	rdev->mman.gtt = ent;

	count = ARRAY_SIZE(radeon_ttm_debugfs_list);

#ifdef CONFIG_SWIOTLB
	if (!(rdev->need_swiotlb && swiotlb_nr_tbl()))
		--count;
#endif

	return radeon_debugfs_add_files(rdev, radeon_ttm_debugfs_list, count);
#else

	return 0;
#endif
}
static void radeon_ttm_debugfs_fini(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	debugfs_remove(rdev->mman.vram);
	rdev->mman.vram = NULL;

	debugfs_remove(rdev->mman.gtt);
	rdev->mman.gtt = NULL;
#endif
}