/*
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 */
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swiotlb.h>

#include <drm/drm_agpsupport.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_pci.h>
#include <drm/drm_prime.h>
#include <drm/radeon_drm.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <drm/ttm/ttm_placement.h>
#include "radeon_reg.h"
#include "radeon.h"

static int radeon_ttm_debugfs_init(struct radeon_device *rdev);
static void radeon_ttm_debugfs_fini(struct radeon_device *rdev);

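/* Walk back from a ttm_bo_device to the owning radeon_device: the bdev is
 * embedded in struct radeon_mman, which in turn is embedded in the device.
 */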
static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
{
	struct radeon_mman *mman;
	struct radeon_device *rdev;

	mman = container_of(bdev, struct radeon_mman, bdev);
	rdev = container_of(mman, struct radeon_device, mman);
	return rdev;
}

static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	return 0;
}

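/* Describe the TTM memory heaps to the core: cached system pages, the
 * GART-backed TT domain (routed through the AGP aperture when active) and
 * on-board VRAM, which is mapped write-combined through the PCI aperture.
 */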
static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				struct ttm_mem_type_manager *man)
{
	struct radeon_device *rdev;

	rdev = radeon_get_rdev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_TT:
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = rdev->mc.gtt_start;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
#if IS_ENABLED(CONFIG_AGP)
		if (rdev->flags & RADEON_IS_AGP) {
			if (!rdev->ddev->agp) {
				DRM_ERROR("AGP is not enabled for memory type %u\n",
					  (unsigned)type);
				return -EINVAL;
			}
			if (!rdev->ddev->agp->cant_use_aperture)
				man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
						 TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
		}
#endif
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = rdev->mc.vram_start;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

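/* Choose where a buffer goes when it must be evicted: VRAM buffers in the
 * CPU-visible range are pushed to invisible VRAM or GTT first, everything
 * else falls back to the CPU (system) domain.
 */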
static void radeon_evict_flags(struct ttm_buffer_object *bo,
				struct ttm_placement *placement)
{
	static const struct ttm_place placements = {
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
	};

	struct radeon_bo *rbo;

	if (!radeon_ttm_bo_is_radeon_bo(bo)) {
		placement->placement = &placements;
		placement->busy_placement = &placements;
		placement->num_placement = 1;
		placement->num_busy_placement = 1;
		return;
	}
	rbo = container_of(bo, struct radeon_bo, tbo);
	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		if (rbo->rdev->ring[radeon_copy_ring_index(rbo->rdev)].ready == false)
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
		else if (rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size &&
			 bo->mem.start < (rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT)) {
			unsigned fpfn = rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
			int i;

			/* Try evicting to the CPU inaccessible part of VRAM
			 * first, but only set GTT as busy placement, so this
			 * BO will be evicted to GTT rather than causing other
			 * BOs to be evicted from VRAM
			 */
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM |
							 RADEON_GEM_DOMAIN_GTT);
			rbo->placement.num_busy_placement = 0;
			for (i = 0; i < rbo->placement.num_placement; i++) {
				if (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) {
					if (rbo->placements[i].fpfn < fpfn)
						rbo->placements[i].fpfn = fpfn;
				} else {
					rbo->placement.busy_placement =
						&rbo->placements[i];
					rbo->placement.num_busy_placement = 1;
				}
			}
		} else
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
		break;
	case TTM_PL_TT:
	default:
		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
	}
	*placement = rbo->placement;
}

static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);

	if (radeon_ttm_tt_has_userptr(bo->ttm))
		return -EPERM;
	return drm_vma_node_verify_access(&rbo->tbo.base.vma_node,
					  filp->private_data);
}

static void radeon_move_null(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	BUG_ON(old_mem->mm_node != NULL);
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
}

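/* Copy a buffer between placements on the GPU: translate both placements to
 * GPU addresses, issue an asic copy and fence the buffer with the result.
 */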
static int radeon_move_blit(struct ttm_buffer_object *bo,
			bool evict, bool no_wait_gpu,
			struct ttm_mem_reg *new_mem,
			struct ttm_mem_reg *old_mem)
{
	struct radeon_device *rdev;
	uint64_t old_start, new_start;
	struct radeon_fence *fence;
	unsigned num_pages;
	int r, ridx;

	rdev = radeon_get_rdev(bo->bdev);
	ridx = radeon_copy_ring_index(rdev);
	old_start = (u64)old_mem->start << PAGE_SHIFT;
	new_start = (u64)new_mem->start << PAGE_SHIFT;

	switch (old_mem->mem_type) {
	case TTM_PL_VRAM:
		old_start += rdev->mc.vram_start;
		break;
	case TTM_PL_TT:
		old_start += rdev->mc.gtt_start;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
		return -EINVAL;
	}
	switch (new_mem->mem_type) {
	case TTM_PL_VRAM:
		new_start += rdev->mc.vram_start;
		break;
	case TTM_PL_TT:
		new_start += rdev->mc.gtt_start;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", new_mem->mem_type);
		return -EINVAL;
	}
	if (!rdev->ring[ridx].ready) {
		DRM_ERROR("Trying to move memory with ring turned off.\n");
		return -EINVAL;
	}

	BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);

	num_pages = new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
	fence = radeon_copy(rdev, old_start, new_start, num_pages, bo->base.resv);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	r = ttm_bo_move_accel_cleanup(bo, &fence->base, evict, new_mem);
	radeon_fence_unref(&fence);
	return r;
}

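/* Two-step VRAM -> system move: blit into a temporary GTT placement first,
 * then let TTM move the now GART-bound pages to system memory. The helper
 * after it does the same in the opposite direction.
 */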
static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
				bool evict, bool interruptible,
				bool no_wait_gpu,
				struct ttm_mem_reg *new_mem)
{
	struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu };
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	struct ttm_place placements;
	struct ttm_placement placement;
	int r;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = 0;
	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, &ctx);
	if (unlikely(r)) {
		return r;
	}

	r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
	if (unlikely(r)) {
		goto out_cleanup;
	}

	r = ttm_tt_bind(bo->ttm, &tmp_mem, &ctx);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = radeon_move_blit(bo, true, no_wait_gpu, &tmp_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = ttm_bo_move_ttm(bo, &ctx, new_mem);
out_cleanup:
	ttm_bo_mem_put(bo, &tmp_mem);
	return r;
}

static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
				bool evict, bool interruptible,
				bool no_wait_gpu,
				struct ttm_mem_reg *new_mem)
{
	struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu };
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	struct ttm_placement placement;
	struct ttm_place placements;
	int r;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = 0;
	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, &ctx);
	if (unlikely(r)) {
		return r;
	}
	r = ttm_bo_move_ttm(bo, &ctx, &tmp_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = radeon_move_blit(bo, true, no_wait_gpu, new_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
out_cleanup:
	ttm_bo_mem_put(bo, &tmp_mem);
	return r;
}

static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
			  struct ttm_operation_ctx *ctx,
			  struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct radeon_bo *rbo;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int r;

	r = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
	if (r)
		return r;

	/* Can't move a pinned BO */
	rbo = container_of(bo, struct radeon_bo, tbo);
	if (WARN_ON_ONCE(rbo->pin_count > 0))
		return -EINVAL;

	rdev = radeon_get_rdev(bo->bdev);
	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		radeon_move_null(bo, new_mem);
		return 0;
	}
	if ((old_mem->mem_type == TTM_PL_TT &&
	     new_mem->mem_type == TTM_PL_SYSTEM) ||
	    (old_mem->mem_type == TTM_PL_SYSTEM &&
	     new_mem->mem_type == TTM_PL_TT)) {
		/* bind is enough */
		radeon_move_null(bo, new_mem);
		return 0;
	}
	if (!rdev->ring[radeon_copy_ring_index(rdev)].ready ||
	    rdev->asic->copy.copy == NULL) {
		/* use memcpy */
		goto memcpy;
	}

	if (old_mem->mem_type == TTM_PL_VRAM &&
	    new_mem->mem_type == TTM_PL_SYSTEM) {
		r = radeon_move_vram_ram(bo, evict, ctx->interruptible,
					 ctx->no_wait_gpu, new_mem);
	} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
		   new_mem->mem_type == TTM_PL_VRAM) {
		r = radeon_move_ram_vram(bo, evict, ctx->interruptible,
					 ctx->no_wait_gpu, new_mem);
	} else {
		r = radeon_move_blit(bo, evict, ctx->no_wait_gpu,
				     new_mem, old_mem);
	}

	if (r) {
memcpy:
		r = ttm_bo_move_memcpy(bo, ctx, new_mem);
		if (r) {
			return r;
		}
	}

	/* update statistics */
	atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &rdev->num_bytes_moved);
	return 0;
}

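/* Tell TTM how a placement is reached from the CPU: TT goes through the AGP
 * aperture when active, VRAM through the PCI aperture (only the CPU-visible
 * part of VRAM can be mapped).
 */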
static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct radeon_device *rdev = radeon_get_rdev(bdev);

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* system memory */
		return 0;
	case TTM_PL_TT:
#if IS_ENABLED(CONFIG_AGP)
		if (rdev->flags & RADEON_IS_AGP) {
			/* RADEON_IS_AGP is set only if AGP is active */
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = rdev->mc.agp_base;
			mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture;
		}
#endif
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		/* check if it's visible */
		if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size)
			return -EINVAL;
		mem->bus.base = rdev->mc.aper_base;
		mem->bus.is_iomem = true;
#ifdef __alpha__
		/*
		 * Alpha: use bus.addr to hold the ioremap() return,
		 * so we can modify bus.base below.
		 */
		if (mem->placement & TTM_PL_FLAG_WC)
			mem->bus.addr =
				ioremap_wc(mem->bus.base + mem->bus.offset,
					   mem->bus.size);
		else
			mem->bus.addr =
				ioremap_nocache(mem->bus.base + mem->bus.offset,
						mem->bus.size);
		if (!mem->bus.addr)
			return -ENOMEM;

		/*
		 * Alpha: Use just the bus offset plus
		 * the hose/domain memory base for bus.base.
		 * It then can be used to build PTEs for VRAM
		 * access, as done in ttm_bo_vm_fault().
		 */
		mem->bus.base = (mem->bus.base & 0x0ffffffffUL) +
			rdev->ddev->hose->dense_mem_base;
#endif
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

/*
 * TTM backend functions.
 */
struct radeon_ttm_tt {
	struct ttm_dma_tt		ttm;
	struct radeon_device		*rdev;
	u64				offset;

	uint64_t			userptr;
	struct mm_struct		*usermm;
	uint32_t			userflags;
};

/* prepare the sg table with the user pages */
static int radeon_ttm_tt_pin_userptr(struct ttm_tt *ttm)
{
	struct radeon_device *rdev = radeon_get_rdev(ttm->bdev);
	struct radeon_ttm_tt *gtt = (void *)ttm;
	unsigned pinned = 0, nents;
	int r;

	int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	if (current->mm != gtt->usermm)
		return -EPERM;

	if (gtt->userflags & RADEON_GEM_USERPTR_ANONONLY) {
		/* check that we only pin down anonymous memory
		   to prevent problems with writeback */
		unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
		struct vm_area_struct *vma;
		vma = find_vma(gtt->usermm, gtt->userptr);
		if (!vma || vma->vm_file || vma->vm_end < end)
			return -EPERM;
	}

	do {
		unsigned num_pages = ttm->num_pages - pinned;
		uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
		struct page **pages = ttm->pages + pinned;

		r = get_user_pages(userptr, num_pages, write ? FOLL_WRITE : 0,
				   pages, NULL);
		if (r < 0)
			goto release_pages;

		pinned += r;

	} while (pinned < ttm->num_pages);

	r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
				      ttm->num_pages << PAGE_SHIFT,
				      GFP_KERNEL);
	if (r)
		goto release_sg;

	r = -ENOMEM;
	nents = dma_map_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
	if (nents == 0)
		goto release_sg;

	drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
					 gtt->ttm.dma_address, ttm->num_pages);

	return 0;

release_sg:
	kfree(ttm->sg);

release_pages:
	release_pages(ttm->pages, pinned);
	return r;
}

static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
{
	struct radeon_device *rdev = radeon_get_rdev(ttm->bdev);
	struct radeon_ttm_tt *gtt = (void *)ttm;
	struct sg_page_iter sg_iter;

	int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	/* double check that we don't free the table twice */
	if (!ttm->sg->sgl)
		return;

	/* free the sg table and pages again */
	dma_unmap_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction);

	for_each_sg_page(ttm->sg->sgl, &sg_iter, ttm->sg->nents, 0) {
		struct page *page = sg_page_iter_page(&sg_iter);
		if (!(gtt->userflags & RADEON_GEM_USERPTR_READONLY))
			set_page_dirty(page);

		mark_page_accessed(page);
		put_page(page);
	}

	sg_free_table(ttm->sg);
}

static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
				   struct ttm_mem_reg *bo_mem)
{
	struct radeon_ttm_tt *gtt = (void *)ttm;
	uint32_t flags = RADEON_GART_PAGE_VALID | RADEON_GART_PAGE_READ |
		RADEON_GART_PAGE_WRITE;
	int r;

	if (gtt->userptr) {
		radeon_ttm_tt_pin_userptr(ttm);
		flags &= ~RADEON_GART_PAGE_WRITE;
	}

	gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
	if (!ttm->num_pages) {
		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
		     ttm->num_pages, bo_mem, ttm);
	}
	if (ttm->caching_state == tt_cached)
		flags |= RADEON_GART_PAGE_SNOOP;
	r = radeon_gart_bind(gtt->rdev, gtt->offset, ttm->num_pages,
			     ttm->pages, gtt->ttm.dma_address, flags);
	if (r) {
		DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
			  ttm->num_pages, (unsigned)gtt->offset);
		return r;
	}
	return 0;
}

static int radeon_ttm_backend_unbind(struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = (void *)ttm;

	radeon_gart_unbind(gtt->rdev, gtt->offset, ttm->num_pages);

	if (gtt->userptr)
		radeon_ttm_tt_unpin_userptr(ttm);

	return 0;
}

static void radeon_ttm_backend_destroy(struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = (void *)ttm;

	ttm_dma_tt_fini(&gtt->ttm);
	kfree(gtt);
}

static struct ttm_backend_func radeon_backend_func = {
	.bind = &radeon_ttm_backend_bind,
	.unbind = &radeon_ttm_backend_unbind,
	.destroy = &radeon_ttm_backend_destroy,
};

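/* Allocate the backing ttm_tt for a BO: AGP systems use the generic AGP
 * backend, everything else gets a DMA-capable TTM wired to the GART
 * bind/unbind hooks above.
 */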
static struct ttm_tt *radeon_ttm_tt_create(struct ttm_buffer_object *bo,
					   uint32_t page_flags)
{
	struct radeon_device *rdev;
	struct radeon_ttm_tt *gtt;

	rdev = radeon_get_rdev(bo->bdev);
#if IS_ENABLED(CONFIG_AGP)
	if (rdev->flags & RADEON_IS_AGP) {
		return ttm_agp_tt_create(bo, rdev->ddev->agp->bridge,
					 page_flags);
	}
#endif

	gtt = kzalloc(sizeof(struct radeon_ttm_tt), GFP_KERNEL);
	if (gtt == NULL) {
		return NULL;
	}
	gtt->ttm.ttm.func = &radeon_backend_func;
	gtt->rdev = rdev;
	if (ttm_dma_tt_init(&gtt->ttm, bo, page_flags)) {
		kfree(gtt);
		return NULL;
	}
	return &gtt->ttm.ttm;
}

static struct radeon_ttm_tt *radeon_ttm_tt_to_gtt(struct ttm_tt *ttm)
{
	if (!ttm || ttm->func != &radeon_backend_func)
		return NULL;
	return (struct radeon_ttm_tt *)ttm;
}

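/* Populate backing pages: userptr BOs only need an sg_table here (pages are
 * pinned at bind time), dma-buf imports already carry their pages, and
 * everything else goes through the regular TTM page pools.
 */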
static int radeon_ttm_tt_populate(struct ttm_tt *ttm,
			struct ttm_operation_ctx *ctx)
{
	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);
	struct radeon_device *rdev;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (gtt && gtt->userptr) {
		ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
		if (!ttm->sg)
			return -ENOMEM;

		ttm->page_flags |= TTM_PAGE_FLAG_SG;
		ttm->state = tt_unbound;
		return 0;
	}

	if (slave && ttm->sg) {
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 gtt->ttm.dma_address, ttm->num_pages);
		ttm->state = tt_unbound;
		return 0;
	}

	rdev = radeon_get_rdev(ttm->bdev);
#if IS_ENABLED(CONFIG_AGP)
	if (rdev->flags & RADEON_IS_AGP) {
		return ttm_agp_tt_populate(ttm, ctx);
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (rdev->need_swiotlb && swiotlb_nr_tbl()) {
		return ttm_dma_populate(&gtt->ttm, rdev->dev, ctx);
	}
#endif

	return ttm_populate_and_map_pages(rdev->dev, &gtt->ttm, ctx);
}

static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct radeon_device *rdev;
	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (gtt && gtt->userptr) {
		kfree(ttm->sg);
		ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
		return;
	}

	if (slave)
		return;

	rdev = radeon_get_rdev(ttm->bdev);
#if IS_ENABLED(CONFIG_AGP)
	if (rdev->flags & RADEON_IS_AGP) {
		ttm_agp_tt_unpopulate(ttm);
		return;
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (rdev->need_swiotlb && swiotlb_nr_tbl()) {
		ttm_dma_unpopulate(&gtt->ttm, rdev->dev);
		return;
	}
#endif

	ttm_unmap_and_unpopulate_pages(rdev->dev, &gtt->ttm);
}

int radeon_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
			      uint32_t flags)
{
	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);

	if (gtt == NULL)
		return -EINVAL;

	gtt->userptr = addr;
	gtt->usermm = current->mm;
	gtt->userflags = flags;
	return 0;
}

bool radeon_ttm_tt_has_userptr(struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);

	if (gtt == NULL)
		return false;

	return !!gtt->userptr;
}

bool radeon_ttm_tt_is_readonly(struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);

	if (gtt == NULL)
		return false;

	return !!(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
}

static struct ttm_bo_driver radeon_bo_driver = {
	.ttm_tt_create = &radeon_ttm_tt_create,
	.ttm_tt_populate = &radeon_ttm_tt_populate,
	.ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate,
	.invalidate_caches = &radeon_invalidate_caches,
	.init_mem_type = &radeon_init_mem_type,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.evict_flags = &radeon_evict_flags,
	.move = &radeon_bo_move,
	.verify_access = &radeon_verify_access,
	.move_notify = &radeon_bo_move_notify,
	.fault_reserve_notify = &radeon_bo_fault_reserve_notify,
	.io_mem_reserve = &radeon_ttm_io_mem_reserve,
	.io_mem_free = &radeon_ttm_io_mem_free,
};

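/* Bring up TTM for the device: register the driver, create the VRAM and GTT
 * heaps, and pin a small VRAM buffer used as stolen VGA memory.
 */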
int radeon_ttm_init(struct radeon_device *rdev)
{
	int r;

	/* No other users of the address space, so set it to 0 */
	r = ttm_bo_device_init(&rdev->mman.bdev,
			       &radeon_bo_driver,
			       rdev->ddev->anon_inode->i_mapping,
			       rdev->ddev->vma_offset_manager,
			       dma_addressing_limited(&rdev->pdev->dev));
	if (r) {
		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
		return r;
	}
	rdev->mman.initialized = true;
	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM,
			   rdev->mc.real_vram_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing VRAM heap.\n");
		return r;
	}
	/* Change the size here instead of the init above so only lpfn is affected */
	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);

	r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
			     RADEON_GEM_DOMAIN_VRAM, 0, NULL,
			     NULL, &rdev->stolen_vga_memory);
	if (r) {
		return r;
	}
	r = radeon_bo_reserve(rdev->stolen_vga_memory, false);
	if (r)
		return r;
	r = radeon_bo_pin(rdev->stolen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
	radeon_bo_unreserve(rdev->stolen_vga_memory);
	if (r) {
		radeon_bo_unref(&rdev->stolen_vga_memory);
		return r;
	}
	DRM_INFO("radeon: %uM of VRAM memory ready\n",
		 (unsigned) (rdev->mc.real_vram_size / (1024 * 1024)));
	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT,
			   rdev->mc.gtt_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing GTT heap.\n");
		return r;
	}
	DRM_INFO("radeon: %uM of GTT memory ready.\n",
		 (unsigned)(rdev->mc.gtt_size / (1024 * 1024)));

	r = radeon_ttm_debugfs_init(rdev);
	if (r) {
		DRM_ERROR("Failed to init debugfs\n");
		return r;
	}
	return 0;
}

void radeon_ttm_fini(struct radeon_device *rdev)
{
	int r;

	if (!rdev->mman.initialized)
		return;
	radeon_ttm_debugfs_fini(rdev);
	if (rdev->stolen_vga_memory) {
		r = radeon_bo_reserve(rdev->stolen_vga_memory, false);
		if (r == 0) {
			radeon_bo_unpin(rdev->stolen_vga_memory);
			radeon_bo_unreserve(rdev->stolen_vga_memory);
		}
		radeon_bo_unref(&rdev->stolen_vga_memory);
	}
	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT);
	ttm_bo_device_release(&rdev->mman.bdev);
	radeon_gart_fini(rdev);
	rdev->mman.initialized = false;
	DRM_INFO("radeon: ttm finalized\n");
}

/* this should only be called at bootup or when userspace
 * isn't running */
void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
{
	struct ttm_mem_type_manager *man;

	if (!rdev->mman.initialized)
		return;

	man = &rdev->mman.bdev.man[TTM_PL_VRAM];
	/* this just adjusts TTM's idea of the size, which sets lpfn to the correct value */
	man->size = size >> PAGE_SHIFT;
}

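/* Wrap TTM's fault handler so that faults are serialized against memory
 * clock reclocking via pm.mclk_lock.
 */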
static struct vm_operations_struct radeon_ttm_vm_ops;
static const struct vm_operations_struct *ttm_vm_ops = NULL;

static vm_fault_t radeon_ttm_fault(struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo;
	struct radeon_device *rdev;
	vm_fault_t ret;

	bo = (struct ttm_buffer_object *)vmf->vma->vm_private_data;
	if (bo == NULL)
		return VM_FAULT_NOPAGE;

	rdev = radeon_get_rdev(bo->bdev);
	down_read(&rdev->pm.mclk_lock);
	ret = ttm_vm_ops->fault(vmf);
	up_read(&rdev->pm.mclk_lock);
	return ret;
}

int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int r;
	struct drm_file *file_priv = filp->private_data;
	struct radeon_device *rdev = file_priv->minor->dev->dev_private;

	if (rdev == NULL)
		return -EINVAL;

	r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
	if (unlikely(r != 0)) {
		return r;
	}

	if (unlikely(ttm_vm_ops == NULL)) {
		ttm_vm_ops = vma->vm_ops;
		radeon_ttm_vm_ops = *ttm_vm_ops;
		radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
	}

	vma->vm_ops = &radeon_ttm_vm_ops;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)

static int radeon_mm_dump_table(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	unsigned ttm_pl = *(int*)node->info_ent->data;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct ttm_mem_type_manager *man = &rdev->mman.bdev.man[ttm_pl];
	struct drm_printer p = drm_seq_file_printer(m);

	man->func->debug(man, &p);
	return 0;
}

static int ttm_pl_vram = TTM_PL_VRAM;
static int ttm_pl_tt = TTM_PL_TT;

static struct drm_info_list radeon_ttm_debugfs_list[] = {
	{"radeon_vram_mm", radeon_mm_dump_table, 0, &ttm_pl_vram},
	{"radeon_gtt_mm", radeon_mm_dump_table, 0, &ttm_pl_tt},
	{"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
#ifdef CONFIG_SWIOTLB
	{"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
#endif
};

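/* debugfs files exposing raw VRAM and GART contents as readable blobs. */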
static int radeon_ttm_vram_open(struct inode *inode, struct file *filep)
{
	struct radeon_device *rdev = inode->i_private;
	i_size_write(inode, rdev->mc.mc_vram_size);
	filep->private_data = inode->i_private;
	return 0;
}

static ssize_t radeon_ttm_vram_read(struct file *f, char __user *buf,
				    size_t size, loff_t *pos)
{
	struct radeon_device *rdev = f->private_data;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		unsigned long flags;
		uint32_t value;

		if (*pos >= rdev->mc.mc_vram_size)
			return result;

		spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
		WREG32(RADEON_MM_INDEX, ((uint32_t)*pos) | 0x80000000);
		if (rdev->family >= CHIP_CEDAR)
			WREG32(EVERGREEN_MM_INDEX_HI, *pos >> 31);
		value = RREG32(RADEON_MM_DATA);
		spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);

		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static const struct file_operations radeon_ttm_vram_fops = {
	.owner = THIS_MODULE,
	.open = radeon_ttm_vram_open,
	.read = radeon_ttm_vram_read,
	.llseek = default_llseek
};

static int radeon_ttm_gtt_open(struct inode *inode, struct file *filep)
{
	struct radeon_device *rdev = inode->i_private;
	i_size_write(inode, rdev->mc.gtt_size);
	filep->private_data = inode->i_private;
	return 0;
}

static ssize_t radeon_ttm_gtt_read(struct file *f, char __user *buf,
				   size_t size, loff_t *pos)
{
	struct radeon_device *rdev = f->private_data;
	ssize_t result = 0;
	int r;

	while (size) {
		loff_t p = *pos / PAGE_SIZE;
		unsigned off = *pos & ~PAGE_MASK;
		size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
		struct page *page;
		void *ptr;

		if (p >= rdev->gart.num_cpu_pages)
			return result;

		page = rdev->gart.pages[p];
		if (page) {
			ptr = kmap(page);
			ptr += off;

			r = copy_to_user(buf, ptr, cur_size);
			kunmap(rdev->gart.pages[p]);
		} else
			r = clear_user(buf, cur_size);

		if (r)
			return -EFAULT;

		result += cur_size;
		buf += cur_size;
		*pos += cur_size;
		size -= cur_size;
	}

	return result;
}

static const struct file_operations radeon_ttm_gtt_fops = {
	.owner = THIS_MODULE,
	.open = radeon_ttm_gtt_open,
	.read = radeon_ttm_gtt_read,
	.llseek = default_llseek
};

#endif

static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned count;

	struct drm_minor *minor = rdev->ddev->primary;
	struct dentry *root = minor->debugfs_root;

	rdev->mman.vram = debugfs_create_file("radeon_vram", S_IFREG | S_IRUGO,
					      root, rdev,
					      &radeon_ttm_vram_fops);

	rdev->mman.gtt = debugfs_create_file("radeon_gtt", S_IFREG | S_IRUGO,
					     root, rdev, &radeon_ttm_gtt_fops);

	count = ARRAY_SIZE(radeon_ttm_debugfs_list);

#ifdef CONFIG_SWIOTLB
	if (!(rdev->need_swiotlb && swiotlb_nr_tbl()))
		--count;
#endif

	return radeon_debugfs_add_files(rdev, radeon_ttm_debugfs_list, count);
#else
	return 0;
#endif
}

static void radeon_ttm_debugfs_fini(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)

	debugfs_remove(rdev->mman.vram);
	rdev->mman.vram = NULL;

	debugfs_remove(rdev->mman.gtt);
	rdev->mman.gtt = NULL;
#endif
}