/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 */

#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swiotlb.h>

#include <drm/drm_agpsupport.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_prime.h>
#include <drm/radeon_drm.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_placement.h>

#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_ttm.h"

static int radeon_ttm_debugfs_init(struct radeon_device *rdev);
static void radeon_ttm_debugfs_fini(struct radeon_device *rdev);

static int radeon_ttm_tt_bind(struct ttm_bo_device *bdev,
			      struct ttm_tt *ttm,
			      struct ttm_resource *bo_mem);
static void radeon_ttm_tt_unbind(struct ttm_bo_device *bdev,
				 struct ttm_tt *ttm);

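/*
 * Recover the radeon_device that owns a ttm_bo_device by walking back up
 * the embedding structs (bdev lives inside radeon_mman, which lives inside
 * radeon_device).
 */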
struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
{
	struct radeon_mman *mman;
	struct radeon_device *rdev;

	mman = container_of(bdev, struct radeon_mman, bdev);
	rdev = container_of(mman, struct radeon_device, mman);
	return rdev;
}

static int radeon_ttm_init_vram(struct radeon_device *rdev)
{
	return ttm_range_man_init(&rdev->mman.bdev, TTM_PL_VRAM,
				  false, rdev->mc.real_vram_size >> PAGE_SHIFT);
}

static int radeon_ttm_init_gtt(struct radeon_device *rdev)
{
	return ttm_range_man_init(&rdev->mman.bdev, TTM_PL_TT,
				  true, rdev->mc.gtt_size >> PAGE_SHIFT);
}

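/*
 * Pick eviction placements for a BO: non-radeon BOs go to system memory,
 * while VRAM BOs are preferably pushed to the CPU-inaccessible part of
 * VRAM or to GTT, and everything else to CPU-accessible memory.
 */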
static void radeon_evict_flags(struct ttm_buffer_object *bo,
				struct ttm_placement *placement)
{
	static const struct ttm_place placements = {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_SYSTEM,
		.flags = 0
	};

	struct radeon_bo *rbo;

	if (!radeon_ttm_bo_is_radeon_bo(bo)) {
		placement->placement = &placements;
		placement->busy_placement = &placements;
		placement->num_placement = 1;
		placement->num_busy_placement = 1;
		return;
	}
	rbo = container_of(bo, struct radeon_bo, tbo);
	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		if (rbo->rdev->ring[radeon_copy_ring_index(rbo->rdev)].ready == false)
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
		else if (rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size &&
			 bo->mem.start < (rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT)) {
			unsigned fpfn = rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
			int i;

			/* Try evicting to the CPU inaccessible part of VRAM
			 * first, but only set GTT as busy placement, so this
			 * BO will be evicted to GTT rather than causing other
			 * BOs to be evicted from VRAM
			 */
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM |
							 RADEON_GEM_DOMAIN_GTT);
			rbo->placement.num_busy_placement = 0;
			for (i = 0; i < rbo->placement.num_placement; i++) {
				if (rbo->placements[i].mem_type == TTM_PL_VRAM) {
					if (rbo->placements[i].fpfn < fpfn)
						rbo->placements[i].fpfn = fpfn;
				} else {
					rbo->placement.busy_placement =
						&rbo->placements[i];
					rbo->placement.num_busy_placement = 1;
				}
			}
		} else
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
		break;
	case TTM_PL_TT:
	default:
		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
	}
	*placement = rbo->placement;
}

static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
	struct radeon_device *rdev = radeon_get_rdev(bo->bdev);

	if (radeon_ttm_tt_has_userptr(rdev, bo->ttm))
		return -EPERM;
	return drm_vma_node_verify_access(&rbo->tbo.base.vma_node,
					  filp->private_data);
}

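/*
 * Copy a BO between placements using the GPU copy ring. Both resources are
 * translated to GPU addresses first, and the copy is fenced so TTM can
 * clean up the old resource once the blit has finished.
 */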
static int radeon_move_blit(struct ttm_buffer_object *bo,
			bool evict,
			struct ttm_resource *new_mem,
			struct ttm_resource *old_mem)
{
	struct radeon_device *rdev;
	uint64_t old_start, new_start;
	struct radeon_fence *fence;
	unsigned num_pages;
	int r, ridx;

	rdev = radeon_get_rdev(bo->bdev);
	ridx = radeon_copy_ring_index(rdev);
	old_start = (u64)old_mem->start << PAGE_SHIFT;
	new_start = (u64)new_mem->start << PAGE_SHIFT;

	switch (old_mem->mem_type) {
	case TTM_PL_VRAM:
		old_start += rdev->mc.vram_start;
		break;
	case TTM_PL_TT:
		old_start += rdev->mc.gtt_start;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
		return -EINVAL;
	}
	switch (new_mem->mem_type) {
	case TTM_PL_VRAM:
		new_start += rdev->mc.vram_start;
		break;
	case TTM_PL_TT:
		new_start += rdev->mc.gtt_start;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", new_mem->mem_type);
		return -EINVAL;
	}
	if (!rdev->ring[ridx].ready) {
		DRM_ERROR("Trying to move memory with ring turned off.\n");
		return -EINVAL;
	}

	BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);

	num_pages = new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
	fence = radeon_copy(rdev, old_start, new_start, num_pages, bo->base.resv);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	r = ttm_bo_move_accel_cleanup(bo, &fence->base, evict, false, new_mem);
	radeon_fence_unref(&fence);
	return r;
}

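/*
 * TTM move callback: no-op moves and system<->GTT transitions are handled
 * by (un)binding the GART pages; direct system<->VRAM moves request a
 * multihop through GTT; everything else is blitted with the copy ring,
 * falling back to a CPU memcpy when no ring is available.
 */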
static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
			  struct ttm_operation_ctx *ctx,
			  struct ttm_resource *new_mem,
			  struct ttm_place *hop)
{
	struct radeon_device *rdev;
	struct radeon_bo *rbo;
	struct ttm_resource *old_mem = &bo->mem;
	int r;

	if (new_mem->mem_type == TTM_PL_TT) {
		r = radeon_ttm_tt_bind(bo->bdev, bo->ttm, new_mem);
		if (r)
			return r;
	}

	r = ttm_bo_wait_ctx(bo, ctx);
	if (r)
		return r;

	/* Can't move a pinned BO */
	rbo = container_of(bo, struct radeon_bo, tbo);
	if (WARN_ON_ONCE(rbo->tbo.pin_count > 0))
		return -EINVAL;

	rdev = radeon_get_rdev(bo->bdev);
	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		ttm_bo_move_null(bo, new_mem);
		goto out;
	}
	if (old_mem->mem_type == TTM_PL_SYSTEM &&
	    new_mem->mem_type == TTM_PL_TT) {
		ttm_bo_move_null(bo, new_mem);
		goto out;
	}

	if (old_mem->mem_type == TTM_PL_TT &&
	    new_mem->mem_type == TTM_PL_SYSTEM) {
		radeon_ttm_tt_unbind(bo->bdev, bo->ttm);
		ttm_resource_free(bo, &bo->mem);
		ttm_bo_assign_mem(bo, new_mem);
		goto out;
	}
	if (rdev->ring[radeon_copy_ring_index(rdev)].ready &&
	    rdev->asic->copy.copy != NULL) {
		if ((old_mem->mem_type == TTM_PL_SYSTEM &&
		     new_mem->mem_type == TTM_PL_VRAM) ||
		    (old_mem->mem_type == TTM_PL_VRAM &&
		     new_mem->mem_type == TTM_PL_SYSTEM)) {
			hop->fpfn = 0;
			hop->lpfn = 0;
			hop->mem_type = TTM_PL_TT;
			hop->flags = 0;
			return -EMULTIHOP;
		}

		r = radeon_move_blit(bo, evict, new_mem, old_mem);
	} else {
		r = -ENODEV;
	}

	if (r) {
		r = ttm_bo_move_memcpy(bo, ctx, new_mem);
		if (r)
			return r;
	}

out:
	/* update statistics */
	atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &rdev->num_bytes_moved);
	radeon_bo_move_notify(bo, evict, new_mem);
	return 0;
}

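/*
 * Fill in bus placement so TTM can map the resource for CPU access: the
 * AGP aperture for GTT on AGP cards, and the PCI aperture for visible
 * VRAM (with Alpha-specific hose handling).
 */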
static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *mem)
{
	struct radeon_device *rdev = radeon_get_rdev(bdev);
	size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;

	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* system memory */
		return 0;
	case TTM_PL_TT:
#if IS_ENABLED(CONFIG_AGP)
		if (rdev->flags & RADEON_IS_AGP) {
			/* RADEON_IS_AGP is set only if AGP is active */
			mem->bus.offset = (mem->start << PAGE_SHIFT) +
				rdev->mc.agp_base;
			mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture;
			mem->bus.caching = ttm_write_combined;
		}
#endif
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		/* check if it's visible */
		if ((mem->bus.offset + bus_size) > rdev->mc.visible_vram_size)
			return -EINVAL;
		mem->bus.offset += rdev->mc.aper_base;
		mem->bus.is_iomem = true;
		mem->bus.caching = ttm_write_combined;
#ifdef __alpha__
		/*
		 * Alpha: use bus.addr to hold the ioremap() return,
		 * so we can modify bus.base below.
		 */
		mem->bus.addr = ioremap_wc(mem->bus.offset, bus_size);
		if (!mem->bus.addr)
			return -ENOMEM;

		/*
		 * Alpha: Use just the bus offset plus
		 * the hose/domain memory base for bus.base.
		 * It then can be used to build PTEs for VRAM
		 * access, as done in ttm_bo_vm_fault().
		 */
		mem->bus.offset = (mem->bus.offset & 0x0ffffffffUL) +
			rdev->ddev->hose->dense_mem_base;
#endif
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

/*
 * TTM backend functions.
 */
struct radeon_ttm_tt {
	struct ttm_dma_tt	ttm;
	u64			offset;

	uint64_t		userptr;
	struct mm_struct	*usermm;
	uint32_t		userflags;
	bool			bound;
};

/* prepare the sg table with the user pages */
static int radeon_ttm_tt_pin_userptr(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
	struct radeon_device *rdev = radeon_get_rdev(bdev);
	struct radeon_ttm_tt *gtt = (void *)ttm;
	unsigned pinned = 0;
	int r;

	int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	if (current->mm != gtt->usermm)
		return -EPERM;

	if (gtt->userflags & RADEON_GEM_USERPTR_ANONONLY) {
		/* check that we only pin down anonymous memory
		   to prevent problems with writeback */
		unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
		struct vm_area_struct *vma;
		vma = find_vma(gtt->usermm, gtt->userptr);
		if (!vma || vma->vm_file || vma->vm_end < end)
			return -EPERM;
	}

	do {
		unsigned num_pages = ttm->num_pages - pinned;
		uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
		struct page **pages = ttm->pages + pinned;

		r = get_user_pages(userptr, num_pages, write ? FOLL_WRITE : 0,
				   pages, NULL);
		if (r < 0)
			goto release_pages;

		pinned += r;

	} while (pinned < ttm->num_pages);

	r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
				      ttm->num_pages << PAGE_SHIFT,
				      GFP_KERNEL);
	if (r)
		goto release_sg;

	r = dma_map_sgtable(rdev->dev, ttm->sg, direction, 0);
	if (r)
		goto release_sg;

	drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
					 gtt->ttm.dma_address, ttm->num_pages);

	return 0;

release_sg:
	kfree(ttm->sg);

release_pages:
	release_pages(ttm->pages, pinned);
	return r;
}

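/*
 * Tear the userptr mapping down again: unmap the sg table, dirty any pages
 * the GPU may have written, and drop the references taken by
 * get_user_pages().
 */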
static void radeon_ttm_tt_unpin_userptr(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
	struct radeon_device *rdev = radeon_get_rdev(bdev);
	struct radeon_ttm_tt *gtt = (void *)ttm;
	struct sg_page_iter sg_iter;

	int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	/* double check that we don't free the table twice */
	if (!ttm->sg->sgl)
		return;

	/* free the sg table and pages again */
	dma_unmap_sgtable(rdev->dev, ttm->sg, direction, 0);

	for_each_sgtable_page(ttm->sg, &sg_iter, 0) {
		struct page *page = sg_page_iter_page(&sg_iter);
		if (!(gtt->userflags & RADEON_GEM_USERPTR_READONLY))
			set_page_dirty(page);

		mark_page_accessed(page);
		put_page(page);
	}

	sg_free_table(ttm->sg);
}

static bool radeon_ttm_backend_is_bound(struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = (void*)ttm;

	return (gtt->bound);
}

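/*
 * Write the BO's pages into the GART at the resource's offset. Userptr
 * pages are pinned first, and GART write access is dropped for read-only
 * userptr mappings.
 */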
static int radeon_ttm_backend_bind(struct ttm_bo_device *bdev,
				   struct ttm_tt *ttm,
				   struct ttm_resource *bo_mem)
{
	struct radeon_ttm_tt *gtt = (void*)ttm;
	struct radeon_device *rdev = radeon_get_rdev(bdev);
	uint32_t flags = RADEON_GART_PAGE_VALID | RADEON_GART_PAGE_READ |
		RADEON_GART_PAGE_WRITE;
	int r;

	if (gtt->bound)
		return 0;

	if (gtt->userptr) {
		radeon_ttm_tt_pin_userptr(bdev, ttm);
		flags &= ~RADEON_GART_PAGE_WRITE;
	}

	gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
	if (!ttm->num_pages) {
		WARN(1, "nothing to bind %u pages for mreg %p back %p!\n",
		     ttm->num_pages, bo_mem, ttm);
	}
	if (ttm->caching == ttm_cached)
		flags |= RADEON_GART_PAGE_SNOOP;
	r = radeon_gart_bind(rdev, gtt->offset, ttm->num_pages,
			     ttm->pages, gtt->ttm.dma_address, flags);
	if (r) {
		DRM_ERROR("failed to bind %u pages at 0x%08X\n",
			  ttm->num_pages, (unsigned)gtt->offset);
		return r;
	}
	gtt->bound = true;
	return 0;
}

static void radeon_ttm_backend_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = (void *)ttm;
	struct radeon_device *rdev = radeon_get_rdev(bdev);

	if (!gtt->bound)
		return;

	radeon_gart_unbind(rdev, gtt->offset, ttm->num_pages);

	if (gtt->userptr)
		radeon_ttm_tt_unpin_userptr(bdev, ttm);
	gtt->bound = false;
}

static void radeon_ttm_backend_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = (void *)ttm;

	radeon_ttm_backend_unbind(bdev, ttm);
	ttm_tt_destroy_common(bdev, ttm);

	ttm_tt_fini(&gtt->ttm.ttm);
	kfree(gtt);
}

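/*
 * Allocate the backing ttm_tt: AGP cards use the generic AGP backend,
 * everything else gets a radeon_ttm_tt whose caching mode is derived from
 * the BO's GTT flags.
 */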
static struct ttm_tt *radeon_ttm_tt_create(struct ttm_buffer_object *bo,
					   uint32_t page_flags)
{
	struct radeon_ttm_tt *gtt;
	enum ttm_caching caching;
	struct radeon_bo *rbo;
#if IS_ENABLED(CONFIG_AGP)
	struct radeon_device *rdev = radeon_get_rdev(bo->bdev);

	if (rdev->flags & RADEON_IS_AGP) {
		return ttm_agp_tt_create(bo, rdev->ddev->agp->bridge,
					 page_flags);
	}
#endif
	rbo = container_of(bo, struct radeon_bo, tbo);

	gtt = kzalloc(sizeof(struct radeon_ttm_tt), GFP_KERNEL);
	if (gtt == NULL) {
		return NULL;
	}

	if (rbo->flags & RADEON_GEM_GTT_UC)
		caching = ttm_uncached;
	else if (rbo->flags & RADEON_GEM_GTT_WC)
		caching = ttm_write_combined;
	else
		caching = ttm_cached;

	if (ttm_dma_tt_init(&gtt->ttm, bo, page_flags, caching)) {
		kfree(gtt);
		return NULL;
	}
	return &gtt->ttm.ttm;
}

static struct radeon_ttm_tt *radeon_ttm_tt_to_gtt(struct radeon_device *rdev,
						  struct ttm_tt *ttm)
{
#if IS_ENABLED(CONFIG_AGP)
	if (rdev->flags & RADEON_IS_AGP)
		return NULL;
#endif

	if (!ttm)
		return NULL;
	return container_of(ttm, struct radeon_ttm_tt, ttm.ttm);
}

static int radeon_ttm_tt_populate(struct ttm_bo_device *bdev,
				  struct ttm_tt *ttm,
				  struct ttm_operation_ctx *ctx)
{
	struct radeon_device *rdev = radeon_get_rdev(bdev);
	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(rdev, ttm);
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (gtt && gtt->userptr) {
		ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
		if (!ttm->sg)
			return -ENOMEM;

		ttm->page_flags |= TTM_PAGE_FLAG_SG;
		return 0;
	}

	if (slave && ttm->sg) {
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 gtt->ttm.dma_address, ttm->num_pages);
		return 0;
	}

	return ttm_pool_alloc(&rdev->mman.bdev.pool, ttm, ctx);
}

static void radeon_ttm_tt_unpopulate(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
	struct radeon_device *rdev = radeon_get_rdev(bdev);
	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(rdev, ttm);
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (gtt && gtt->userptr) {
		radeon_ttm_tt_unpin_userptr(bdev, ttm);
		kfree(ttm->sg);
		ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
		return;
	}

	if (slave)
		return;

	return ttm_pool_free(&rdev->mman.bdev.pool, ttm);
}

int radeon_ttm_tt_set_userptr(struct radeon_device *rdev,
			      struct ttm_tt *ttm, uint64_t addr,
			      uint32_t flags)
{
	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(rdev, ttm);

	if (gtt == NULL)
		return -EINVAL;

	gtt->userptr = addr;
	gtt->usermm = current->mm;
	gtt->userflags = flags;
	return 0;
}

bool radeon_ttm_tt_is_bound(struct ttm_bo_device *bdev,
			    struct ttm_tt *ttm)
{
#if IS_ENABLED(CONFIG_AGP)
	struct radeon_device *rdev = radeon_get_rdev(bdev);
	if (rdev->flags & RADEON_IS_AGP)
		return ttm_agp_is_bound(ttm);
#endif
	return radeon_ttm_backend_is_bound(ttm);
}

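/*
 * Bind/unbind/destroy wrappers that dispatch between the generic AGP
 * backend and the GART-based radeon backend, depending on how the card is
 * wired up.
 */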
static int radeon_ttm_tt_bind(struct ttm_bo_device *bdev,
			      struct ttm_tt *ttm,
			      struct ttm_resource *bo_mem)
{
#if IS_ENABLED(CONFIG_AGP)
	struct radeon_device *rdev = radeon_get_rdev(bdev);
#endif

	if (!bo_mem)
		return -EINVAL;
#if IS_ENABLED(CONFIG_AGP)
	if (rdev->flags & RADEON_IS_AGP)
		return ttm_agp_bind(ttm, bo_mem);
#endif

	return radeon_ttm_backend_bind(bdev, ttm, bo_mem);
}

static void radeon_ttm_tt_unbind(struct ttm_bo_device *bdev,
				 struct ttm_tt *ttm)
{
#if IS_ENABLED(CONFIG_AGP)
	struct radeon_device *rdev = radeon_get_rdev(bdev);

	if (rdev->flags & RADEON_IS_AGP) {
		ttm_agp_unbind(ttm);
		return;
	}
#endif
	radeon_ttm_backend_unbind(bdev, ttm);
}

static void radeon_ttm_tt_destroy(struct ttm_bo_device *bdev,
				  struct ttm_tt *ttm)
{
#if IS_ENABLED(CONFIG_AGP)
	struct radeon_device *rdev = radeon_get_rdev(bdev);

	if (rdev->flags & RADEON_IS_AGP) {
		ttm_agp_unbind(ttm);
		ttm_tt_destroy_common(bdev, ttm);
		ttm_agp_destroy(ttm);
		return;
	}
#endif
	radeon_ttm_backend_destroy(bdev, ttm);
}

bool radeon_ttm_tt_has_userptr(struct radeon_device *rdev,
			       struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(rdev, ttm);

	if (gtt == NULL)
		return false;

	return !!gtt->userptr;
}

bool radeon_ttm_tt_is_readonly(struct radeon_device *rdev,
			       struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(rdev, ttm);

	if (gtt == NULL)
		return false;

	return !!(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
}

static void
radeon_bo_delete_mem_notify(struct ttm_buffer_object *bo)
{
	radeon_bo_move_notify(bo, false, NULL);
}

static struct ttm_bo_driver radeon_bo_driver = {
	.ttm_tt_create = &radeon_ttm_tt_create,
	.ttm_tt_populate = &radeon_ttm_tt_populate,
	.ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate,
	.ttm_tt_destroy = &radeon_ttm_tt_destroy,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.evict_flags = &radeon_evict_flags,
	.move = &radeon_bo_move,
	.verify_access = &radeon_verify_access,
	.delete_mem_notify = &radeon_bo_delete_mem_notify,
	.io_mem_reserve = &radeon_ttm_io_mem_reserve,
};

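/*
 * Bring up the TTM device: initialize the BO driver, create the VRAM and
 * GTT range managers, pin a 256KB stolen-VGA buffer in VRAM, and register
 * the debugfs files.
 */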
int radeon_ttm_init(struct radeon_device *rdev)
{
	int r;

	/* No others user of address space so set it to 0 */
	r = ttm_bo_device_init(&rdev->mman.bdev, &radeon_bo_driver, rdev->dev,
			       rdev->ddev->anon_inode->i_mapping,
			       rdev->ddev->vma_offset_manager,
			       rdev->need_swiotlb,
			       dma_addressing_limited(&rdev->pdev->dev));
	if (r) {
		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
		return r;
	}
	rdev->mman.initialized = true;

	r = radeon_ttm_init_vram(rdev);
	if (r) {
		DRM_ERROR("Failed initializing VRAM heap.\n");
		return r;
	}
	/* Change the size here instead of the init above so only lpfn is affected */
	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);

	r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
			     RADEON_GEM_DOMAIN_VRAM, 0, NULL,
			     NULL, &rdev->stolen_vga_memory);
	if (r) {
		return r;
	}
	r = radeon_bo_reserve(rdev->stolen_vga_memory, false);
	if (r)
		return r;
	r = radeon_bo_pin(rdev->stolen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
	radeon_bo_unreserve(rdev->stolen_vga_memory);
	if (r) {
		radeon_bo_unref(&rdev->stolen_vga_memory);
		return r;
	}
	DRM_INFO("radeon: %uM of VRAM memory ready\n",
		 (unsigned) (rdev->mc.real_vram_size / (1024 * 1024)));

	r = radeon_ttm_init_gtt(rdev);
	if (r) {
		DRM_ERROR("Failed initializing GTT heap.\n");
		return r;
	}
	DRM_INFO("radeon: %uM of GTT memory ready.\n",
		 (unsigned)(rdev->mc.gtt_size / (1024 * 1024)));

	r = radeon_ttm_debugfs_init(rdev);
	if (r) {
		DRM_ERROR("Failed to init debugfs\n");
		return r;
	}
	return 0;
}

void radeon_ttm_fini(struct radeon_device *rdev)
{
	int r;

	if (!rdev->mman.initialized)
		return;

	radeon_ttm_debugfs_fini(rdev);
	if (rdev->stolen_vga_memory) {
		r = radeon_bo_reserve(rdev->stolen_vga_memory, false);
		if (r == 0) {
			radeon_bo_unpin(rdev->stolen_vga_memory);
			radeon_bo_unreserve(rdev->stolen_vga_memory);
		}
		radeon_bo_unref(&rdev->stolen_vga_memory);
	}
	ttm_range_man_fini(&rdev->mman.bdev, TTM_PL_VRAM);
	ttm_range_man_fini(&rdev->mman.bdev, TTM_PL_TT);
	ttm_bo_device_release(&rdev->mman.bdev);
	radeon_gart_fini(rdev);
	rdev->mman.initialized = false;
	DRM_INFO("radeon: ttm finalized\n");
}

/* this should only be called at bootup or when userspace
 * isn't running */
void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
{
	struct ttm_resource_manager *man;

	if (!rdev->mman.initialized)
		return;

	man = ttm_manager_type(&rdev->mman.bdev, TTM_PL_VRAM);
	/* this just adjusts TTM size idea, which sets lpfn to the correct value */
	man->size = size >> PAGE_SHIFT;
}

static vm_fault_t radeon_ttm_fault(struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
	struct radeon_device *rdev = radeon_get_rdev(bo->bdev);
	vm_fault_t ret;

	down_read(&rdev->pm.mclk_lock);

	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		goto unlock_mclk;

	ret = radeon_bo_fault_reserve_notify(bo);
	if (ret)
		goto unlock_resv;

	ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
				       TTM_BO_VM_NUM_PREFAULT, 1);
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		goto unlock_mclk;

unlock_resv:
	dma_resv_unlock(bo->base.resv);

unlock_mclk:
	up_read(&rdev->pm.mclk_lock);
	return ret;
}

static struct vm_operations_struct radeon_ttm_vm_ops = {
	.fault = radeon_ttm_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close,
	.access = ttm_bo_vm_access
};

int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int r;
	struct drm_file *file_priv = filp->private_data;
	struct radeon_device *rdev = file_priv->minor->dev->dev_private;

	if (rdev == NULL)
		return -EINVAL;

	r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
	if (unlikely(r != 0))
		return r;

	vma->vm_ops = &radeon_ttm_vm_ops;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)

static int radeon_mm_dump_table(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	unsigned ttm_pl = *(int*)node->info_ent->data;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct ttm_resource_manager *man = ttm_manager_type(&rdev->mman.bdev, ttm_pl);
	struct drm_printer p = drm_seq_file_printer(m);

	man->func->debug(man, &p);
	return 0;
}

static int radeon_ttm_pool_debugfs(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	return ttm_pool_debugfs(&rdev->mman.bdev.pool, m);
}

static int ttm_pl_vram = TTM_PL_VRAM;
static int ttm_pl_tt = TTM_PL_TT;

static struct drm_info_list radeon_ttm_debugfs_list[] = {
	{"radeon_vram_mm", radeon_mm_dump_table, 0, &ttm_pl_vram},
	{"radeon_gtt_mm", radeon_mm_dump_table, 0, &ttm_pl_tt},
	{"ttm_page_pool", radeon_ttm_pool_debugfs, 0, NULL}
};

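/*
 * debugfs files that expose raw VRAM (read word by word through the
 * MM_INDEX/MM_DATA register window) and GART-mapped memory for reading.
 */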
static int radeon_ttm_vram_open(struct inode *inode, struct file *filep)
{
	struct radeon_device *rdev = inode->i_private;
	i_size_write(inode, rdev->mc.mc_vram_size);
	filep->private_data = inode->i_private;
	return 0;
}

static ssize_t radeon_ttm_vram_read(struct file *f, char __user *buf,
				    size_t size, loff_t *pos)
{
	struct radeon_device *rdev = f->private_data;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		unsigned long flags;
		uint32_t value;

		if (*pos >= rdev->mc.mc_vram_size)
			return result;

		spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
		WREG32(RADEON_MM_INDEX, ((uint32_t)*pos) | 0x80000000);
		if (rdev->family >= CHIP_CEDAR)
			WREG32(EVERGREEN_MM_INDEX_HI, *pos >> 31);
		value = RREG32(RADEON_MM_DATA);
		spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);

		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static const struct file_operations radeon_ttm_vram_fops = {
	.owner = THIS_MODULE,
	.open = radeon_ttm_vram_open,
	.read = radeon_ttm_vram_read,
	.llseek = default_llseek
};

static int radeon_ttm_gtt_open(struct inode *inode, struct file *filep)
{
	struct radeon_device *rdev = inode->i_private;
	i_size_write(inode, rdev->mc.gtt_size);
	filep->private_data = inode->i_private;
	return 0;
}

static ssize_t radeon_ttm_gtt_read(struct file *f, char __user *buf,
				   size_t size, loff_t *pos)
{
	struct radeon_device *rdev = f->private_data;
	ssize_t result = 0;
	int r;

	while (size) {
		loff_t p = *pos / PAGE_SIZE;
		unsigned off = *pos & ~PAGE_MASK;
		size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
		struct page *page;
		void *ptr;

		if (p >= rdev->gart.num_cpu_pages)
			return result;

		page = rdev->gart.pages[p];
		if (page) {
			ptr = kmap(page);
			ptr += off;

			r = copy_to_user(buf, ptr, cur_size);
			kunmap(rdev->gart.pages[p]);
		} else
			r = clear_user(buf, cur_size);

		if (r)
			return -EFAULT;

		result += cur_size;
		buf += cur_size;
		*pos += cur_size;
		size -= cur_size;
	}

	return result;
}

static const struct file_operations radeon_ttm_gtt_fops = {
	.owner = THIS_MODULE,
	.open = radeon_ttm_gtt_open,
	.read = radeon_ttm_gtt_read,
	.llseek = default_llseek
};

#endif

static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned count;

	struct drm_minor *minor = rdev->ddev->primary;
	struct dentry *root = minor->debugfs_root;

	rdev->mman.vram = debugfs_create_file("radeon_vram", S_IFREG | S_IRUGO,
					      root, rdev,
					      &radeon_ttm_vram_fops);

	rdev->mman.gtt = debugfs_create_file("radeon_gtt", S_IFREG | S_IRUGO,
					     root, rdev, &radeon_ttm_gtt_fops);

	count = ARRAY_SIZE(radeon_ttm_debugfs_list);

	return radeon_debugfs_add_files(rdev, radeon_ttm_debugfs_list, count);
#else
	return 0;
#endif
}

static void radeon_ttm_debugfs_fini(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	debugfs_remove(rdev->mman.vram);
	rdev->mman.vram = NULL;

	debugfs_remove(rdev->mman.gtt);
	rdev->mman.gtt = NULL;
#endif
}