/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 */
#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/pagemap.h>
#include <linux/sched/task.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/dma-buf.h>
#include <linux/sizes.h>
#include <linux/module.h>
#include <drm/drm_drv.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_range_manager.h>
#include <drm/ttm/ttm_tt.h>

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_object.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_sdma.h"
#include "amdgpu_ras.h"
#include "amdgpu_hmm.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_res_cursor.h"
#include "bif/bif_4_1_d.h"

MODULE_IMPORT_NS("DMA_BUF");

#define AMDGPU_TTM_VRAM_MAX_DW_READ	((size_t)128)
static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
				   struct ttm_tt *ttm,
				   struct ttm_resource *bo_mem);
static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
				      struct ttm_tt *ttm);

static int amdgpu_ttm_init_on_chip(struct amdgpu_device *adev,
				   unsigned int type,
				   uint64_t size_in_page)
{
	return ttm_range_man_init(&adev->mman.bdev, type,
				  false, size_in_page);
}
/**
 * amdgpu_evict_flags - Compute placement flags
 *
 * @bo: The buffer object to evict
 * @placement: Possible destination(s) for evicted BO
 *
 * Fill in placement data when ttm_bo_evict() is called
 */
static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
			       struct ttm_placement *placement)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo;
	static const struct ttm_place placements = {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_SYSTEM,
		.flags = 0
	};

	/* Don't handle scatter gather BOs */
	if (bo->type == ttm_bo_type_sg) {
		placement->num_placement = 0;
		return;
	}

	/* Object isn't an AMDGPU object so ignore */
	if (!amdgpu_bo_is_amdgpu_bo(bo)) {
		placement->placement = &placements;
		placement->num_placement = 1;
		return;
	}

	abo = ttm_to_amdgpu_bo(bo);
	if (abo->flags & AMDGPU_GEM_CREATE_DISCARDABLE) {
		placement->num_placement = 0;
		return;
	}

	switch (bo->resource->mem_type) {
	case AMDGPU_PL_GDS:
	case AMDGPU_PL_GWS:
	case AMDGPU_PL_OA:
	case AMDGPU_PL_DOORBELL:
		placement->num_placement = 0;
		return;

	case TTM_PL_VRAM:
		if (!adev->mman.buffer_funcs_enabled) {
			/* Move to system memory */
			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
		} else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
			   !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
			   amdgpu_res_cpu_visible(adev, bo->resource)) {

			/* Try evicting to the CPU inaccessible part of VRAM
			 * first, but only set GTT as busy placement, so this
			 * BO will be evicted to GTT rather than causing other
			 * BOs to be evicted from VRAM
			 */
			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
							AMDGPU_GEM_DOMAIN_GTT |
							AMDGPU_GEM_DOMAIN_CPU);
			abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
			abo->placements[0].lpfn = 0;
			abo->placements[0].flags |= TTM_PL_FLAG_DESIRED;
		} else {
			/* Move to GTT memory */
			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT |
							AMDGPU_GEM_DOMAIN_CPU);
		}
		break;
	case TTM_PL_TT:
	case AMDGPU_PL_PREEMPT:
	default:
		amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
		break;
	}
	*placement = abo->placement;
}
/**
 * amdgpu_ttm_map_buffer - Map memory into the GART windows
 * @bo: buffer object to map
 * @mem: memory object to map
 * @mm_cur: range to map
 * @window: which GART window to use
 * @ring: DMA ring to use for the copy
 * @tmz: if we should setup a TMZ enabled mapping
 * @size: in number of bytes to map, out number of bytes mapped
 * @addr: resulting address inside the MC address space
 *
 * Setup one of the GART windows to access a specific piece of memory or return
 * the physical address for local memory.
 */
static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
				 struct ttm_resource *mem,
				 struct amdgpu_res_cursor *mm_cur,
				 unsigned int window, struct amdgpu_ring *ring,
				 bool tmz, uint64_t *size, uint64_t *addr)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned int offset, num_pages, num_dw, num_bytes;
	uint64_t src_addr, dst_addr;
	struct amdgpu_job *job;
	void *cpu_addr;
	uint64_t flags;
	unsigned int i;
	int r;

	BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
	       AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);

	if (WARN_ON(mem->mem_type == AMDGPU_PL_PREEMPT))
		return -EINVAL;

	/* Map only what can't be accessed directly */
	if (!tmz && mem->start != AMDGPU_BO_INVALID_OFFSET) {
		*addr = amdgpu_ttm_domain_start(adev, mem->mem_type) +
			mm_cur->start;
		return 0;
	}

	/*
	 * If start begins at an offset inside the page, then adjust the size
	 * and addr accordingly
	 */
	offset = mm_cur->start & ~PAGE_MASK;

	num_pages = PFN_UP(*size + offset);
	num_pages = min_t(uint32_t, num_pages, AMDGPU_GTT_MAX_TRANSFER_SIZE);

	*size = min(*size, (uint64_t)num_pages * PAGE_SIZE - offset);

	*addr = adev->gmc.gart_start;
	*addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
		AMDGPU_GPU_PAGE_SIZE;
	*addr += offset;

	num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
	num_bytes = num_pages * 8 * AMDGPU_GPU_PAGES_IN_CPU_PAGE;

	r = amdgpu_job_alloc_with_ib(adev, &adev->mman.high_pr,
				     AMDGPU_FENCE_OWNER_UNDEFINED,
				     num_dw * 4 + num_bytes,
				     AMDGPU_IB_POOL_DELAYED, &job);
	if (r)
		return r;

	src_addr = num_dw * 4;
	src_addr += job->ibs[0].gpu_addr;

	dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
	dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
				dst_addr, num_bytes, 0);

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);

	flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, mem);
	if (tmz)
		flags |= AMDGPU_PTE_TMZ;

	cpu_addr = &job->ibs[0].ptr[num_dw];

	if (mem->mem_type == TTM_PL_TT) {
		dma_addr_t *dma_addr;

		dma_addr = &bo->ttm->dma_address[mm_cur->start >> PAGE_SHIFT];
		amdgpu_gart_map(adev, 0, num_pages, dma_addr, flags, cpu_addr);
	} else {
		dma_addr_t dma_address;

		dma_address = mm_cur->start;
		dma_address += adev->vm_manager.vram_base_offset;

		for (i = 0; i < num_pages; ++i) {
			amdgpu_gart_map(adev, i << PAGE_SHIFT, 1, &dma_address,
					flags, cpu_addr);
			dma_address += PAGE_SIZE;
		}
	}

	dma_fence_put(amdgpu_job_submit(job));

	return 0;
}
/**
 * amdgpu_ttm_copy_mem_to_mem - Helper function for copy
 * @adev: amdgpu device
 * @src: buffer/address where to read from
 * @dst: buffer/address where to write to
 * @size: number of bytes to copy
 * @tmz: if a secure copy should be used
 * @resv: resv object to sync to
 * @f: Returns the last fence if multiple jobs are submitted.
 *
 * The function copies @size bytes from {src->mem + src->offset} to
 * {dst->mem + dst->offset}. src->bo and dst->bo could be same BO for a
 * move and different for a BO to BO copy.
 */
int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
			       const struct amdgpu_copy_mem *src,
			       const struct amdgpu_copy_mem *dst,
			       uint64_t size, bool tmz,
			       struct dma_resv *resv,
			       struct dma_fence **f)
{
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	struct amdgpu_res_cursor src_mm, dst_mm;
	struct dma_fence *fence = NULL;
	int r = 0;
	uint32_t copy_flags = 0;
	struct amdgpu_bo *abo_src, *abo_dst;

	if (!adev->mman.buffer_funcs_enabled) {
		DRM_ERROR("Trying to move memory with ring turned off.\n");
		return -EINVAL;
	}

	amdgpu_res_first(src->mem, src->offset, size, &src_mm);
	amdgpu_res_first(dst->mem, dst->offset, size, &dst_mm);

	mutex_lock(&adev->mman.gtt_window_lock);
	while (src_mm.remaining) {
		uint64_t from, to, cur_size, tiling_flags;
		uint32_t num_type, data_format, max_com;
		struct dma_fence *next;

		/* Never copy more than 256MiB at once to avoid a timeout */
		cur_size = min3(src_mm.size, dst_mm.size, 256ULL << 20);

		/* Map src to window 0 and dst to window 1. */
		r = amdgpu_ttm_map_buffer(src->bo, src->mem, &src_mm,
					  0, ring, tmz, &cur_size, &from);
		if (r)
			goto error;

		r = amdgpu_ttm_map_buffer(dst->bo, dst->mem, &dst_mm,
					  1, ring, tmz, &cur_size, &to);
		if (r)
			goto error;

		abo_src = ttm_to_amdgpu_bo(src->bo);
		abo_dst = ttm_to_amdgpu_bo(dst->bo);
		if (tmz)
			copy_flags |= AMDGPU_COPY_FLAGS_TMZ;
		if ((abo_src->flags & AMDGPU_GEM_CREATE_GFX12_DCC) &&
		    (abo_src->tbo.resource->mem_type == TTM_PL_VRAM))
			copy_flags |= AMDGPU_COPY_FLAGS_READ_DECOMPRESSED;
		if ((abo_dst->flags & AMDGPU_GEM_CREATE_GFX12_DCC) &&
		    (dst->mem->mem_type == TTM_PL_VRAM)) {
			copy_flags |= AMDGPU_COPY_FLAGS_WRITE_COMPRESSED;
			amdgpu_bo_get_tiling_flags(abo_dst, &tiling_flags);
			max_com = AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_MAX_COMPRESSED_BLOCK);
			num_type = AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_NUMBER_TYPE);
			data_format = AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_DATA_FORMAT);
			copy_flags |= (AMDGPU_COPY_FLAGS_SET(MAX_COMPRESSED, max_com) |
				       AMDGPU_COPY_FLAGS_SET(NUMBER_TYPE, num_type) |
				       AMDGPU_COPY_FLAGS_SET(DATA_FORMAT, data_format));
		}

		r = amdgpu_copy_buffer(ring, from, to, cur_size, resv,
				       &next, false, true, copy_flags);
		if (r)
			goto error;

		dma_fence_put(fence);
		fence = next;

		amdgpu_res_next(&src_mm, cur_size);
		amdgpu_res_next(&dst_mm, cur_size);
	}
error:
	mutex_unlock(&adev->mman.gtt_window_lock);
	if (f)
		*f = dma_fence_get(fence);
	dma_fence_put(fence);
	return r;
}
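
/*
 * Usage sketch (illustrative only, not part of this driver): a caller
 * typically wraps two struct amdgpu_copy_mem descriptors around existing BOs
 * and waits on the returned fence. "sbo" and "dbo" below are hypothetical
 * amdgpu_bo pointers of equal size:
 *
 *	struct amdgpu_copy_mem src = {
 *		.bo = &sbo->tbo, .mem = sbo->tbo.resource, .offset = 0 };
 *	struct amdgpu_copy_mem dst = {
 *		.bo = &dbo->tbo, .mem = dbo->tbo.resource, .offset = 0 };
 *	struct dma_fence *fence = NULL;
 *	int r;
 *
 *	r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst, amdgpu_bo_size(sbo),
 *				       false, sbo->tbo.base.resv, &fence);
 *	if (!r && fence) {
 *		dma_fence_wait(fence, false);
 *		dma_fence_put(fence);
 *	}
 */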
/*
 * amdgpu_move_blit - Copy an entire buffer to another buffer
 *
 * This is a helper called by amdgpu_bo_move() and amdgpu_move_vram_ram() to
 * help move buffers to and from VRAM.
 */
static int amdgpu_move_blit(struct ttm_buffer_object *bo,
			    bool evict,
			    struct ttm_resource *new_mem,
			    struct ttm_resource *old_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
	struct amdgpu_copy_mem src, dst;
	struct dma_fence *fence = NULL;
	int r;

	src.bo = bo;
	dst.bo = bo;
	src.mem = old_mem;
	dst.mem = new_mem;
	src.offset = 0;
	dst.offset = 0;

	r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
				       new_mem->size,
				       amdgpu_bo_encrypted(abo),
				       bo->base.resv, &fence);
	if (r)
		goto error;

	/* clear the space being freed */
	if (old_mem->mem_type == TTM_PL_VRAM &&
	    (abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
		struct dma_fence *wipe_fence = NULL;

		r = amdgpu_fill_buffer(abo, 0, NULL, &wipe_fence,
				       false);
		if (r) {
			goto error;
		} else if (wipe_fence) {
			amdgpu_vram_mgr_set_cleared(bo->resource);
			dma_fence_put(fence);
			fence = wipe_fence;
		}
	}

	/* Always block for VM page tables before committing the new location */
	if (bo->type == ttm_bo_type_kernel)
		r = ttm_bo_move_accel_cleanup(bo, fence, true, false, new_mem);
	else
		r = ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem);
	dma_fence_put(fence);
	return r;

error:
	if (fence)
		dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return r;
}
/**
 * amdgpu_res_cpu_visible - Check that resource can be accessed by CPU
 * @adev: amdgpu device
 * @res: the resource to check
 *
 * Returns: true if the full resource is CPU visible, false otherwise.
 */
bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
			    struct ttm_resource *res)
{
	struct amdgpu_res_cursor cursor;

	if (!res)
		return false;

	if (res->mem_type == TTM_PL_SYSTEM || res->mem_type == TTM_PL_TT ||
	    res->mem_type == AMDGPU_PL_PREEMPT || res->mem_type == AMDGPU_PL_DOORBELL)
		return true;

	if (res->mem_type != TTM_PL_VRAM)
		return false;

	amdgpu_res_first(res, 0, res->size, &cursor);
	while (cursor.remaining) {
		if ((cursor.start + cursor.size) > adev->gmc.visible_vram_size)
			return false;
		amdgpu_res_next(&cursor, cursor.size);
	}

	return true;
}
/*
 * amdgpu_res_copyable - Check that memory can be accessed by ttm_bo_move_memcpy
 *
 * Called by amdgpu_bo_move()
 */
static bool amdgpu_res_copyable(struct amdgpu_device *adev,
				struct ttm_resource *mem)
{
	if (!amdgpu_res_cpu_visible(adev, mem))
		return false;

	/* ttm_resource_ioremap only supports contiguous memory */
	if (mem->mem_type == TTM_PL_VRAM &&
	    !(mem->placement & TTM_PL_FLAG_CONTIGUOUS))
		return false;

	return true;
}
/*
 * amdgpu_bo_move - Move a buffer object to a new memory location
 *
 * Called by ttm_bo_handle_move_mem()
 */
static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
			  struct ttm_operation_ctx *ctx,
			  struct ttm_resource *new_mem,
			  struct ttm_place *hop)
{
	struct amdgpu_device *adev;
	struct amdgpu_bo *abo;
	struct ttm_resource *old_mem = bo->resource;
	int r;

	if (new_mem->mem_type == TTM_PL_TT ||
	    new_mem->mem_type == AMDGPU_PL_PREEMPT) {
		r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, new_mem);
		if (r)
			return r;
	}

	abo = ttm_to_amdgpu_bo(bo);
	adev = amdgpu_ttm_adev(bo->bdev);

	if (!old_mem || (old_mem->mem_type == TTM_PL_SYSTEM &&
			 bo->ttm == NULL)) {
		amdgpu_bo_move_notify(bo, evict, new_mem);
		ttm_bo_move_null(bo, new_mem);
		return 0;
	}
	if (old_mem->mem_type == TTM_PL_SYSTEM &&
	    (new_mem->mem_type == TTM_PL_TT ||
	     new_mem->mem_type == AMDGPU_PL_PREEMPT)) {
		amdgpu_bo_move_notify(bo, evict, new_mem);
		ttm_bo_move_null(bo, new_mem);
		return 0;
	}
	if ((old_mem->mem_type == TTM_PL_TT ||
	     old_mem->mem_type == AMDGPU_PL_PREEMPT) &&
	    new_mem->mem_type == TTM_PL_SYSTEM) {
		r = ttm_bo_wait_ctx(bo, ctx);
		if (r)
			return r;

		amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm);
		amdgpu_bo_move_notify(bo, evict, new_mem);
		ttm_resource_free(bo, &bo->resource);
		ttm_bo_assign_mem(bo, new_mem);
		return 0;
	}

	if (old_mem->mem_type == AMDGPU_PL_GDS ||
	    old_mem->mem_type == AMDGPU_PL_GWS ||
	    old_mem->mem_type == AMDGPU_PL_OA ||
	    old_mem->mem_type == AMDGPU_PL_DOORBELL ||
	    new_mem->mem_type == AMDGPU_PL_GDS ||
	    new_mem->mem_type == AMDGPU_PL_GWS ||
	    new_mem->mem_type == AMDGPU_PL_OA ||
	    new_mem->mem_type == AMDGPU_PL_DOORBELL) {
		/* Nothing to save here */
		amdgpu_bo_move_notify(bo, evict, new_mem);
		ttm_bo_move_null(bo, new_mem);
		return 0;
	}

	if (bo->type == ttm_bo_type_device &&
	    new_mem->mem_type == TTM_PL_VRAM &&
	    old_mem->mem_type != TTM_PL_VRAM) {
		/* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
		 * accesses the BO after it's moved.
		 */
		abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	}

	if (adev->mman.buffer_funcs_enabled &&
	    ((old_mem->mem_type == TTM_PL_SYSTEM &&
	      new_mem->mem_type == TTM_PL_VRAM) ||
	     (old_mem->mem_type == TTM_PL_VRAM &&
	      new_mem->mem_type == TTM_PL_SYSTEM))) {
		hop->fpfn = 0;
		hop->lpfn = 0;
		hop->mem_type = TTM_PL_TT;
		hop->flags = TTM_PL_FLAG_TEMPORARY;
		return -EMULTIHOP;
	}

	amdgpu_bo_move_notify(bo, evict, new_mem);
	if (adev->mman.buffer_funcs_enabled)
		r = amdgpu_move_blit(bo, evict, new_mem, old_mem);
	else
		r = -ENODEV;

	if (r) {
		/* Check that all memory is CPU accessible */
		if (!amdgpu_res_copyable(adev, old_mem) ||
		    !amdgpu_res_copyable(adev, new_mem)) {
			pr_err("Move buffer fallback to memcpy unavailable\n");
			return r;
		}

		r = ttm_bo_move_memcpy(bo, ctx, new_mem);
		if (r)
			return r;
	}

	/* update statistics after the move */
	if (evict)
		atomic64_inc(&adev->num_evictions);
	atomic64_add(bo->base.size, &adev->num_bytes_moved);
	return 0;
}
/*
 * amdgpu_ttm_io_mem_reserve - Reserve a block of memory during a fault
 *
 * Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault()
 */
static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
				     struct ttm_resource *mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);

	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* system memory */
		return 0;
	case TTM_PL_TT:
	case AMDGPU_PL_PREEMPT:
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;

		if (adev->mman.aper_base_kaddr &&
		    mem->placement & TTM_PL_FLAG_CONTIGUOUS)
			mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr +
					mem->bus.offset;

		mem->bus.offset += adev->gmc.aper_base;
		mem->bus.is_iomem = true;
		break;
	case AMDGPU_PL_DOORBELL:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.offset += adev->doorbell.base;
		mem->bus.is_iomem = true;
		mem->bus.caching = ttm_uncached;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
					   unsigned long page_offset)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_res_cursor cursor;

	amdgpu_res_first(bo->resource, (u64)page_offset << PAGE_SHIFT, 0,
			 &cursor);

	if (bo->resource->mem_type == AMDGPU_PL_DOORBELL)
		return ((uint64_t)(adev->doorbell.base + cursor.start)) >> PAGE_SHIFT;

	return (adev->gmc.aper_base + cursor.start) >> PAGE_SHIFT;
}
/**
 * amdgpu_ttm_domain_start - Returns GPU start address
 * @adev: amdgpu device object
 * @type: type of the memory
 *
 * Returns:
 * GPU start address of a memory domain
 */
uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type)
{
	switch (type) {
	case TTM_PL_TT:
		return adev->gmc.gart_start;
	case TTM_PL_VRAM:
		return adev->gmc.vram_start;
	}

	return 0;
}
/*
 * TTM backend functions.
 */
struct amdgpu_ttm_tt {
	struct ttm_tt		ttm;
	struct drm_gem_object	*gobj;
	u64			offset;
	uint64_t		userptr;
	struct task_struct	*usertask;
	uint32_t		userflags;
	bool			bound;
	int32_t			pool_id;
};

#define ttm_to_amdgpu_ttm_tt(ptr)	container_of(ptr, struct amdgpu_ttm_tt, ttm)
#ifdef CONFIG_DRM_AMDGPU_USERPTR
/*
 * amdgpu_ttm_tt_get_user_pages - get device accessible pages that back user
 * memory and start HMM tracking CPU page table update
 *
 * Calling function must call amdgpu_ttm_tt_userptr_range_done() once and only
 * once afterwards to stop HMM tracking
 */
int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages,
				 struct hmm_range **range)
{
	struct ttm_tt *ttm = bo->tbo.ttm;
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
	unsigned long start = gtt->userptr;
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	bool readonly;
	int r = 0;

	/* Make sure get_user_pages_done() can cleanup gracefully */
	*range = NULL;

	mm = bo->notifier.mm;
	if (unlikely(!mm)) {
		DRM_DEBUG_DRIVER("BO is not registered?\n");
		return -EFAULT;
	}

	if (!mmget_not_zero(mm)) /* Happens during process shutdown */
		return -ESRCH;

	mmap_read_lock(mm);
	vma = vma_lookup(mm, start);
	if (unlikely(!vma)) {
		r = -EFAULT;
		goto out_unlock;
	}
	if (unlikely((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) &&
		vma->vm_file)) {
		r = -EPERM;
		goto out_unlock;
	}

	readonly = amdgpu_ttm_tt_is_readonly(ttm);
	r = amdgpu_hmm_range_get_pages(&bo->notifier, start, ttm->num_pages,
				       readonly, NULL, pages, range);
out_unlock:
	mmap_read_unlock(mm);
	if (r)
		pr_debug("failed %d to get user pages 0x%lx\n", r, start);

	mmput(mm);

	return r;
}
/* amdgpu_ttm_tt_discard_user_pages - Discard range and pfn array allocations
 */
void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm,
				      struct hmm_range *range)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt && gtt->userptr && range)
		amdgpu_hmm_range_get_pages_done(range);
}

/*
 * amdgpu_ttm_tt_get_user_pages_done - stop HMM track the CPU page table change
 * Check if the pages backing this ttm range have been invalidated
 *
 * Returns: true if pages are still valid
 */
bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
				       struct hmm_range *range)
{
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);

	if (!gtt || !gtt->userptr || !range)
		return false;

	DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%x\n",
		gtt->userptr, ttm->num_pages);

	WARN_ONCE(!range->hmm_pfns, "No user pages to check\n");

	return !amdgpu_hmm_range_get_pages_done(range);
}
#endif
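
/*
 * Illustrative pairing of the HMM helpers above (a sketch, not code taken
 * from this driver): callers acquire the pages, do their work, and then
 * check validity exactly once. "bo" is a hypothetical userptr amdgpu_bo:
 *
 *	struct hmm_range *range;
 *	struct page **pages = kvmalloc_array(bo->tbo.ttm->num_pages,
 *					     sizeof(*pages), GFP_KERNEL);
 *	int r = amdgpu_ttm_tt_get_user_pages(bo, pages, &range);
 *
 *	if (!r) {
 *		// ... use or bind the pages ...
 *		if (!amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range)) {
 *			// CPU page tables changed underneath us, retry
 *		}
 *	}
 *	kvfree(pages);
 */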
/*
 * amdgpu_ttm_tt_set_user_pages - Copy pages in, putting old pages as necessary.
 *
 * Called by amdgpu_cs_list_validate(). This creates the page list
 * that backs user memory and will ultimately be mapped into the device
 * address space.
 */
void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
{
	unsigned long i;

	for (i = 0; i < ttm->num_pages; ++i)
		ttm->pages[i] = pages ? pages[i] : NULL;
}
/*
 * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the user pages
 *
 * Called by amdgpu_ttm_backend_bind()
 */
static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev,
				     struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
	int r;

	/* Allocate an SG array and squash pages into it */
	r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
				      (u64)ttm->num_pages << PAGE_SHIFT,
				      GFP_KERNEL);
	if (r)
		goto release_sg;

	/* Map SG to device */
	r = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
	if (r)
		goto release_sg_table;

	/* convert SG to linear array of pages and dma addresses */
	drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
				       ttm->num_pages);

	return 0;

release_sg_table:
	sg_free_table(ttm->sg);
release_sg:
	kfree(ttm->sg);
	ttm->sg = NULL;
	return r;
}
/*
 * amdgpu_ttm_tt_unpin_userptr - Unpin and unmap userptr pages
 */
static void amdgpu_ttm_tt_unpin_userptr(struct ttm_device *bdev,
					struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	/* double check that we don't free the table twice */
	if (!ttm->sg || !ttm->sg->sgl)
		return;

	/* unmap the pages mapped to the device */
	dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
	sg_free_table(ttm->sg);
}
/*
 * total_pages is constructed as MQD0+CtrlStack0 + MQD1+CtrlStack1 + ...
 * MQDn+CtrlStackn where n is the number of XCCs per partition.
 * pages_per_xcc is the size of one MQD+CtrlStack. The first page is MQD
 * and uses memory type default, UC. The rest of pages_per_xcc are
 * Ctrl stack and modify their memory type to NC.
 */
static void amdgpu_ttm_gart_bind_gfx9_mqd(struct amdgpu_device *adev,
				struct ttm_tt *ttm, uint64_t flags)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	uint64_t total_pages = ttm->num_pages;
	int num_xcc = max(1U, adev->gfx.num_xcc_per_xcp);
	uint64_t page_idx, pages_per_xcc;
	int i;
	uint64_t ctrl_flags = AMDGPU_PTE_MTYPE_VG10(flags, AMDGPU_MTYPE_NC);

	pages_per_xcc = total_pages;
	do_div(pages_per_xcc, num_xcc);

	for (i = 0, page_idx = 0; i < num_xcc; i++, page_idx += pages_per_xcc) {
		/* MQD page: use default flags */
		amdgpu_gart_bind(adev,
				gtt->offset + (page_idx << PAGE_SHIFT),
				1, &gtt->ttm.dma_address[page_idx], flags);

		/*
		 * Ctrl pages - modify the memory type to NC (ctrl_flags) from
		 * the second page of the BO onward.
		 */
		amdgpu_gart_bind(adev,
				gtt->offset + ((page_idx + 1) << PAGE_SHIFT),
				pages_per_xcc - 1,
				&gtt->ttm.dma_address[page_idx + 1],
				ctrl_flags);
	}
}
static void amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
				 struct ttm_buffer_object *tbo,
				 uint64_t flags)
{
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);
	struct ttm_tt *ttm = tbo->ttm;
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);

	if (amdgpu_bo_encrypted(abo))
		flags |= AMDGPU_PTE_TMZ;

	if (abo->flags & AMDGPU_GEM_CREATE_CP_MQD_GFX9) {
		amdgpu_ttm_gart_bind_gfx9_mqd(adev, ttm, flags);
	} else {
		amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
				 gtt->ttm.dma_address, flags);
	}
	gtt->bound = true;
}
/*
 * amdgpu_ttm_backend_bind - Bind GTT memory
 *
 * Called by ttm_tt_bind() on behalf of ttm_bo_handle_move_mem().
 * This handles binding GTT memory to the device address space.
 */
static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
				   struct ttm_tt *ttm,
				   struct ttm_resource *bo_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
	uint64_t flags;
	int r;

	if (!bo_mem)
		return -EINVAL;

	if (gtt->bound)
		return 0;

	if (gtt->userptr) {
		r = amdgpu_ttm_tt_pin_userptr(bdev, ttm);
		if (r) {
			DRM_ERROR("failed to pin userptr\n");
			return r;
		}
	} else if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) {
		if (!ttm->sg) {
			struct dma_buf_attachment *attach;
			struct sg_table *sgt;

			attach = gtt->gobj->import_attach;
			sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
			if (IS_ERR(sgt))
				return PTR_ERR(sgt);

			ttm->sg = sgt;
		}

		drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
					       ttm->num_pages);
	}

	if (!ttm->num_pages) {
		WARN(1, "nothing to bind %u pages for mreg %p back %p!\n",
		     ttm->num_pages, bo_mem, ttm);
	}

	if (bo_mem->mem_type != TTM_PL_TT ||
	    !amdgpu_gtt_mgr_has_gart_addr(bo_mem)) {
		gtt->offset = AMDGPU_BO_INVALID_OFFSET;
		return 0;
	}

	/* compute PTE flags relevant to this BO memory */
	flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem);

	/* bind pages into GART page tables */
	gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
	amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
			 gtt->ttm.dma_address, flags);
	gtt->bound = true;
	return 0;
}
/*
 * amdgpu_ttm_alloc_gart - Make sure buffer object is accessible either
 * through AGP or GART aperture.
 *
 * If bo is accessible through AGP aperture, then use AGP aperture
 * to access bo; otherwise allocate logical space in GART aperture
 * and map bo to GART aperture.
 */
int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(bo->ttm);
	struct ttm_placement placement;
	struct ttm_place placements;
	struct ttm_resource *tmp;
	uint64_t addr, flags;
	int r;

	if (bo->resource->start != AMDGPU_BO_INVALID_OFFSET)
		return 0;

	addr = amdgpu_gmc_agp_addr(bo);
	if (addr != AMDGPU_BO_INVALID_OFFSET)
		return 0;

	/* allocate GART space */
	placement.num_placement = 1;
	placement.placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
	placements.mem_type = TTM_PL_TT;
	placements.flags = bo->resource->placement;

	r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
	if (unlikely(r))
		return r;

	/* compute PTE flags for this buffer object */
	flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, tmp);

	/* Bind pages */
	gtt->offset = (u64)tmp->start << PAGE_SHIFT;
	amdgpu_ttm_gart_bind(adev, bo, flags);
	amdgpu_gart_invalidate_tlb(adev);
	ttm_resource_free(bo, &bo->resource);
	ttm_bo_assign_mem(bo, tmp);

	return 0;
}
/*
 * amdgpu_ttm_recover_gart - Rebind GTT pages
 *
 * Called by amdgpu_gtt_mgr_recover() from amdgpu_device_reset() to
 * rebind GTT pages during a GPU reset.
 */
void amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
	uint64_t flags;

	if (!tbo->ttm)
		return;

	flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, tbo->resource);
	amdgpu_ttm_gart_bind(adev, tbo, flags);
}
/*
 * amdgpu_ttm_backend_unbind - Unbind GTT mapped pages
 *
 * Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and
 * ttm_tt_destroy().
 */
static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
				      struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);

	/* if the pages have userptr pinning then clear that first */
	if (gtt->userptr) {
		amdgpu_ttm_tt_unpin_userptr(bdev, ttm);
	} else if (ttm->sg && gtt->gobj->import_attach) {
		struct dma_buf_attachment *attach;

		attach = gtt->gobj->import_attach;
		dma_buf_unmap_attachment(attach, ttm->sg, DMA_BIDIRECTIONAL);
		ttm->sg = NULL;
	}

	if (!gtt->bound)
		return;

	if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
		return;

	/* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
	amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
	gtt->bound = false;
}
static void amdgpu_ttm_backend_destroy(struct ttm_device *bdev,
				       struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);

	if (gtt->usertask)
		put_task_struct(gtt->usertask);

	ttm_tt_fini(&gtt->ttm);
	kfree(gtt);
}
/**
 * amdgpu_ttm_tt_create - Create a ttm_tt object for a given BO
 *
 * @bo: The buffer object to create a GTT ttm_tt object around
 * @page_flags: Page flags to be added to the ttm_tt object
 *
 * Called by ttm_tt_create().
 */
static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
					   uint32_t page_flags)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
	struct amdgpu_ttm_tt *gtt;
	enum ttm_caching caching;

	gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
	if (!gtt)
		return NULL;

	gtt->gobj = &bo->base;
	if (adev->gmc.mem_partitions && abo->xcp_id >= 0)
		gtt->pool_id = KFD_XCP_MEM_ID(adev, abo->xcp_id);
	else
		gtt->pool_id = abo->xcp_id;

	if (abo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
		caching = ttm_write_combined;
	else
		caching = ttm_cached;

	/* allocate space for the uninitialized page entries */
	if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags, caching)) {
		kfree(gtt);
		return NULL;
	}
	return &gtt->ttm;
}
/*
 * amdgpu_ttm_tt_populate - Map GTT pages visible to the device
 *
 * Map the pages of a ttm_tt object to an address space visible
 * to the underlying device.
 */
static int amdgpu_ttm_tt_populate(struct ttm_device *bdev,
				  struct ttm_tt *ttm,
				  struct ttm_operation_ctx *ctx)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
	struct ttm_pool *pool;
	pgoff_t i;
	int ret;

	/* user pages are bound by amdgpu_ttm_tt_pin_userptr() */
	if (gtt->userptr) {
		ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
		if (!ttm->sg)
			return -ENOMEM;
		return 0;
	}

	if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL)
		return 0;

	if (adev->mman.ttm_pools && gtt->pool_id >= 0)
		pool = &adev->mman.ttm_pools[gtt->pool_id];
	else
		pool = &adev->mman.bdev.pool;
	ret = ttm_pool_alloc(pool, ttm, ctx);
	if (ret)
		return ret;

	for (i = 0; i < ttm->num_pages; ++i)
		ttm->pages[i]->mapping = bdev->dev_mapping;

	return 0;
}
/*
 * amdgpu_ttm_tt_unpopulate - unmap GTT pages and unpopulate page arrays
 *
 * Unmaps pages of a ttm_tt object from the device address space and
 * unpopulates the page array backing it.
 */
static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev,
				     struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
	struct amdgpu_device *adev;
	struct ttm_pool *pool;
	pgoff_t i;

	amdgpu_ttm_backend_unbind(bdev, ttm);

	if (gtt->userptr) {
		amdgpu_ttm_tt_set_user_pages(ttm, NULL);
		kfree(ttm->sg);
		ttm->sg = NULL;
		return;
	}

	if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL)
		return;

	for (i = 0; i < ttm->num_pages; ++i)
		ttm->pages[i]->mapping = NULL;

	adev = amdgpu_ttm_adev(bdev);

	if (adev->mman.ttm_pools && gtt->pool_id >= 0)
		pool = &adev->mman.ttm_pools[gtt->pool_id];
	else
		pool = &adev->mman.bdev.pool;

	return ttm_pool_free(pool, ttm);
}
/**
 * amdgpu_ttm_tt_get_userptr - Return the userptr GTT ttm_tt for the current
 * task
 *
 * @tbo: The ttm_buffer_object that contains the userptr
 * @user_addr: The returned value
 */
int amdgpu_ttm_tt_get_userptr(const struct ttm_buffer_object *tbo,
			      uint64_t *user_addr)
{
	struct amdgpu_ttm_tt *gtt;

	if (!tbo->ttm)
		return -EINVAL;

	gtt = (void *)tbo->ttm;
	*user_addr = gtt->userptr;
	return 0;
}
/**
 * amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt for the current
 * task
 *
 * @bo: The ttm_buffer_object to bind this userptr to
 * @addr:  The address in the current tasks VM space to use
 * @flags: Requirements of userptr object.
 *
 * Called by amdgpu_gem_userptr_ioctl() and kfd_ioctl_alloc_memory_of_gpu() to
 * bind userptr pages to current task and by kfd_ioctl_acquire_vm() to
 * initialize GPU VM for a KFD process.
 */
int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
			      uint64_t addr, uint32_t flags)
{
	struct amdgpu_ttm_tt *gtt;

	if (!bo->ttm) {
		/* TODO: We want a separate TTM object type for userptrs */
		bo->ttm = amdgpu_ttm_tt_create(bo, 0);
		if (bo->ttm == NULL)
			return -ENOMEM;
	}

	/* Set TTM_TT_FLAG_EXTERNAL before populate but after create. */
	bo->ttm->page_flags |= TTM_TT_FLAG_EXTERNAL;

	gtt = ttm_to_amdgpu_ttm_tt(bo->ttm);
	gtt->userptr = addr;
	gtt->userflags = flags;

	if (gtt->usertask)
		put_task_struct(gtt->usertask);
	gtt->usertask = current->group_leader;
	get_task_struct(gtt->usertask);

	return 0;
}
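
/*
 * Illustrative sketch (not code from this file): the GEM userptr ioctl path
 * roughly does the following with a freshly created BO, where "args" stands
 * for a hypothetical drm_amdgpu_gem_userptr-style request:
 *
 *	r = amdgpu_ttm_tt_set_userptr(&bo->tbo, args->addr, args->flags);
 *	if (r)
 *		goto release_object;
 *
 * after which amdgpu_ttm_tt_get_user_pages() and amdgpu_ttm_backend_bind()
 * take care of faulting in and mapping the pages.
 */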
/*
 * amdgpu_ttm_tt_get_usermm - Return memory manager for ttm_tt object
 */
struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);

	if (gtt == NULL)
		return NULL;

	if (gtt->usertask == NULL)
		return NULL;

	return gtt->usertask->mm;
}
/*
 * amdgpu_ttm_tt_affect_userptr - Determine if a ttm_tt object lays inside an
 * address range for the current task.
 *
 */
bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
				  unsigned long end, unsigned long *userptr)
{
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
	unsigned long size;

	if (gtt == NULL || !gtt->userptr)
		return false;

	/* Return false if no part of the ttm_tt object lies within
	 * the range
	 */
	size = (unsigned long)gtt->ttm.num_pages * PAGE_SIZE;
	if (gtt->userptr > end || gtt->userptr + size <= start)
		return false;

	if (userptr)
		*userptr = gtt->userptr;
	return true;
}
/*
 * amdgpu_ttm_tt_is_userptr - Are the pages backed by userptr?
 */
bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);

	if (gtt == NULL || !gtt->userptr)
		return false;

	return true;
}

/*
 * amdgpu_ttm_tt_is_readonly - Is the ttm_tt object read only?
 */
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);

	if (gtt == NULL)
		return false;

	return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
}
/**
 * amdgpu_ttm_tt_pde_flags - Compute PDE flags for ttm_tt object
 *
 * @ttm: The ttm_tt object to compute the flags for
 * @mem: The memory registry backing this ttm_tt object
 *
 * Figure out the flags to use for a VM PDE (Page Directory Entry).
 */
uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem)
{
	uint64_t flags = 0;

	if (mem && mem->mem_type != TTM_PL_SYSTEM)
		flags |= AMDGPU_PTE_VALID;

	if (mem && (mem->mem_type == TTM_PL_TT ||
		    mem->mem_type == AMDGPU_PL_DOORBELL ||
		    mem->mem_type == AMDGPU_PL_PREEMPT)) {
		flags |= AMDGPU_PTE_SYSTEM;

		if (ttm->caching == ttm_cached)
			flags |= AMDGPU_PTE_SNOOPED;
	}

	if (mem && mem->mem_type == TTM_PL_VRAM &&
	    mem->bus.caching == ttm_cached)
		flags |= AMDGPU_PTE_SNOOPED;

	return flags;
}
/**
 * amdgpu_ttm_tt_pte_flags - Compute PTE flags for ttm_tt object
 *
 * @adev: amdgpu_device pointer
 * @ttm: The ttm_tt object to compute the flags for
 * @mem: The memory registry backing this ttm_tt object
 *
 * Figure out the flags to use for a VM PTE (Page Table Entry).
 */
uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
				 struct ttm_resource *mem)
{
	uint64_t flags = amdgpu_ttm_tt_pde_flags(ttm, mem);

	flags |= adev->gart.gart_pte_flags;
	flags |= AMDGPU_PTE_READABLE;

	if (!amdgpu_ttm_tt_is_readonly(ttm))
		flags |= AMDGPU_PTE_WRITEABLE;

	return flags;
}
/*
 * amdgpu_ttm_bo_eviction_valuable - Check to see if we can evict a buffer
 * object.
 *
 * Return true if eviction is sensible. Called by ttm_mem_evict_first() on
 * behalf of ttm_bo_mem_force_space() which tries to evict buffer objects until
 * it can find space for a new object and by ttm_bo_force_list_clean() which is
 * used to clean out a memory space.
 */
static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
					    const struct ttm_place *place)
{
	struct dma_resv_iter resv_cursor;
	struct dma_fence *f;

	if (!amdgpu_bo_is_amdgpu_bo(bo))
		return ttm_bo_eviction_valuable(bo, place);

	/* Swapout? */
	if (bo->resource->mem_type == TTM_PL_SYSTEM)
		return true;

	if (bo->type == ttm_bo_type_kernel &&
	    !amdgpu_vm_evictable(ttm_to_amdgpu_bo(bo)))
		return false;

	/* If bo is a KFD BO, check if the bo belongs to the current process.
	 * If true, then return false as any KFD process needs all its BOs to
	 * be resident to run successfully
	 */
	dma_resv_for_each_fence(&resv_cursor, bo->base.resv,
				DMA_RESV_USAGE_BOOKKEEP, f) {
		if (amdkfd_fence_check_mm(f, current->mm) &&
		    !(place->flags & TTM_PL_FLAG_CONTIGUOUS))
			return false;
	}

	/* Preemptible BOs don't own system resources managed by the
	 * driver (pages, VRAM, GART space). They point to resources
	 * owned by someone else (e.g. pageable memory in user mode
	 * or a DMABuf). They are used in a preemptible context so we
	 * can guarantee no deadlocks and good QoS in case of MMU
	 * notifiers or DMABuf move notifiers from the resource owner.
	 */
	if (bo->resource->mem_type == AMDGPU_PL_PREEMPT)
		return false;

	if (bo->resource->mem_type == TTM_PL_TT &&
	    amdgpu_bo_encrypted(ttm_to_amdgpu_bo(bo)))
		return false;

	return ttm_bo_eviction_valuable(bo, place);
}
static void amdgpu_ttm_vram_mm_access(struct amdgpu_device *adev, loff_t pos,
				      void *buf, size_t size, bool write)
{
	while (size) {
		uint64_t aligned_pos = ALIGN_DOWN(pos, 4);
		uint64_t bytes = 4 - (pos & 0x3);
		uint32_t shift = (pos & 0x3) * 8;
		uint32_t mask = 0xffffffff << shift;
		uint32_t value = 0;

		if (size < bytes) {
			mask &= 0xffffffff >> (bytes - size) * 8;
			bytes = size;
		}

		if (mask != 0xffffffff) {
			amdgpu_device_mm_access(adev, aligned_pos, &value, 4, false);
			if (write) {
				value &= ~mask;
				value |= (*(uint32_t *)buf << shift) & mask;
				amdgpu_device_mm_access(adev, aligned_pos, &value, 4, true);
			} else {
				value = (value & mask) >> shift;
				memcpy(buf, &value, bytes);
			}
		} else {
			amdgpu_device_mm_access(adev, aligned_pos, buf, 4, write);
		}

		pos += bytes;
		buf += bytes;
		size -= bytes;
	}
}
static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo,
					 unsigned long offset, void *buf,
					 int len, int write)
{
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct amdgpu_res_cursor src_mm;
	struct amdgpu_job *job;
	struct dma_fence *fence;
	uint64_t src_addr, dst_addr;
	unsigned int num_dw;
	int r, idx;

	if (len != PAGE_SIZE)
		return -EINVAL;

	if (!adev->mman.sdma_access_ptr)
		return -EACCES;

	if (!drm_dev_enter(adev_to_drm(adev), &idx))
		return -ENODEV;

	if (write)
		memcpy(adev->mman.sdma_access_ptr, buf, len);

	num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
	r = amdgpu_job_alloc_with_ib(adev, &adev->mman.high_pr,
				     AMDGPU_FENCE_OWNER_UNDEFINED,
				     num_dw * 4, AMDGPU_IB_POOL_DELAYED,
				     &job);
	if (r)
		goto out;

	amdgpu_res_first(abo->tbo.resource, offset, len, &src_mm);
	src_addr = amdgpu_ttm_domain_start(adev, bo->resource->mem_type) +
		src_mm.start;
	dst_addr = amdgpu_bo_gpu_offset(adev->mman.sdma_access_bo);
	if (write)
		swap(src_addr, dst_addr);

	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr, dst_addr,
				PAGE_SIZE, 0);

	amdgpu_ring_pad_ib(adev->mman.buffer_funcs_ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);

	fence = amdgpu_job_submit(job);

	if (!dma_fence_wait_timeout(fence, false, adev->sdma_timeout))
		r = -ETIMEDOUT;
	dma_fence_put(fence);

	if (!(r || write))
		memcpy(buf, adev->mman.sdma_access_ptr, len);
out:
	drm_dev_exit(idx);
	return r;
}
/**
 * amdgpu_ttm_access_memory - Read or Write memory that backs a buffer object.
 *
 * @bo:  The buffer object to read/write
 * @offset:  Offset into buffer object
 * @buf:  Secondary buffer to write/read from
 * @len: Length in bytes of access
 * @write:  true if writing
 *
 * This is used to access VRAM that backs a buffer object via MMIO
 * access for debugging purposes.
 */
static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
				    unsigned long offset, void *buf, int len,
				    int write)
{
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct amdgpu_res_cursor cursor;
	int ret = 0;

	if (bo->resource->mem_type != TTM_PL_VRAM)
		return -EIO;

	if (amdgpu_device_has_timeouts_enabled(adev) &&
	    !amdgpu_ttm_access_memory_sdma(bo, offset, buf, len, write))
		return len;

	amdgpu_res_first(bo->resource, offset, len, &cursor);
	while (cursor.remaining) {
		size_t count, size = cursor.size;
		loff_t pos = cursor.start;

		count = amdgpu_device_aper_access(adev, pos, buf, size, write);
		size -= count;
		if (size) {
			/* using MM to access rest vram and handle un-aligned address */
			pos += count;
			buf += count;
			amdgpu_ttm_vram_mm_access(adev, pos, buf, size, write);
		}

		ret += cursor.size;
		buf += size;
		amdgpu_res_next(&cursor, cursor.size);
	}

	return ret;
}

static void
amdgpu_bo_delete_mem_notify(struct ttm_buffer_object *bo)
{
	amdgpu_bo_move_notify(bo, false, NULL);
}
static struct ttm_device_funcs amdgpu_bo_driver = {
	.ttm_tt_create = &amdgpu_ttm_tt_create,
	.ttm_tt_populate = &amdgpu_ttm_tt_populate,
	.ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
	.ttm_tt_destroy = &amdgpu_ttm_backend_destroy,
	.eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
	.evict_flags = &amdgpu_evict_flags,
	.move = &amdgpu_bo_move,
	.delete_mem_notify = &amdgpu_bo_delete_mem_notify,
	.release_notify = &amdgpu_bo_release_notify,
	.io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
	.io_mem_pfn = amdgpu_ttm_io_mem_pfn,
	.access_memory = &amdgpu_ttm_access_memory,
};
/*
 * Firmware Reservation functions
 */
/**
 * amdgpu_ttm_fw_reserve_vram_fini - free fw reserved vram
 *
 * @adev: amdgpu_device pointer
 *
 * free fw reserved vram if it has been reserved.
 */
static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->mman.fw_vram_usage_reserved_bo,
		NULL, &adev->mman.fw_vram_usage_va);
}
/*
 * Driver Reservation functions
 */
/**
 * amdgpu_ttm_drv_reserve_vram_fini - free drv reserved vram
 *
 * @adev: amdgpu_device pointer
 *
 * free drv reserved vram if it has been reserved.
 */
static void amdgpu_ttm_drv_reserve_vram_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->mman.drv_vram_usage_reserved_bo,
			      NULL,
			      &adev->mman.drv_vram_usage_va);
}
/**
 * amdgpu_ttm_fw_reserve_vram_init - create bo vram reservation from fw
 *
 * @adev: amdgpu_device pointer
 *
 * create bo vram reservation from fw.
 */
static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
{
	uint64_t vram_size = adev->gmc.visible_vram_size;

	adev->mman.fw_vram_usage_va = NULL;
	adev->mman.fw_vram_usage_reserved_bo = NULL;

	if (adev->mman.fw_vram_usage_size == 0 ||
	    adev->mman.fw_vram_usage_size > vram_size)
		return 0;

	return amdgpu_bo_create_kernel_at(adev,
					  adev->mman.fw_vram_usage_start_offset,
					  adev->mman.fw_vram_usage_size,
					  &adev->mman.fw_vram_usage_reserved_bo,
					  &adev->mman.fw_vram_usage_va);
}
/**
 * amdgpu_ttm_drv_reserve_vram_init - create bo vram reservation from driver
 *
 * @adev: amdgpu_device pointer
 *
 * create bo vram reservation from drv.
 */
static int amdgpu_ttm_drv_reserve_vram_init(struct amdgpu_device *adev)
{
	u64 vram_size = adev->gmc.visible_vram_size;

	adev->mman.drv_vram_usage_va = NULL;
	adev->mman.drv_vram_usage_reserved_bo = NULL;

	if (adev->mman.drv_vram_usage_size == 0 ||
	    adev->mman.drv_vram_usage_size > vram_size)
		return 0;

	return amdgpu_bo_create_kernel_at(adev,
					  adev->mman.drv_vram_usage_start_offset,
					  adev->mman.drv_vram_usage_size,
					  &adev->mman.drv_vram_usage_reserved_bo,
					  &adev->mman.drv_vram_usage_va);
}
/*
 * Memory training reservation functions
 */

/**
 * amdgpu_ttm_training_reserve_vram_fini - free memory training reserved vram
 *
 * @adev: amdgpu_device pointer
 *
 * free memory training reserved vram if it has been reserved.
 */
static int amdgpu_ttm_training_reserve_vram_fini(struct amdgpu_device *adev)
{
	struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;

	ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
	amdgpu_bo_free_kernel(&ctx->c2p_bo, NULL, NULL);
	ctx->c2p_bo = NULL;

	return 0;
}
static void amdgpu_ttm_training_data_block_init(struct amdgpu_device *adev,
						uint32_t reserve_size)
{
	struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;

	memset(ctx, 0, sizeof(*ctx));

	ctx->c2p_train_data_offset =
		ALIGN((adev->gmc.mc_vram_size - reserve_size - SZ_1M), SZ_1M);
	ctx->p2c_train_data_offset =
		(adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET);
	ctx->train_data_size =
		GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES;

	DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
			ctx->train_data_size,
			ctx->p2c_train_data_offset,
			ctx->c2p_train_data_offset);
}
/*
 * reserve TMR memory at the top of VRAM which holds
 * IP Discovery data and is protected by PSP.
 */
static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
{
	struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
	bool mem_train_support = false;
	uint32_t reserve_size = 0;
	int ret;

	if (adev->bios && !amdgpu_sriov_vf(adev)) {
		if (amdgpu_atomfirmware_mem_training_supported(adev))
			mem_train_support = true;
		else
			DRM_DEBUG("memory training is not supported!\n");
	}

	/*
	 * Query reserved tmr size through atom firmwareinfo for Sienna_Cichlid and onwards for all
	 * the use cases (IP discovery/G6 memory training/profiling/diagnostic data.etc)
	 *
	 * Otherwise, fallback to legacy approach to check and reserve tmr block for ip
	 * discovery data and G6 memory training data respectively
	 */
	if (adev->bios)
		reserve_size =
			amdgpu_atomfirmware_get_fw_reserved_fb_size(adev);

	if (!adev->bios &&
	    (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
	     amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)))
		reserve_size = max(reserve_size, (uint32_t)280 << 20);
	else if (!reserve_size)
		reserve_size = DISCOVERY_TMR_OFFSET;

	if (mem_train_support) {
		/* reserve vram for mem train according to TMR location */
		amdgpu_ttm_training_data_block_init(adev, reserve_size);
		ret = amdgpu_bo_create_kernel_at(adev,
						 ctx->c2p_train_data_offset,
						 ctx->train_data_size,
						 &ctx->c2p_bo,
						 NULL);
		if (ret) {
			DRM_ERROR("alloc c2p_bo failed(%d)!\n", ret);
			amdgpu_ttm_training_reserve_vram_fini(adev);
			return ret;
		}
		ctx->init = PSP_MEM_TRAIN_RESERVE_SUCCESS;
	}

	if (!adev->gmc.is_app_apu) {
		ret = amdgpu_bo_create_kernel_at(
			adev, adev->gmc.real_vram_size - reserve_size,
			reserve_size, &adev->mman.fw_reserved_memory, NULL);
		if (ret) {
			DRM_ERROR("alloc tmr failed(%d)!\n", ret);
			amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory,
					      NULL, NULL);
			return ret;
		}
	} else {
		DRM_DEBUG_DRIVER("backdoor fw loading path for PSP TMR, no reservation needed\n");
	}

	return 0;
}
static int amdgpu_ttm_pools_init(struct amdgpu_device *adev)
{
	int i;

	if (!adev->gmc.is_app_apu || !adev->gmc.num_mem_partitions)
		return 0;

	adev->mman.ttm_pools = kcalloc(adev->gmc.num_mem_partitions,
				       sizeof(*adev->mman.ttm_pools),
				       GFP_KERNEL);
	if (!adev->mman.ttm_pools)
		return -ENOMEM;

	for (i = 0; i < adev->gmc.num_mem_partitions; i++) {
		ttm_pool_init(&adev->mman.ttm_pools[i], adev->dev,
			      adev->gmc.mem_partitions[i].numa.node,
			      false, false);
	}
	return 0;
}

static void amdgpu_ttm_pools_fini(struct amdgpu_device *adev)
{
	int i;

	if (!adev->gmc.is_app_apu || !adev->mman.ttm_pools)
		return;

	for (i = 0; i < adev->gmc.num_mem_partitions; i++)
		ttm_pool_fini(&adev->mman.ttm_pools[i]);

	kfree(adev->mman.ttm_pools);
	adev->mman.ttm_pools = NULL;
}
/*
 * amdgpu_ttm_init - Init the memory management (ttm) as well as various
 * gtt/vram related fields.
 *
 * This initializes all of the memory space pools that the TTM layer
 * will need such as the GTT space (system memory mapped to the device),
 * VRAM (on-board memory), and on-chip memories (GDS, GWS, OA) which
 * can be mapped per VMID.
 */
int amdgpu_ttm_init(struct amdgpu_device *adev)
{
	uint64_t gtt_size;
	int r;

	mutex_init(&adev->mman.gtt_window_lock);

	dma_set_max_seg_size(adev->dev, UINT_MAX);
	/* No others user of address space so set it to 0 */
	r = ttm_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev,
			    adev_to_drm(adev)->anon_inode->i_mapping,
			    adev_to_drm(adev)->vma_offset_manager,
			    adev->need_swiotlb,
			    dma_addressing_limited(adev->dev));
	if (r) {
		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
		return r;
	}

	r = amdgpu_ttm_pools_init(adev);
	if (r) {
		DRM_ERROR("failed to init ttm pools(%d).\n", r);
		return r;
	}
	adev->mman.initialized = true;

	/* Initialize VRAM pool with all of VRAM divided into pages */
	r = amdgpu_vram_mgr_init(adev);
	if (r) {
		DRM_ERROR("Failed initializing VRAM heap.\n");
		return r;
	}

	/* Change the size here instead of the init above so only lpfn is affected */
	amdgpu_ttm_set_buffer_funcs_status(adev, false);
#ifdef CONFIG_64BIT
#ifdef CONFIG_X86
	if (adev->gmc.xgmi.connected_to_cpu)
		adev->mman.aper_base_kaddr = ioremap_cache(adev->gmc.aper_base,
				adev->gmc.visible_vram_size);

	else if (adev->gmc.is_app_apu)
		DRM_DEBUG_DRIVER(
			"No need to ioremap when real vram size is 0\n");
	else
#endif
		adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base,
				adev->gmc.visible_vram_size);
#endif

	/*
	 *The reserved vram for firmware must be pinned to the specified
	 *place on the VRAM, so reserve it early.
	 */
	r = amdgpu_ttm_fw_reserve_vram_init(adev);
	if (r)
		return r;

	/*
	 *The reserved vram for driver must be pinned to the specified
	 *place on the VRAM, so reserve it early.
	 */
	r = amdgpu_ttm_drv_reserve_vram_init(adev);
	if (r)
		return r;

	/*
	 * only NAVI10 and onwards ASIC support for IP discovery.
	 * If IP discovery enabled, a block of memory should be
	 * reserved for IP discovery.
	 */
	if (adev->mman.discovery_bin) {
		r = amdgpu_ttm_reserve_tmr(adev);
		if (r)
			return r;
	}

	/* allocate memory as required for VGA
	 * This is used for VGA emulation and pre-OS scanout buffers to
	 * avoid display artifacts while transitioning between pre-OS
	 * and driver.
	 */
	if (!adev->gmc.is_app_apu) {
		r = amdgpu_bo_create_kernel_at(adev, 0,
					       adev->mman.stolen_vga_size,
					       &adev->mman.stolen_vga_memory,
					       NULL);
		if (r)
			return r;

		r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_vga_size,
					       adev->mman.stolen_extended_size,
					       &adev->mman.stolen_extended_memory,
					       NULL);
		if (r)
			return r;

		r = amdgpu_bo_create_kernel_at(adev,
					       adev->mman.stolen_reserved_offset,
					       adev->mman.stolen_reserved_size,
					       &adev->mman.stolen_reserved_memory,
					       NULL);
		if (r)
			return r;
	} else {
		DRM_DEBUG_DRIVER("Skipped stolen memory reservation\n");
	}

	DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
		 (unsigned int)(adev->gmc.real_vram_size / (1024 * 1024)));

	/* Compute GTT size, either based on TTM limit
	 * or whatever the user passed on module init.
	 */
	if (amdgpu_gtt_size == -1)
		gtt_size = ttm_tt_pages_limit() << PAGE_SHIFT;
	else
		gtt_size = (uint64_t)amdgpu_gtt_size << 20;

	/* Initialize GTT memory pool */
	r = amdgpu_gtt_mgr_init(adev, gtt_size);
	if (r) {
		DRM_ERROR("Failed initializing GTT heap.\n");
		return r;
	}
	DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
		 (unsigned int)(gtt_size / (1024 * 1024)));

	/* Initialize doorbell pool on PCI BAR */
	r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_DOORBELL, adev->doorbell.size / PAGE_SIZE);
	if (r) {
		DRM_ERROR("Failed initializing doorbell heap.\n");
		return r;
	}

	/* Create a doorbell page for kernel usages */
	r = amdgpu_doorbell_create_kernel_doorbells(adev);
	if (r) {
		DRM_ERROR("Failed to initialize kernel doorbells.\n");
		return r;
	}

	/* Initialize preemptible memory pool */
	r = amdgpu_preempt_mgr_init(adev);
	if (r) {
		DRM_ERROR("Failed initializing PREEMPT heap.\n");
		return r;
	}

	/* Initialize various on-chip memory pools */
	r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GDS, adev->gds.gds_size);
	if (r) {
		DRM_ERROR("Failed initializing GDS heap.\n");
		return r;
	}

	r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GWS, adev->gds.gws_size);
	if (r) {
		DRM_ERROR("Failed initializing gws heap.\n");
		return r;
	}

	r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_OA, adev->gds.oa_size);
	if (r) {
		DRM_ERROR("Failed initializing oa heap.\n");
		return r;
	}
	if (amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &adev->mman.sdma_access_bo, NULL,
				    &adev->mman.sdma_access_ptr))
		DRM_WARN("Debug VRAM access will use slowpath MM access\n");

	return 0;
}
/*
 * amdgpu_ttm_fini - De-initialize the TTM memory pools
 */
void amdgpu_ttm_fini(struct amdgpu_device *adev)
{
	int idx;

	if (!adev->mman.initialized)
		return;

	amdgpu_ttm_pools_fini(adev);

	amdgpu_ttm_training_reserve_vram_fini(adev);
	/* return the stolen vga memory back to VRAM */
	if (!adev->gmc.is_app_apu) {
		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
		amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
		/* return the FW reserved memory back to VRAM */
		amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL,
				      NULL);
		if (adev->mman.stolen_reserved_size)
			amdgpu_bo_free_kernel(&adev->mman.stolen_reserved_memory,
					      NULL, NULL);
	}
	amdgpu_bo_free_kernel(&adev->mman.sdma_access_bo, NULL,
			      &adev->mman.sdma_access_ptr);
	amdgpu_ttm_fw_reserve_vram_fini(adev);
	amdgpu_ttm_drv_reserve_vram_fini(adev);

	if (drm_dev_enter(adev_to_drm(adev), &idx)) {

		if (adev->mman.aper_base_kaddr)
			iounmap(adev->mman.aper_base_kaddr);
		adev->mman.aper_base_kaddr = NULL;

		drm_dev_exit(idx);
	}

	amdgpu_vram_mgr_fini(adev);
	amdgpu_gtt_mgr_fini(adev);
	amdgpu_preempt_mgr_fini(adev);
	ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GDS);
	ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS);
	ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA);
	ttm_device_fini(&adev->mman.bdev);
	adev->mman.initialized = false;
	DRM_INFO("amdgpu: ttm finalized\n");
}
/**
 * amdgpu_ttm_set_buffer_funcs_status - enable/disable use of buffer functions
 *
 * @adev: amdgpu_device pointer
 * @enable: true when we can use buffer functions.
 *
 * Enable/disable use of buffer functions during suspend/resume. This should
 * only be called at bootup or when userspace isn't running.
 */
void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
{
	struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
	uint64_t size;
	int r;

	if (!adev->mman.initialized || amdgpu_in_reset(adev) ||
	    adev->mman.buffer_funcs_enabled == enable || adev->gmc.is_app_apu)
		return;

	if (enable) {
		struct amdgpu_ring *ring;
		struct drm_gpu_scheduler *sched;

		ring = adev->mman.buffer_funcs_ring;
		sched = &ring->sched;
		r = drm_sched_entity_init(&adev->mman.high_pr,
					  DRM_SCHED_PRIORITY_KERNEL, &sched,
					  1, NULL);
		if (r) {
			DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
				  r);
			return;
		}

		r = drm_sched_entity_init(&adev->mman.low_pr,
					  DRM_SCHED_PRIORITY_NORMAL, &sched,
					  1, NULL);
		if (r) {
			DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
				  r);
			goto error_free_entity;
		}
	} else {
		drm_sched_entity_destroy(&adev->mman.high_pr);
		drm_sched_entity_destroy(&adev->mman.low_pr);
		dma_fence_put(man->move);
		man->move = NULL;
	}

	/* this just adjusts TTM size idea, which sets lpfn to the correct value */
	if (enable)
		size = adev->gmc.real_vram_size;
	else
		size = adev->gmc.visible_vram_size;
	man->size = size;
	adev->mman.buffer_funcs_enabled = enable;

	return;

error_free_entity:
	drm_sched_entity_destroy(&adev->mman.high_pr);
}
static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev,
				  bool direct_submit,
				  unsigned int num_dw,
				  struct dma_resv *resv,
				  bool vm_needs_flush,
				  struct amdgpu_job **job,
				  bool delayed)
{
	enum amdgpu_ib_pool_type pool = direct_submit ?
		AMDGPU_IB_POOL_DIRECT :
		AMDGPU_IB_POOL_DELAYED;
	int r;
	struct drm_sched_entity *entity = delayed ? &adev->mman.low_pr :
						    &adev->mman.high_pr;
	r = amdgpu_job_alloc_with_ib(adev, entity,
				     AMDGPU_FENCE_OWNER_UNDEFINED,
				     num_dw * 4, pool, job);
	if (r)
		return r;

	if (vm_needs_flush) {
		(*job)->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo ?
							adev->gmc.pdb0_bo :
							adev->gart.bo);
		(*job)->vm_needs_flush = true;
	}
	if (!resv)
		return 0;

	return drm_sched_job_add_resv_dependencies(&(*job)->base, resv,
						   DMA_RESV_USAGE_BOOKKEEP);
}
int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
		       uint64_t dst_offset, uint32_t byte_count,
		       struct dma_resv *resv,
		       struct dma_fence **fence, bool direct_submit,
		       bool vm_needs_flush, uint32_t copy_flags)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned int num_loops, num_dw;
	struct amdgpu_job *job;
	uint32_t max_bytes;
	unsigned int i;
	int r;

	if (!direct_submit && !ring->sched.ready) {
		DRM_ERROR("Trying to move memory with ring turned off.\n");
		return -EINVAL;
	}

	max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
	num_loops = DIV_ROUND_UP(byte_count, max_bytes);
	num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
	r = amdgpu_ttm_prepare_job(adev, direct_submit, num_dw,
				   resv, vm_needs_flush, &job, false);
	if (r)
		return r;

	for (i = 0; i < num_loops; i++) {
		uint32_t cur_size_in_bytes = min(byte_count, max_bytes);

		amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset,
					dst_offset, cur_size_in_bytes, copy_flags);

		src_offset += cur_size_in_bytes;
		dst_offset += cur_size_in_bytes;
		byte_count -= cur_size_in_bytes;
	}

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);
	if (direct_submit)
		r = amdgpu_job_submit_direct(job, ring, fence);
	else
		*fence = amdgpu_job_submit(job);
	if (r)
		goto error_free;

	return r;

error_free:
	amdgpu_job_free(job);
	DRM_ERROR("Error scheduling IBs (%d)\n", r);
	return r;
}
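
/*
 * Usage sketch (illustration only, not part of the driver): a hypothetical
 * caller that copies @size bytes between two GPU addresses using the
 * buffer-funcs ring and waits for the result.  example_blit() and its
 * arguments are made up; real callers such as the BO move path also handle
 * windowed GART mappings.
 *
 *	static int example_blit(struct amdgpu_device *adev,
 *				uint64_t src, uint64_t dst, uint32_t size)
 *	{
 *		struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
 *		struct dma_fence *fence = NULL;
 *		int r;
 *
 *		r = amdgpu_copy_buffer(ring, src, dst, size, NULL, &fence,
 *				       false, false, 0);
 *		if (r)
 *			return r;
 *
 *		dma_fence_wait(fence, false);
 *		dma_fence_put(fence);
 *		return 0;
 *	}
 */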
static int amdgpu_ttm_fill_mem(struct amdgpu_ring *ring, uint32_t src_data,
			       uint64_t dst_addr, uint32_t byte_count,
			       struct dma_resv *resv,
			       struct dma_fence **fence,
			       bool vm_needs_flush, bool delayed)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned int num_loops, num_dw;
	struct amdgpu_job *job;
	uint32_t max_bytes;
	unsigned int i;
	int r;

	max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
	num_loops = DIV_ROUND_UP_ULL(byte_count, max_bytes);
	num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->fill_num_dw, 8);
	r = amdgpu_ttm_prepare_job(adev, false, num_dw, resv, vm_needs_flush,
				   &job, delayed);
	if (r)
		return r;

	for (i = 0; i < num_loops; i++) {
		uint32_t cur_size = min(byte_count, max_bytes);

		amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data, dst_addr,
					cur_size);

		dst_addr += cur_size;
		byte_count -= cur_size;
	}

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);
	*fence = amdgpu_job_submit(job);
	return 0;
}
/**
 * amdgpu_ttm_clear_buffer - clear memory buffers
 * @bo: amdgpu buffer object
 * @resv: reservation object
 * @fence: dma_fence associated with the operation
 *
 * Clear the memory buffer resource.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo,
			    struct dma_resv *resv,
			    struct dma_fence **fence)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	struct amdgpu_res_cursor cursor;
	u64 addr;
	int r = 0;

	if (!adev->mman.buffer_funcs_enabled)
		return -EINVAL;

	if (!fence)
		return -EINVAL;

	*fence = dma_fence_get_stub();

	amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor);

	mutex_lock(&adev->mman.gtt_window_lock);
	while (cursor.remaining) {
		struct dma_fence *next = NULL;
		u64 size;

		if (amdgpu_res_cleared(&cursor)) {
			amdgpu_res_next(&cursor, cursor.size);
			continue;
		}

		/* Never clear more than 256MiB at once to avoid timeouts */
		size = min(cursor.size, 256ULL << 20);

		r = amdgpu_ttm_map_buffer(&bo->tbo, bo->tbo.resource, &cursor,
					  1, ring, false, &size, &addr);
		if (r)
			goto err;

		r = amdgpu_ttm_fill_mem(ring, 0, addr, size, resv,
					&next, true, true);
		if (r)
			goto err;

		dma_fence_put(*fence);
		*fence = next;

		amdgpu_res_next(&cursor, size);
	}
err:
	mutex_unlock(&adev->mman.gtt_window_lock);

	return r;
}
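
/*
 * Usage sketch (illustration only, not the driver's actual call site): a
 * hypothetical caller clearing a newly allocated VRAM BO and waiting for the
 * clear to land before handing the BO out.
 *
 *	struct dma_fence *fence = NULL;
 *	int r;
 *
 *	r = amdgpu_ttm_clear_buffer(abo, abo->tbo.base.resv, &fence);
 *	if (!r && fence) {
 *		dma_fence_wait(fence, false);
 *		dma_fence_put(fence);
 *	}
 */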
int amdgpu_fill_buffer(struct amdgpu_bo *bo,
		       uint32_t src_data,
		       struct dma_resv *resv,
		       struct dma_fence **f,
		       bool delayed)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	struct dma_fence *fence = NULL;
	struct amdgpu_res_cursor dst;
	int r;

	if (!adev->mman.buffer_funcs_enabled) {
		DRM_ERROR("Trying to clear memory with ring turned off.\n");
		return -EINVAL;
	}

	amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &dst);

	mutex_lock(&adev->mman.gtt_window_lock);
	while (dst.remaining) {
		struct dma_fence *next;
		uint64_t cur_size, to;

		/* Never fill more than 256MiB at once to avoid timeouts */
		cur_size = min(dst.size, 256ULL << 20);

		r = amdgpu_ttm_map_buffer(&bo->tbo, bo->tbo.resource, &dst,
					  1, ring, false, &cur_size, &to);
		if (r)
			goto error;

		r = amdgpu_ttm_fill_mem(ring, src_data, to, cur_size, resv,
					&next, true, delayed);
		if (r)
			goto error;

		dma_fence_put(fence);
		fence = next;

		amdgpu_res_next(&dst, cur_size);
	}
error:
	mutex_unlock(&adev->mman.gtt_window_lock);
	if (f)
		*f = dma_fence_get(fence);
	dma_fence_put(fence);

	return r;
}
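
/*
 * Usage sketch (illustration only, not part of the driver): filling a BO with
 * a 32-bit pattern and waiting for completion.  The pattern value and the
 * blocking wait are assumptions made for the example.
 *
 *	struct dma_fence *fence = NULL;
 *	int r;
 *
 *	r = amdgpu_fill_buffer(abo, 0xdeadbeef, NULL, &fence, false);
 *	if (!r && fence) {
 *		dma_fence_wait(fence, false);
 *		dma_fence_put(fence);
 *	}
 */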
/**
 * amdgpu_ttm_evict_resources - evict memory buffers
 * @adev: amdgpu device object
 * @mem_type: evicted BO's memory type
 *
 * Evicts all @mem_type buffers on the lru list of the memory type.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_ttm_evict_resources(struct amdgpu_device *adev, int mem_type)
{
	struct ttm_resource_manager *man;

	switch (mem_type) {
	case TTM_PL_VRAM:
	case TTM_PL_TT:
	case AMDGPU_PL_GWS:
	case AMDGPU_PL_GDS:
	case AMDGPU_PL_OA:
		man = ttm_manager_type(&adev->mman.bdev, mem_type);
		break;
	default:
		DRM_ERROR("Trying to evict invalid memory type\n");
		return -EINVAL;
	}

	return ttm_resource_manager_evict_all(&adev->mman.bdev, man);
}
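
/*
 * Usage sketch (assumption, for illustration only): a suspend-style path
 * could evict VRAM first and then the GTT domain, checking each result.
 *
 *	r = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
 *	if (r)
 *		return r;
 *	r = amdgpu_ttm_evict_resources(adev, TTM_PL_TT);
 */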
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_ttm_page_pool_show(struct seq_file *m, void *unused)
{
	struct amdgpu_device *adev = m->private;

	return ttm_pool_debugfs(&adev->mman.bdev.pool, m);
}

DEFINE_SHOW_ATTRIBUTE(amdgpu_ttm_page_pool);
/*
 * amdgpu_ttm_vram_read - Linear read access to VRAM
 *
 * Accesses VRAM via MMIO for debugging purposes.
 */
static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
				    size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	if (*pos >= adev->gmc.mc_vram_size)
		return -ENXIO;

	size = min(size, (size_t)(adev->gmc.mc_vram_size - *pos));
	while (size) {
		size_t bytes = min(size, AMDGPU_TTM_VRAM_MAX_DW_READ * 4);
		uint32_t value[AMDGPU_TTM_VRAM_MAX_DW_READ];

		amdgpu_device_vram_access(adev, *pos, value, bytes, false);
		if (copy_to_user(buf, value, bytes))
			return -EFAULT;

		result += bytes;
		buf += bytes;
		*pos += bytes;
		size -= bytes;
	}

	return result;
}
/*
 * amdgpu_ttm_vram_write - Linear write access to VRAM
 *
 * Accesses VRAM via MMIO for debugging purposes.
 */
static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf,
				     size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	if (*pos >= adev->gmc.mc_vram_size)
		return -ENXIO;

	while (size) {
		uint32_t value;

		if (*pos >= adev->gmc.mc_vram_size)
			return result;

		r = get_user(value, (uint32_t *)buf);
		if (r)
			return r;

		amdgpu_device_mm_access(adev, *pos, &value, 4, true);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}
static const struct file_operations amdgpu_ttm_vram_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_ttm_vram_read,
	.write = amdgpu_ttm_vram_write,
	.llseek = default_llseek,
};
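
/*
 * Usage sketch (userspace, illustration only): the "amdgpu_vram" file accepts
 * any dword-aligned offset and size; the debugfs path below assumes DRM
 * minor 0 and root privileges.
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/sys/kernel/debug/dri/0/amdgpu_vram", O_RDONLY);
 *	uint32_t dw;
 *	pread(fd, &dw, sizeof(dw), 0x1000);	// one dword at VRAM offset 0x1000
 *	close(fd);
 */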
/*
 * amdgpu_iomem_read - Virtual read access to GPU mapped memory
 *
 * This function is used to read memory that has been mapped to the
 * GPU and the known addresses are not physical addresses but instead
 * bus addresses (e.g., what you'd put in an IB or ring buffer).
 */
static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf,
				 size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	struct iommu_domain *dom;
	ssize_t result = 0;
	int r;

	/* retrieve the IOMMU domain if any for this device */
	dom = iommu_get_domain_for_dev(adev->dev);

	while (size) {
		phys_addr_t addr = *pos & PAGE_MASK;
		loff_t off = *pos & ~PAGE_MASK;
		size_t bytes = PAGE_SIZE - off;
		unsigned long pfn;
		struct page *p;
		void *ptr;

		bytes = min(bytes, size);

		/* Translate the bus address to a physical address.  If
		 * the domain is NULL it means there is no IOMMU active
		 * and the address translation is the identity
		 */
		addr = dom ? iommu_iova_to_phys(dom, addr) : addr;

		pfn = addr >> PAGE_SHIFT;
		if (!pfn_valid(pfn))
			return -EPERM;

		p = pfn_to_page(pfn);
		if (p->mapping != adev->mman.bdev.dev_mapping)
			return -EPERM;

		ptr = kmap_local_page(p);
		r = copy_to_user(buf, ptr + off, bytes);
		kunmap_local(ptr);
		if (r)
			return -EFAULT;

		buf += bytes;
		size -= bytes;
		*pos += bytes;
		result += bytes;
	}

	return result;
}
/*
 * amdgpu_iomem_write - Virtual write access to GPU mapped memory
 *
 * This function is used to write memory that has been mapped to the
 * GPU and the known addresses are not physical addresses but instead
 * bus addresses (e.g., what you'd put in an IB or ring buffer).
 */
static ssize_t amdgpu_iomem_write(struct file *f, const char __user *buf,
				  size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	struct iommu_domain *dom;
	ssize_t result = 0;
	int r;

	dom = iommu_get_domain_for_dev(adev->dev);

	while (size) {
		phys_addr_t addr = *pos & PAGE_MASK;
		loff_t off = *pos & ~PAGE_MASK;
		size_t bytes = PAGE_SIZE - off;
		unsigned long pfn;
		struct page *p;
		void *ptr;

		bytes = min(bytes, size);

		addr = dom ? iommu_iova_to_phys(dom, addr) : addr;

		pfn = addr >> PAGE_SHIFT;
		if (!pfn_valid(pfn))
			return -EPERM;

		p = pfn_to_page(pfn);
		if (p->mapping != adev->mman.bdev.dev_mapping)
			return -EPERM;

		ptr = kmap_local_page(p);
		r = copy_from_user(ptr + off, buf, bytes);
		kunmap_local(ptr);
		if (r)
			return -EFAULT;

		buf += bytes;
		size -= bytes;
		*pos += bytes;
		result += bytes;
	}

	return result;
}
static const struct file_operations amdgpu_ttm_iomem_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_iomem_read,
	.write = amdgpu_iomem_write,
	.llseek = default_llseek
};

#endif
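
/*
 * Usage sketch (userspace, illustration only): "amdgpu_iomem" interprets the
 * file offset as a GPU bus address (e.g. a GTT address taken from an IB) and
 * translates it through the IOMMU before touching the backing page.  The path
 * assumes DRM minor 0; gtt_bus_addr is a hypothetical address.
 *
 *	int fd = open("/sys/kernel/debug/dri/0/amdgpu_iomem", O_RDONLY);
 *	uint32_t dw;
 *	pread(fd, &dw, sizeof(dw), (off_t)gtt_bus_addr);
 *	close(fd);
 */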
void amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;

	debugfs_create_file_size("amdgpu_vram", 0444, root, adev,
				 &amdgpu_ttm_vram_fops, adev->gmc.mc_vram_size);
	debugfs_create_file("amdgpu_iomem", 0444, root, adev,
			    &amdgpu_ttm_iomem_fops);
	debugfs_create_file("ttm_page_pool", 0444, root, adev,
			    &amdgpu_ttm_page_pool_fops);
	ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
							     TTM_PL_VRAM),
					    root, "amdgpu_vram_mm");
	ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
							     TTM_PL_TT),
					    root, "amdgpu_gtt_mm");
	ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
							     AMDGPU_PL_GDS),
					    root, "amdgpu_gds_mm");
	ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
							     AMDGPU_PL_GWS),
					    root, "amdgpu_gws_mm");
	ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
							     AMDGPU_PL_OA),
					    root, "amdgpu_oa_mm");
#endif
}