/*
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 */
#include <linux/list.h>
#include <linux/slab.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
/**
 * DOC: amdgpu_object
 *
 * This defines the interfaces to operate on an &amdgpu_bo buffer object which
 * represents memory used by the driver (VRAM, system memory, etc.). The driver
 * provides DRM/GEM APIs to userspace. DRM/GEM APIs then use these interfaces
 * to create/destroy/set buffer objects which are then managed by the kernel
 * TTM memory manager.
 * The interfaces are also used internally by kernel clients, including gfx,
 * uvd, etc. for kernel managed allocations used by the GPU.
 */
/**
 * amdgpu_bo_subtract_pin_size - Remove BO from pin_size accounting
 *
 * @bo: &amdgpu_bo buffer object
 *
 * This function is called when a BO stops being pinned, and updates the
 * &amdgpu_device pin_size values accordingly.
 */
static void amdgpu_bo_subtract_pin_size(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
		atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size);
		atomic64_sub(amdgpu_vram_mgr_bo_visible_size(bo),
			     &adev->visible_pin_size);
	} else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
		atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size);
	}
}
static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);

	if (bo->pin_count > 0)
		amdgpu_bo_subtract_pin_size(bo);

	amdgpu_amdkfd_unreserve_memory_limit(bo);

	if (bo->gem_base.import_attach)
		drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg);
	drm_gem_object_release(&bo->gem_base);
	amdgpu_bo_unref(&bo->parent);
	if (!list_empty(&bo->shadow_list)) {
		mutex_lock(&adev->shadow_list_lock);
		list_del_init(&bo->shadow_list);
		mutex_unlock(&adev->shadow_list_lock);
	}
	kfree(bo->metadata);
	kfree(bo);
}
/**
 * amdgpu_bo_is_amdgpu_bo - check if the buffer object is an &amdgpu_bo
 * @bo: buffer object to be checked
 *
 * Uses destroy function associated with the object to determine if this is
 * an &amdgpu_bo.
 *
 * Returns:
 * true if the object belongs to &amdgpu_bo, false if not.
 */
bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &amdgpu_bo_destroy)
		return true;
	return false;
}
/**
 * amdgpu_bo_placement_from_domain - set buffer's placement
 * @abo: &amdgpu_bo buffer object whose placement is to be set
 * @domain: requested domain
 *
 * Sets buffer's placement according to requested domain and the buffer's
 * flags.
 */
void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct ttm_placement *placement = &abo->placement;
	struct ttm_place *places = abo->placements;
	u64 flags = abo->flags;
	u32 c = 0;

	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
		unsigned visible_pfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;

		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
			TTM_PL_FLAG_VRAM;

		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
			places[c].lpfn = visible_pfn;
		else
			places[c].flags |= TTM_PL_FLAG_TOPDOWN;

		if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
			places[c].flags |= TTM_PL_FLAG_CONTIGUOUS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_TT;
		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
			places[c].flags |= TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED;
		else
			places[c].flags |= TTM_PL_FLAG_CACHED;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_CPU) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_SYSTEM;
		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
			places[c].flags |= TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED;
		else
			places[c].flags |= TTM_PL_FLAG_CACHED;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GDS) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GDS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GWS) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GWS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_OA) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_OA;
		c++;
	}

	if (!c) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
		c++;
	}

	BUG_ON(c >= AMDGPU_BO_MAX_PLACEMENTS);

	placement->num_placement = c;
	placement->placement = places;

	placement->num_busy_placement = c;
	placement->busy_placement = places;
}
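
/*
 * Illustrative sketch, not part of the driver: a typical caller pairs
 * amdgpu_bo_placement_from_domain() with ttm_bo_validate() to move an
 * already reserved BO into the requested domain, roughly:
 *
 *	struct ttm_operation_ctx ctx = { false, false };
 *
 *	amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
 *	r = ttm_bo_validate(&abo->tbo, &abo->placement, &ctx);
 *
 * Here abo and r are the hypothetical caller's variables; amdgpu_bo_validate()
 * below follows the same pattern for shadow BOs.
 */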
/**
 * amdgpu_bo_create_reserved - create reserved BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: used to initialize BOs in structures
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use, and returns it still
 * reserved.
 *
 * Note: For bo_ptr new BO is only created if bo_ptr points to NULL.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
			      unsigned long size, int align,
			      u32 domain, struct amdgpu_bo **bo_ptr,
			      u64 *gpu_addr, void **cpu_addr)
{
	struct amdgpu_bo_param bp;
	bool free = false;
	int r;

	if (!size) {
		amdgpu_bo_unref(bo_ptr);
		return 0;
	}

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = align;
	bp.domain = domain;
	bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
		AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	bp.type = ttm_bo_type_kernel;
	bp.resv = NULL;

	if (!*bo_ptr) {
		r = amdgpu_bo_create(adev, &bp, bo_ptr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to allocate kernel bo\n",
				r);
			return r;
		}
		free = true;
	}

	r = amdgpu_bo_reserve(*bo_ptr, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve kernel bo\n", r);
		goto error_free;
	}

	r = amdgpu_bo_pin(*bo_ptr, domain);
	if (r) {
		dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
		goto error_unreserve;
	}

	r = amdgpu_ttm_alloc_gart(&(*bo_ptr)->tbo);
	if (r) {
		dev_err(adev->dev, "%p bind failed\n", *bo_ptr);
		goto error_unpin;
	}

	if (gpu_addr)
		*gpu_addr = amdgpu_bo_gpu_offset(*bo_ptr);

	if (cpu_addr) {
		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
		if (r) {
			dev_err(adev->dev, "(%d) kernel bo map failed\n", r);
			goto error_unpin;
		}
	}

	return 0;

error_unpin:
	amdgpu_bo_unpin(*bo_ptr);
error_unreserve:
	amdgpu_bo_unreserve(*bo_ptr);

error_free:
	if (free)
		amdgpu_bo_unref(bo_ptr);

	return r;
}
/**
 * amdgpu_bo_create_kernel - create BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: used to initialize BOs in structures
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use.
 *
 * Note: For bo_ptr new BO is only created if bo_ptr points to NULL.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
			    unsigned long size, int align,
			    u32 domain, struct amdgpu_bo **bo_ptr,
			    u64 *gpu_addr, void **cpu_addr)
{
	int r;

	r = amdgpu_bo_create_reserved(adev, size, align, domain, bo_ptr,
				      gpu_addr, cpu_addr);
	if (r)
		return r;

	if (*bo_ptr)
		amdgpu_bo_unreserve(*bo_ptr);

	return 0;
}
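
/*
 * Illustrative sketch, not part of the driver: kernel clients normally pair
 * amdgpu_bo_create_kernel() with amdgpu_bo_free_kernel() below. The variables
 * (bo, gpu_addr, cpu_ptr, r) are the hypothetical caller's:
 *
 *	struct amdgpu_bo *bo = NULL;
 *	u64 gpu_addr;
 *	void *cpu_ptr;
 *	int r;
 *
 *	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
 *				    AMDGPU_GEM_DOMAIN_VRAM, &bo,
 *				    &gpu_addr, &cpu_ptr);
 *	if (r)
 *		return r;
 *	...
 *	amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_ptr);
 */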
/**
 * amdgpu_bo_free_kernel - free BO for kernel use
 *
 * @bo: amdgpu BO to free
 * @gpu_addr: pointer to where the BO's GPU memory space address was stored
 * @cpu_addr: pointer to where the BO's CPU memory space address was stored
 *
 * Unmaps and unpins a BO for kernel internal use.
 */
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
			   void **cpu_addr)
{
	if (*bo == NULL)
		return;

	if (likely(amdgpu_bo_reserve(*bo, true) == 0)) {
		if (cpu_addr)
			amdgpu_bo_kunmap(*bo);

		amdgpu_bo_unpin(*bo);
		amdgpu_bo_unreserve(*bo);
	}
	amdgpu_bo_unref(bo);

	if (gpu_addr)
		*gpu_addr = 0;

	if (cpu_addr)
		*cpu_addr = NULL;
}
/* Validate that the BO size fits within the total memory of the requested domain */
static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
				    unsigned long size, u32 domain)
{
	struct ttm_mem_type_manager *man = NULL;

	/*
	 * If GTT is part of requested domains the check must succeed to
	 * allow fall back to GTT
	 */
	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
		man = &adev->mman.bdev.man[TTM_PL_TT];

		if (size < (man->size << PAGE_SHIFT))
			return true;
		else
			goto fail;
	}

	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
		man = &adev->mman.bdev.man[TTM_PL_VRAM];

		if (size < (man->size << PAGE_SHIFT))
			return true;
		else
			goto fail;
	}

	/* TODO add more domains checks, such as AMDGPU_GEM_DOMAIN_CPU */
	return true;

fail:
	DRM_DEBUG("BO size %lu > total memory in domain: %llu\n", size,
		  man->size << PAGE_SHIFT);
	return false;
}
static int amdgpu_bo_do_create(struct amdgpu_device *adev,
			       struct amdgpu_bo_param *bp,
			       struct amdgpu_bo **bo_ptr)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = (bp->type != ttm_bo_type_kernel),
		.no_wait_gpu = false,
		.resv = bp->resv,
		.flags = TTM_OPT_FLAG_ALLOW_RES_EVICT
	};
	struct amdgpu_bo *bo;
	unsigned long page_align, size = bp->size;
	size_t acc_size;
	int r;

	/* Note that GDS/GWS/OA allocates 1 page per byte/resource. */
	if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
		/* GWS and OA don't need any alignment. */
		page_align = bp->byte_align;
		size <<= PAGE_SHIFT;
	} else if (bp->domain & AMDGPU_GEM_DOMAIN_GDS) {
		/* Both size and alignment must be a multiple of 4. */
		page_align = ALIGN(bp->byte_align, 4);
		size = ALIGN(size, 4) << PAGE_SHIFT;
	} else {
		/* Memory should be aligned at least to a page size. */
		page_align = ALIGN(bp->byte_align, PAGE_SIZE) >> PAGE_SHIFT;
		size = ALIGN(size, PAGE_SIZE);
	}

	if (!amdgpu_bo_validate_size(adev, size, bp->domain))
		return -ENOMEM;

	*bo_ptr = NULL;

	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
				       sizeof(struct amdgpu_bo));

	bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	drm_gem_private_object_init(adev->ddev, &bo->gem_base, size);
	INIT_LIST_HEAD(&bo->shadow_list);
	bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain :
		bp->domain;
	bo->allowed_domains = bo->preferred_domains;
	if (bp->type != ttm_bo_type_kernel &&
	    bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
		bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

	bo->flags = bp->flags;

#ifdef CONFIG_X86_32
	/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
	 */
	bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
	/* Don't try to enable write-combining when it can't work, or things
	 * may get slow
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
	 */

#ifndef CONFIG_COMPILE_TEST
#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
	 thanks to write-combining
#endif

	if (bo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
		DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
			      "better performance thanks to write-combining\n");
	bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
#else
	/* For architectures that don't support WC memory,
	 * mask out the WC flag from the BO
	 */
	if (!drm_arch_can_wc_memory())
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
#endif

	bo->tbo.bdev = &adev->mman.bdev;
	amdgpu_bo_placement_from_domain(bo, bp->domain);
	if (bp->type == ttm_bo_type_kernel)
		bo->tbo.priority = 1;

	r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, bp->type,
				 &bo->placement, page_align, &ctx, acc_size,
				 NULL, bp->resv, &amdgpu_bo_destroy);
	if (unlikely(r != 0))
		return r;

	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
	    bo->tbo.mem.mem_type == TTM_PL_VRAM &&
	    bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
					     ctx.bytes_moved);
	else
		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);

	if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
	    bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
		struct dma_fence *fence;

		r = amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence);
		if (unlikely(r))
			goto fail_unreserve;

		amdgpu_bo_fence(bo, fence, false);
		dma_fence_put(bo->tbo.moving);
		bo->tbo.moving = dma_fence_get(fence);
		dma_fence_put(fence);
	}
	if (!bp->resv)
		amdgpu_bo_unreserve(bo);
	*bo_ptr = bo;

	trace_amdgpu_bo_create(bo);

	/* Treat CPU_ACCESS_REQUIRED only as a hint if given by UMD */
	if (bp->type == ttm_bo_type_device)
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	return 0;

fail_unreserve:
	if (!bp->resv)
		ww_mutex_unlock(&bo->tbo.resv->lock);
	amdgpu_bo_unref(&bo);
	return r;
}
static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
				   unsigned long size,
				   struct amdgpu_bo *bo)
{
	struct amdgpu_bo_param bp;
	int r;

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
	bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC |
		AMDGPU_GEM_CREATE_SHADOW;
	bp.type = ttm_bo_type_kernel;
	bp.resv = bo->tbo.resv;

	r = amdgpu_bo_do_create(adev, &bp, &bo->shadow);
	if (!r) {
		bo->shadow->parent = amdgpu_bo_ref(bo);
		mutex_lock(&adev->shadow_list_lock);
		list_add_tail(&bo->shadow->shadow_list, &adev->shadow_list);
		mutex_unlock(&adev->shadow_list_lock);
	}

	return r;
}
/**
 * amdgpu_bo_create - create an &amdgpu_bo buffer object
 * @adev: amdgpu device object
 * @bp: parameters to be used for the buffer object
 * @bo_ptr: pointer to the buffer object pointer
 *
 * Creates an &amdgpu_bo buffer object; and if requested, also creates a
 * shadow object.
 * Shadow object is used to backup the original buffer object, and is always
 * in GTT.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_create(struct amdgpu_device *adev,
		     struct amdgpu_bo_param *bp,
		     struct amdgpu_bo **bo_ptr)
{
	u64 flags = bp->flags;
	int r;

	bp->flags = bp->flags & ~AMDGPU_GEM_CREATE_SHADOW;
	r = amdgpu_bo_do_create(adev, bp, bo_ptr);
	if (r)
		return r;

	if ((flags & AMDGPU_GEM_CREATE_SHADOW) && !(adev->flags & AMD_IS_APU)) {
		if (!bp->resv)
			WARN_ON(reservation_object_lock((*bo_ptr)->tbo.resv,
							NULL));

		r = amdgpu_bo_create_shadow(adev, bp->size, *bo_ptr);

		if (!bp->resv)
			reservation_object_unlock((*bo_ptr)->tbo.resv);

		if (r)
			amdgpu_bo_unref(bo_ptr);
	}

	return r;
}
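
/*
 * Illustrative sketch, not part of the driver: callers describe the requested
 * allocation in a zero-initialized struct amdgpu_bo_param, for example for a
 * GTT buffer without a shared reservation object (size, r and bo belong to
 * the hypothetical caller):
 *
 *	struct amdgpu_bo_param bp;
 *	struct amdgpu_bo *bo;
 *
 *	memset(&bp, 0, sizeof(bp));
 *	bp.size = size;
 *	bp.byte_align = PAGE_SIZE;
 *	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
 *	bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
 *	bp.type = ttm_bo_type_kernel;
 *	bp.resv = NULL;
 *	r = amdgpu_bo_create(adev, &bp, &bo);
 */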
/**
 * amdgpu_bo_validate - validate an &amdgpu_bo buffer object
 * @bo: pointer to the buffer object
 *
 * Sets placement according to domain; and changes placement and caching
 * policy of the buffer object according to the placement.
 * This is used for validating shadow BOs. It calls ttm_bo_validate() to
 * make sure the buffer is resident where it needs to be.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_validate(struct amdgpu_bo *bo)
{
	struct ttm_operation_ctx ctx = { false, false };
	uint32_t domain;
	int r;

	if (bo->pin_count)
		return 0;

	domain = bo->preferred_domains;

retry:
	amdgpu_bo_placement_from_domain(bo, domain);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
		domain = bo->allowed_domains;
		goto retry;
	}

	return r;
}
/**
 * amdgpu_bo_restore_shadow - restore an &amdgpu_bo shadow
 *
 * @shadow: &amdgpu_bo shadow to be restored
 * @fence: dma_fence associated with the operation
 *
 * Copies a buffer object's shadow content back to the object.
 * This is used for recovering a buffer from its shadow in case of a GPU
 * reset where vram context may be lost.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow, struct dma_fence **fence)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(shadow->tbo.bdev);
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	uint64_t shadow_addr, parent_addr;

	shadow_addr = amdgpu_bo_gpu_offset(shadow);
	parent_addr = amdgpu_bo_gpu_offset(shadow->parent);

	return amdgpu_copy_buffer(ring, shadow_addr, parent_addr,
				  amdgpu_bo_size(shadow), NULL, fence,
				  true, false);
}
/**
 * amdgpu_bo_kmap - map an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be mapped
 * @ptr: kernel virtual address to be returned
 *
 * Calls ttm_bo_kmap() to set up the kernel virtual mapping; calls
 * amdgpu_bo_kptr() to get the kernel virtual address.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
{
	void *kptr;
	long r;

	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
		return -EPERM;

	kptr = amdgpu_bo_kptr(bo);
	if (kptr) {
		if (ptr)
			*ptr = kptr;
		return 0;
	}

	r = reservation_object_wait_timeout_rcu(bo->tbo.resv, false, false,
						MAX_SCHEDULE_TIMEOUT);
	if (r < 0)
		return r;

	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r)
		return r;

	if (ptr)
		*ptr = amdgpu_bo_kptr(bo);

	return 0;
}
/**
 * amdgpu_bo_kptr - returns a kernel virtual address of the buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * Calls ttm_kmap_obj_virtual() to get the kernel virtual address
 *
 * Returns:
 * the virtual address of a buffer object area.
 */
void *amdgpu_bo_kptr(struct amdgpu_bo *bo)
{
	bool is_iomem;

	return ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
}

/**
 * amdgpu_bo_kunmap - unmap an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be unmapped
 *
 * Unmaps a kernel map set up by amdgpu_bo_kmap().
 */
void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
{
	if (bo->kmap.bo)
		ttm_bo_kunmap(&bo->kmap);
}
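
/*
 * Illustrative sketch, not part of the driver: CPU access to a reserved,
 * pinned BO is usually bracketed by amdgpu_bo_kmap()/amdgpu_bo_kunmap()
 * (bo, data, size and r are the hypothetical caller's variables):
 *
 *	void *ptr;
 *
 *	r = amdgpu_bo_kmap(bo, &ptr);
 *	if (!r) {
 *		memcpy(ptr, data, size);
 *		amdgpu_bo_kunmap(bo);
 *	}
 */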
/**
 * amdgpu_bo_ref - reference an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * References the contained &ttm_buffer_object.
 *
 * Returns:
 * a refcounted pointer to the &amdgpu_bo buffer object.
 */
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
{
	if (bo == NULL)
		return NULL;

	ttm_bo_get(&bo->tbo);
	return bo;
}

/**
 * amdgpu_bo_unref - unreference an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * Unreferences the contained &ttm_buffer_object and clears the pointer
 */
void amdgpu_bo_unref(struct amdgpu_bo **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;

	tbo = &((*bo)->tbo);
	ttm_bo_put(tbo);
	*bo = NULL;
}
/**
 * amdgpu_bo_pin_restricted - pin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be pinned
 * @domain: domain to be pinned to
 * @min_offset: the start of requested address range
 * @max_offset: the end of requested address range
 *
 * Pins the buffer object according to requested domain and address range. If
 * the memory is unbound gart memory, binds the pages into gart table. Adjusts
 * pin_count and pin_size accordingly.
 *
 * Pinning means to lock pages in memory along with keeping them at a fixed
 * offset. It is required when a buffer can not be moved, for example, when
 * a display buffer is being scanned out.
 *
 * Compared with amdgpu_bo_pin(), this function gives more flexibility on
 * where to pin a buffer if there are specific restrictions on where a buffer
 * must be located.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 min_offset, u64 max_offset)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { false, false };
	int r, i;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
		return -EPERM;

	if (WARN_ON_ONCE(min_offset > max_offset))
		return -EINVAL;

	/* A shared bo cannot be migrated to VRAM */
	if (bo->prime_shared_count) {
		if (domain & AMDGPU_GEM_DOMAIN_GTT)
			domain = AMDGPU_GEM_DOMAIN_GTT;
		else
			return -EINVAL;
	}

	/* This assumes only APU display buffers are pinned with (VRAM|GTT).
	 * See function amdgpu_display_supported_domains()
	 */
	domain = amdgpu_bo_get_preferred_pin_domain(adev, domain);

	if (bo->pin_count) {
		uint32_t mem_type = bo->tbo.mem.mem_type;

		if (!(domain & amdgpu_mem_type_to_domain(mem_type)))
			return -EINVAL;

		bo->pin_count++;

		if (max_offset != 0) {
			u64 domain_start = bo->tbo.bdev->man[mem_type].gpu_offset;
			WARN_ON_ONCE(max_offset <
				     (amdgpu_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}

	bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	/* force to pin into visible video ram */
	if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
		bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	amdgpu_bo_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++) {
		unsigned fpfn, lpfn;

		fpfn = min_offset >> PAGE_SHIFT;
		lpfn = max_offset >> PAGE_SHIFT;

		if (fpfn > bo->placements[i].fpfn)
			bo->placements[i].fpfn = fpfn;
		if (!bo->placements[i].lpfn ||
		    (lpfn && lpfn < bo->placements[i].lpfn))
			bo->placements[i].lpfn = lpfn;
		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r)) {
		dev_err(adev->dev, "%p pin failed\n", bo);
		goto error;
	}

	bo->pin_count = 1;

	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
		atomic64_add(amdgpu_bo_size(bo), &adev->vram_pin_size);
		atomic64_add(amdgpu_vram_mgr_bo_visible_size(bo),
			     &adev->visible_pin_size);
	} else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
		atomic64_add(amdgpu_bo_size(bo), &adev->gart_pin_size);
	}

error:
	return r;
}
/**
 * amdgpu_bo_pin - pin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be pinned
 * @domain: domain to be pinned to
 *
 * A simple wrapper to amdgpu_bo_pin_restricted().
 * Provides a simpler API for buffers that do not have any strict restrictions
 * on where a buffer must be located.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
{
	return amdgpu_bo_pin_restricted(bo, domain, 0, 0);
}
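
/*
 * Illustrative sketch, not part of the driver: pinning requires the BO to be
 * reserved first; a scanout buffer might be pinned like this (bo and r are
 * the hypothetical caller's variables):
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (r)
 *		return r;
 *	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM);
 *	amdgpu_bo_unreserve(bo);
 */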
/**
 * amdgpu_bo_unpin - unpin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be unpinned
 *
 * Decreases the pin_count, and clears the flags if pin_count reaches 0.
 * Changes placement and pin size accordingly.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { false, false };
	int r, i;

	if (WARN_ON_ONCE(!bo->pin_count)) {
		dev_warn(adev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;

	amdgpu_bo_subtract_pin_size(bo);

	for (i = 0; i < bo->placement.num_placement; i++) {
		bo->placements[i].lpfn = 0;
		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
	}
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r))
		dev_err(adev->dev, "%p validate failed for unpin\n", bo);

	return r;
}
/**
 * amdgpu_bo_evict_vram - evict VRAM buffers
 * @adev: amdgpu device object
 *
 * Evicts all VRAM buffers on the lru list of the memory type.
 * Mainly used for evicting vram at suspend time.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
{
	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */
#ifndef CONFIG_HIBERNATION
	if (adev->flags & AMD_IS_APU) {
		/* Useless to evict on IGP chips */
		return 0;
	}
#endif
	return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
}
static const char *amdgpu_vram_names[] = {
	"UNKNOWN",
	"GDDR1",
	"DDR2",
	"GDDR3",
	"GDDR4",
	"GDDR5",
	"HBM",
	"DDR3",
	"DDR4",
};
/**
 * amdgpu_bo_init - initialize memory manager
 * @adev: amdgpu device object
 *
 * Calls amdgpu_ttm_init() to initialize amdgpu memory manager.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_init(struct amdgpu_device *adev)
{
	/* reserve PAT memory space to WC for VRAM */
	arch_io_reserve_memtype_wc(adev->gmc.aper_base,
				   adev->gmc.aper_size);

	/* Add an MTRR for the VRAM */
	adev->gmc.vram_mtrr = arch_phys_wc_add(adev->gmc.aper_base,
					       adev->gmc.aper_size);
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		 adev->gmc.mc_vram_size >> 20,
		 (unsigned long long)adev->gmc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %s\n",
		 adev->gmc.vram_width, amdgpu_vram_names[adev->gmc.vram_type]);
	return amdgpu_ttm_init(adev);
}
/**
 * amdgpu_bo_late_init - late init
 * @adev: amdgpu device object
 *
 * Calls amdgpu_ttm_late_init() to free resources used earlier during
 * initialization.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_late_init(struct amdgpu_device *adev)
{
	amdgpu_ttm_late_init(adev);

	return 0;
}
/**
 * amdgpu_bo_fini - tear down memory manager
 * @adev: amdgpu device object
 *
 * Reverses amdgpu_bo_init() to tear down memory manager.
 */
void amdgpu_bo_fini(struct amdgpu_device *adev)
{
	amdgpu_ttm_fini(adev);
	arch_phys_wc_del(adev->gmc.vram_mtrr);
	arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
}
/**
 * amdgpu_bo_fbdev_mmap - mmap fbdev memory
 * @bo: &amdgpu_bo buffer object
 * @vma: vma as input from the fbdev mmap method
 *
 * Calls ttm_fbdev_mmap() to mmap fbdev memory if it is backed by a bo.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
			 struct vm_area_struct *vma)
{
	return ttm_fbdev_mmap(vma, &bo->tbo);
}
/**
 * amdgpu_bo_set_tiling_flags - set tiling flags
 * @bo: &amdgpu_bo buffer object
 * @tiling_flags: new flags
 *
 * Sets buffer object's tiling flags with the new one. Used by GEM ioctl or
 * kernel driver to set the tiling flags on a buffer.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	if (adev->family <= AMDGPU_FAMILY_CZ &&
	    AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
		return -EINVAL;

	bo->tiling_flags = tiling_flags;
	return 0;
}
/**
 * amdgpu_bo_get_tiling_flags - get tiling flags
 * @bo: &amdgpu_bo buffer object
 * @tiling_flags: returned flags
 *
 * Gets buffer object's tiling flags. Used by GEM ioctl or kernel driver to
 * query the tiling flags on a buffer.
 */
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
{
	lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
}
/**
 * amdgpu_bo_set_metadata - set metadata
 * @bo: &amdgpu_bo buffer object
 * @metadata: new metadata
 * @metadata_size: size of the new metadata
 * @flags: flags of the new metadata
 *
 * Sets buffer object's metadata, its size and flags.
 * Used via GEM ioctl.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
			   uint32_t metadata_size, uint64_t flags)
{
	void *buffer;

	if (!metadata_size) {
		if (bo->metadata_size) {
			kfree(bo->metadata);
			bo->metadata = NULL;
			bo->metadata_size = 0;
		}
		return 0;
	}

	if (metadata == NULL)
		return -EINVAL;

	buffer = kmemdup(metadata, metadata_size, GFP_KERNEL);
	if (buffer == NULL)
		return -ENOMEM;

	kfree(bo->metadata);
	bo->metadata_flags = flags;
	bo->metadata = buffer;
	bo->metadata_size = metadata_size;

	return 0;
}
/**
 * amdgpu_bo_get_metadata - get metadata
 * @bo: &amdgpu_bo buffer object
 * @buffer: returned metadata
 * @buffer_size: size of the buffer
 * @metadata_size: size of the returned metadata
 * @flags: flags of the returned metadata
 *
 * Gets buffer object's metadata, its size and flags. buffer_size shall not be
 * less than metadata_size.
 * Used via GEM ioctl.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags)
{
	if (!buffer && !metadata_size)
		return -EINVAL;

	if (buffer) {
		if (buffer_size < bo->metadata_size)
			return -EINVAL;

		if (bo->metadata_size)
			memcpy(buffer, bo->metadata, bo->metadata_size);
	}

	if (metadata_size)
		*metadata_size = bo->metadata_size;
	if (flags)
		*flags = bo->metadata_flags;

	return 0;
}
/**
 * amdgpu_bo_move_notify - notification about a memory move
 * @bo: pointer to a buffer object
 * @evict: if this move is evicting the buffer from the graphics address space
 * @new_mem: new information of the buffer object
 *
 * Marks the corresponding &amdgpu_bo buffer object as invalid, also performs
 * bookkeeping.
 * TTM driver callback which is called when ttm moves a buffer.
 */
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   bool evict,
			   struct ttm_mem_reg *new_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo;
	struct ttm_mem_reg *old_mem = &bo->mem;

	if (!amdgpu_bo_is_amdgpu_bo(bo))
		return;

	abo = ttm_to_amdgpu_bo(bo);
	amdgpu_vm_bo_invalidate(adev, abo, evict);

	amdgpu_bo_kunmap(abo);

	/* remember the eviction */
	if (evict)
		atomic64_inc(&adev->num_evictions);

	/* update statistics */
	if (!new_mem)
		return;

	/* move_notify is called before move happens */
	trace_amdgpu_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
}
/**
 * amdgpu_bo_fault_reserve_notify - notification about a memory fault
 * @bo: pointer to a buffer object
 *
 * Notifies the driver we are taking a fault on this BO and have reserved it,
 * also performs bookkeeping.
 * TTM driver callback for dealing with vm faults.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_bo *abo;
	unsigned long offset, size;
	int r;

	if (!amdgpu_bo_is_amdgpu_bo(bo))
		return 0;

	abo = ttm_to_amdgpu_bo(bo);

	/* Remember that this BO was accessed by the CPU */
	abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	if (bo->mem.mem_type != TTM_PL_VRAM)
		return 0;

	size = bo->mem.num_pages << PAGE_SHIFT;
	offset = bo->mem.start << PAGE_SHIFT;
	if ((offset + size) <= adev->gmc.visible_vram_size)
		return 0;

	/* Can't move a pinned BO to visible VRAM */
	if (abo->pin_count > 0)
		return -EINVAL;

	/* hurrah the memory is not visible ! */
	atomic64_inc(&adev->num_vram_cpu_page_faults);
	amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
					AMDGPU_GEM_DOMAIN_GTT);

	/* Avoid costly evictions; only set GTT as a busy placement */
	abo->placement.num_busy_placement = 1;
	abo->placement.busy_placement = &abo->placements[1];

	r = ttm_bo_validate(bo, &abo->placement, &ctx);
	if (unlikely(r != 0))
		return r;

	offset = bo->mem.start << PAGE_SHIFT;
	/* this should never happen */
	if (bo->mem.mem_type == TTM_PL_VRAM &&
	    (offset + size) > adev->gmc.visible_vram_size)
		return -EINVAL;

	return 0;
}
/**
 * amdgpu_bo_fence - add fence to buffer object
 *
 * @bo: buffer object in question
 * @fence: fence to add
 * @shared: true if fence should be added shared
 *
 */
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
		     bool shared)
{
	struct reservation_object *resv = bo->tbo.resv;

	if (shared)
		reservation_object_add_shared_fence(resv, fence);
	else
		reservation_object_add_excl_fence(resv, fence);
}
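
/*
 * Illustrative sketch, not part of the driver: after scheduling a GPU
 * operation on a reserved BO, the resulting fence is typically attached so
 * that later users wait on it (bo and fence are the hypothetical caller's
 * variables):
 *
 *	amdgpu_bo_fence(bo, fence, true);
 *	dma_fence_put(fence);
 */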
/**
 * amdgpu_bo_sync_wait - Wait for BO reservation fences
 *
 * @bo: buffer object
 * @owner: fence owner
 * @intr: Whether the wait is interruptible
 *
 * Returns:
 * 0 on success, errno otherwise.
 */
int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_sync sync;
	int r;

	amdgpu_sync_create(&sync);
	amdgpu_sync_resv(adev, &sync, bo->tbo.resv, owner, false);
	r = amdgpu_sync_wait(&sync, intr);
	amdgpu_sync_free(&sync);

	return r;
}
/**
 * amdgpu_bo_gpu_offset - return GPU offset of bo
 * @bo: amdgpu object for which we query the offset
 *
 * Note: object should either be pinned or reserved when calling this
 * function, it might be useful to add check for this for debugging.
 *
 * Returns:
 * current GPU offset of the object.
 */
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
{
	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
	WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
		     !bo->pin_count && bo->tbo.type != ttm_bo_type_kernel);
	WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
		     !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));

	return amdgpu_gmc_sign_extend(bo->tbo.offset);
}
/**
 * amdgpu_bo_get_preferred_pin_domain - get preferred domain for scanout
 * @adev: amdgpu device object
 * @domain: allowed :ref:`memory domains <amdgpu_memory_domains>`
 *
 * Returns:
 * Which of the allowed domains is preferred for pinning the BO for scanout.
 */
uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
					    uint32_t domain)
{
	if (domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) {
		domain = AMDGPU_GEM_DOMAIN_VRAM;
		if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD)
			domain = AMDGPU_GEM_DOMAIN_GTT;
	}
	return domain;
}