/*
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

int amdgpu_ttm_init(struct amdgpu_device *adev);
void amdgpu_ttm_fini(struct amdgpu_device *adev);

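/**
 * amdgpu_get_vis_part_size - CPU visible portion of a placement
 *
 * @adev: amdgpu device the buffer belongs to
 * @mem: TTM memory registration describing the current placement
 *
 * Returns how many bytes of @mem fall inside the CPU visible part of
 * VRAM, or 0 if the placement starts beyond the visible window.
 */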
static u64 amdgpu_get_vis_part_size(struct amdgpu_device *adev,
				    struct ttm_mem_reg *mem)
{
	u64 ret = 0;

	if (mem->start << PAGE_SHIFT < adev->mc.visible_vram_size) {
		ret = (u64)((mem->start << PAGE_SHIFT) + mem->size) >
			adev->mc.visible_vram_size ?
			adev->mc.visible_vram_size - (mem->start << PAGE_SHIFT) :
			mem->size;
	}
	return ret;
}

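/**
 * amdgpu_update_memory_usage - adjust the global GTT/VRAM usage counters
 *
 * @adev: amdgpu device
 * @old_mem: placement being vacated, or NULL
 * @new_mem: placement being populated, or NULL
 *
 * Adds @new_mem to and subtracts @old_mem from the per device usage
 * statistics, including the CPU visible VRAM counter.
 */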
static void amdgpu_update_memory_usage(struct amdgpu_device *adev,
				       struct ttm_mem_reg *old_mem,
				       struct ttm_mem_reg *new_mem)
{
	u64 vis_size;

	if (!adev)
		return;

	if (new_mem) {
		switch (new_mem->mem_type) {
		case TTM_PL_TT:
			atomic64_add(new_mem->size, &adev->gtt_usage);
			break;
		case TTM_PL_VRAM:
			atomic64_add(new_mem->size, &adev->vram_usage);
			vis_size = amdgpu_get_vis_part_size(adev, new_mem);
			atomic64_add(vis_size, &adev->vram_vis_usage);
			break;
		}
	}

	if (old_mem) {
		switch (old_mem->mem_type) {
		case TTM_PL_TT:
			atomic64_sub(old_mem->size, &adev->gtt_usage);
			break;
		case TTM_PL_VRAM:
			atomic64_sub(old_mem->size, &adev->vram_usage);
			vis_size = amdgpu_get_vis_part_size(adev, old_mem);
			atomic64_sub(vis_size, &adev->vram_vis_usage);
			break;
		}
	}
}

static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct amdgpu_bo *bo;

	bo = container_of(tbo, struct amdgpu_bo, tbo);

	amdgpu_update_memory_usage(bo->adev, &bo->tbo.mem, NULL);

	mutex_lock(&bo->adev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&bo->adev->gem.mutex);
	drm_gem_object_release(&bo->gem_base);
	amdgpu_bo_unref(&bo->parent);
	kfree(bo->metadata);
	kfree(bo);
}

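/**
 * amdgpu_ttm_bo_is_amdgpu_bo - check if a TTM BO is an amdgpu BO
 *
 * @bo: TTM buffer object to check
 *
 * Returns true if the object uses amdgpu_ttm_bo_destroy as its destroy
 * callback and can therefore safely be cast to struct amdgpu_bo.
 */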
bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &amdgpu_ttm_bo_destroy)
		return true;
	return false;
}

static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
				      struct ttm_placement *placement,
				      struct ttm_place *placements,
				      u32 domain, u64 flags)
{
	u32 c = 0, i;

	placement->placement = placements;
	placement->busy_placement = placements;

	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
		if (flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS &&
		    adev->mc.visible_vram_size < adev->mc.real_vram_size) {
			placements[c].fpfn =
				adev->mc.visible_vram_size >> PAGE_SHIFT;
			placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_VRAM | TTM_PL_FLAG_TOPDOWN;
		}
		placements[c].fpfn = 0;
		placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
			TTM_PL_FLAG_VRAM;
		if (!(flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED))
			placements[c - 1].flags |= TTM_PL_FLAG_TOPDOWN;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) {
			placements[c].fpfn = 0;
			placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT |
				TTM_PL_FLAG_UNCACHED;
		} else {
			placements[c].fpfn = 0;
			placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
		}
	}

	if (domain & AMDGPU_GEM_DOMAIN_CPU) {
		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) {
			placements[c].fpfn = 0;
			placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_SYSTEM |
				TTM_PL_FLAG_UNCACHED;
		} else {
			placements[c].fpfn = 0;
			placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
		}
	}

	if (domain & AMDGPU_GEM_DOMAIN_GDS) {
		placements[c].fpfn = 0;
		placements[c++].flags = TTM_PL_FLAG_UNCACHED |
			AMDGPU_PL_FLAG_GDS;
	}
	if (domain & AMDGPU_GEM_DOMAIN_GWS) {
		placements[c].fpfn = 0;
		placements[c++].flags = TTM_PL_FLAG_UNCACHED |
			AMDGPU_PL_FLAG_GWS;
	}
	if (domain & AMDGPU_GEM_DOMAIN_OA) {
		placements[c].fpfn = 0;
		placements[c++].flags = TTM_PL_FLAG_UNCACHED |
			AMDGPU_PL_FLAG_OA;
	}

	if (!c) {
		placements[c].fpfn = 0;
		placements[c++].flags = TTM_PL_MASK_CACHING |
			TTM_PL_FLAG_SYSTEM;
	}
	placement->num_placement = c;
	placement->num_busy_placement = c;

	for (i = 0; i < c; i++) {
		if ((flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
		    (placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    !placements[i].fpfn)
			placements[i].lpfn =
				adev->mc.visible_vram_size >> PAGE_SHIFT;
		else
			placements[i].lpfn = 0;
	}
}

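/**
 * amdgpu_ttm_placement_from_domain - rebuild a BO's placement list
 *
 * @rbo: buffer object to update
 * @domain: bitmask of AMDGPU_GEM_DOMAIN_* values to allow
 *
 * Rebuilds @rbo's placement array from the requested domains and the
 * BO's creation flags.
 */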
void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain)
{
	amdgpu_ttm_placement_init(rbo->adev, &rbo->placement,
				  rbo->placements, domain, rbo->flags);
}

static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo,
					struct ttm_placement *placement)
{
	BUG_ON(placement->num_placement > (AMDGPU_GEM_DOMAIN_MAX + 1));

	memcpy(bo->placements, placement->placement,
	       placement->num_placement * sizeof(struct ttm_place));
	bo->placement.num_placement = placement->num_placement;
	bo->placement.num_busy_placement = placement->num_busy_placement;
	bo->placement.placement = bo->placements;
	bo->placement.busy_placement = bo->placements;
}

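/**
 * amdgpu_bo_create_restricted - create a BO with a caller supplied placement
 *
 * Like amdgpu_bo_create(), but takes an explicit ttm_placement instead
 * of deriving one from the domain, so callers can restrict where the
 * BO is allowed to live. Returns 0 on success or a negative error code.
 */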
int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
				unsigned long size, int byte_align,
				bool kernel, u32 domain, u64 flags,
				struct sg_table *sg,
				struct ttm_placement *placement,
				struct reservation_object *resv,
				struct amdgpu_bo **bo_ptr)
{
	struct amdgpu_bo *bo;
	enum ttm_bo_type type;
	unsigned long page_align;
	size_t acc_size;
	int r;

	page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
	size = ALIGN(size, PAGE_SIZE);

	if (kernel) {
		type = ttm_bo_type_kernel;
	} else if (sg) {
		type = ttm_bo_type_sg;
	} else {
		type = ttm_bo_type_device;
	}
	*bo_ptr = NULL;

	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
				       sizeof(struct amdgpu_bo));

	bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	r = drm_gem_object_init(adev->ddev, &bo->gem_base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	bo->adev = adev;
	INIT_LIST_HEAD(&bo->list);
	INIT_LIST_HEAD(&bo->va);
	bo->initial_domain = domain & (AMDGPU_GEM_DOMAIN_VRAM |
				       AMDGPU_GEM_DOMAIN_GTT |
				       AMDGPU_GEM_DOMAIN_CPU |
				       AMDGPU_GEM_DOMAIN_GDS |
				       AMDGPU_GEM_DOMAIN_GWS |
				       AMDGPU_GEM_DOMAIN_OA);
	bo->flags = flags;

	amdgpu_fill_placement_to_bo(bo, placement);
	/* Kernel allocations are uninterruptible */
	r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, page_align, !kernel, NULL,
			acc_size, sg, resv, &amdgpu_ttm_bo_destroy);
	if (unlikely(r != 0)) {
		return r;
	}
	*bo_ptr = bo;

	trace_amdgpu_bo_create(bo);

	return 0;
}

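/**
 * amdgpu_bo_create - create an amdgpu buffer object
 *
 * Builds a placement list from the requested domains and creation flags
 * and then hands off to amdgpu_bo_create_restricted().
 */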
int amdgpu_bo_create(struct amdgpu_device *adev,
		     unsigned long size, int byte_align,
		     bool kernel, u32 domain, u64 flags,
		     struct sg_table *sg,
		     struct reservation_object *resv,
		     struct amdgpu_bo **bo_ptr)
{
	struct ttm_placement placement = {0};
	struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];

	memset(&placements, 0,
	       (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place));

	amdgpu_ttm_placement_init(adev, &placement,
				  placements, domain, flags);

	return amdgpu_bo_create_restricted(adev, size, byte_align, kernel,
					   domain, flags, sg, &placement,
					   resv, bo_ptr);
}

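/**
 * amdgpu_bo_kmap - map a BO into the kernel address space
 *
 * @bo: buffer object to map
 * @ptr: optional location to store the kernel virtual address
 *
 * Returns 0 on success, -EPERM for BOs created with
 * AMDGPU_GEM_CREATE_NO_CPU_ACCESS, or the error from ttm_bo_kmap().
 */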
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
{
	bool is_iomem;
	int r;

	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
		return -EPERM;

	if (bo->kptr) {
		if (ptr)
			*ptr = bo->kptr;
		return 0;
	}

	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r)
		return r;

	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr)
		*ptr = bo->kptr;

	return 0;
}

void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->kptr = NULL;
	ttm_bo_kunmap(&bo->kmap);
}

struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
{
	if (bo == NULL)
		return NULL;

	ttm_bo_reference(&bo->tbo);
	return bo;
}

void amdgpu_bo_unref(struct amdgpu_bo **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;

	tbo = &((*bo)->tbo);
	ttm_bo_unref(&tbo);
	if (tbo == NULL)
		*bo = NULL;
}

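/**
 * amdgpu_bo_pin_restricted - pin a BO within a given offset range
 *
 * @bo: buffer object to pin
 * @domain: domain to pin the BO into
 * @min_offset: minimum GPU offset of the pinned placement
 * @max_offset: maximum GPU offset, or 0 for no upper limit
 * @gpu_addr: optional location to store the pinned GPU address
 *
 * Pinned placements are marked TTM_PL_FLAG_NO_EVICT so the BO stays
 * resident. Returns 0 on success or a negative error code.
 */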
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 min_offset, u64 max_offset,
			     u64 *gpu_addr)
{
	int r, i;
	unsigned fpfn, lpfn;

	if (amdgpu_ttm_tt_has_userptr(bo->tbo.ttm))
		return -EPERM;

	if (WARN_ON_ONCE(min_offset > max_offset))
		return -EINVAL;

	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = amdgpu_bo_gpu_offset(bo);

		if (max_offset != 0) {
			u64 domain_start;

			if (domain == AMDGPU_GEM_DOMAIN_VRAM)
				domain_start = bo->adev->mc.vram_start;
			else
				domain_start = bo->adev->mc.gtt_start;
			WARN_ON_ONCE(max_offset <
				     (amdgpu_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}

	amdgpu_ttm_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++) {
		/* force to pin into visible video ram */
		if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    !(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) &&
		    (!max_offset || max_offset > bo->adev->mc.visible_vram_size)) {
			if (WARN_ON_ONCE(min_offset >
					 bo->adev->mc.visible_vram_size))
				return -EINVAL;
			fpfn = min_offset >> PAGE_SHIFT;
			lpfn = bo->adev->mc.visible_vram_size >> PAGE_SHIFT;
		} else {
			fpfn = min_offset >> PAGE_SHIFT;
			lpfn = max_offset >> PAGE_SHIFT;
		}
		if (fpfn > bo->placements[i].fpfn)
			bo->placements[i].fpfn = fpfn;
		if (lpfn && lpfn < bo->placements[i].lpfn)
			bo->placements[i].lpfn = lpfn;
		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (likely(r == 0)) {
		bo->pin_count = 1;
		if (gpu_addr != NULL)
			*gpu_addr = amdgpu_bo_gpu_offset(bo);
		if (domain == AMDGPU_GEM_DOMAIN_VRAM)
			bo->adev->vram_pin_size += amdgpu_bo_size(bo);
		else
			bo->adev->gart_pin_size += amdgpu_bo_size(bo);
	} else {
		dev_err(bo->adev->dev, "%p pin failed\n", bo);
	}
	return r;
}

int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)
{
	return amdgpu_bo_pin_restricted(bo, domain, 0, 0, gpu_addr);
}

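/**
 * amdgpu_bo_unpin - drop one pin reference on a BO
 *
 * @bo: buffer object to unpin
 *
 * Once the last pin reference is dropped, TTM_PL_FLAG_NO_EVICT is
 * cleared again and the BO becomes evictable.
 */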
int amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
	int r, i;

	if (!bo->pin_count) {
		dev_warn(bo->adev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++) {
		bo->placements[i].lpfn = 0;
		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
	}
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (likely(r == 0)) {
		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
			bo->adev->vram_pin_size -= amdgpu_bo_size(bo);
		else
			bo->adev->gart_pin_size -= amdgpu_bo_size(bo);
	} else {
		dev_err(bo->adev->dev, "%p validate failed for unpin\n", bo);
	}
	return r;
}

int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
{
	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
	if (0 && (adev->flags & AMD_IS_APU)) {
		/* Useless to evict on IGP chips */
		return 0;
	}
	return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
}

void amdgpu_bo_force_delete(struct amdgpu_device *adev)
{
	struct amdgpu_bo *bo, *n;

	if (list_empty(&adev->gem.objects)) {
		return;
	}
	dev_err(adev->dev, "Userspace still has active objects!\n");
	list_for_each_entry_safe(bo, n, &adev->gem.objects, list) {
		dev_err(adev->dev, "%p %p %lu %lu force free\n",
			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
			*((unsigned long *)&bo->gem_base.refcount));
		mutex_lock(&bo->adev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&bo->adev->gem.mutex);
		/* this should unref the ttm bo */
		drm_gem_object_unreference_unlocked(&bo->gem_base);
	}
}

int amdgpu_bo_init(struct amdgpu_device *adev)
{
	/* Add an MTRR for the VRAM */
	adev->mc.vram_mtrr = arch_phys_wc_add(adev->mc.aper_base,
					      adev->mc.aper_size);
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		 adev->mc.mc_vram_size >> 20,
		 (unsigned long long)adev->mc.aper_size >> 20);
	DRM_INFO("RAM width %dbits DDR\n",
		 adev->mc.vram_width);
	return amdgpu_ttm_init(adev);
}

void amdgpu_bo_fini(struct amdgpu_device *adev)
{
	amdgpu_ttm_fini(adev);
	arch_phys_wc_del(adev->mc.vram_mtrr);
}

int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
			 struct vm_area_struct *vma)
{
	return ttm_fbdev_mmap(vma, &bo->tbo);
}

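/**
 * amdgpu_bo_set_tiling_flags - set the tiling flags of a BO
 *
 * @bo: buffer object
 * @tiling_flags: new AMDGPU_TILING_* flags
 *
 * Returns -EINVAL if the TILE_SPLIT field is out of range, 0 otherwise.
 */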
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
{
	if (AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
		return -EINVAL;

	bo->tiling_flags = tiling_flags;
	return 0;
}

void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
{
	lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
}

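/**
 * amdgpu_bo_set_metadata - attach opaque userspace metadata to a BO
 *
 * @bo: buffer object
 * @metadata: metadata blob to copy, or NULL
 * @metadata_size: size of the blob, 0 to clear any existing metadata
 * @flags: metadata flags to store alongside the blob
 *
 * The blob is duplicated with kmemdup(), so the caller keeps ownership
 * of @metadata.
 */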
int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
			   uint32_t metadata_size, uint64_t flags)
{
	void *buffer;

	if (!metadata_size) {
		if (bo->metadata_size) {
			kfree(bo->metadata);
			bo->metadata_size = 0;
		}
		return 0;
	}

	if (metadata == NULL)
		return -EINVAL;

	buffer = kmemdup(metadata, metadata_size, GFP_KERNEL);
	if (buffer == NULL)
		return -ENOMEM;

	kfree(bo->metadata);
	bo->metadata_flags = flags;
	bo->metadata = buffer;
	bo->metadata_size = metadata_size;

	return 0;
}

int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags)
{
	if (!buffer && !metadata_size)
		return -EINVAL;

	if (buffer) {
		if (buffer_size < bo->metadata_size)
			return -EINVAL;

		if (bo->metadata_size)
			memcpy(buffer, bo->metadata, bo->metadata_size);
	}

	if (metadata_size)
		*metadata_size = bo->metadata_size;
	if (flags)
		*flags = bo->metadata_flags;

	return 0;
}

void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *new_mem)
{
	struct amdgpu_bo *rbo;

	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
		return;

	rbo = container_of(bo, struct amdgpu_bo, tbo);
	amdgpu_vm_bo_invalidate(rbo->adev, rbo);

	/* update statistics */
	if (!new_mem)
		return;

	/* move_notify is called before move happens */
	amdgpu_update_memory_usage(rbo->adev, &bo->mem, new_mem);
}

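/**
 * amdgpu_bo_fault_reserve_notify - handle a CPU fault on an invisible BO
 *
 * @bo: TTM buffer object that faulted
 *
 * TTM fault callback: if the BO currently lives outside the CPU visible
 * part of VRAM, try to move it into visible VRAM and fall back to GTT
 * when that fails with -ENOMEM.
 */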
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev;
	struct amdgpu_bo *abo;
	unsigned long offset, size, lpfn;
	int i, r;

	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
		return 0;

	abo = container_of(bo, struct amdgpu_bo, tbo);
	adev = abo->adev;
	if (bo->mem.mem_type != TTM_PL_VRAM)
		return 0;

	size = bo->mem.num_pages << PAGE_SHIFT;
	offset = bo->mem.start << PAGE_SHIFT;
	if ((offset + size) <= adev->mc.visible_vram_size)
		return 0;

	/* hurrah the memory is not visible! */
	amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM);
	lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
	for (i = 0; i < abo->placement.num_placement; i++) {
		/* Force into visible VRAM */
		if ((abo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    (!abo->placements[i].lpfn || abo->placements[i].lpfn > lpfn))
			abo->placements[i].lpfn = lpfn;
	}
	r = ttm_bo_validate(bo, &abo->placement, false, false);
	if (unlikely(r == -ENOMEM)) {
		amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
		return ttm_bo_validate(bo, &abo->placement, false, false);
	} else if (unlikely(r != 0)) {
		return r;
	}

	offset = bo->mem.start << PAGE_SHIFT;
	/* this should never happen */
	if ((offset + size) > adev->mc.visible_vram_size)
		return -EINVAL;

	return 0;
}

/**
 * amdgpu_bo_fence - add fence to buffer object
 *
 * @bo: buffer object in question
 * @fence: fence to add
 * @shared: true if fence should be added shared
 *
 */
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
		     bool shared)
{
	struct reservation_object *resv = bo->tbo.resv;

	if (shared)
		reservation_object_add_shared_fence(resv, fence);
	else
		reservation_object_add_excl_fence(resv, fence);
}