/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#ifndef __AMDGPU_OBJECT_H__
#define __AMDGPU_OBJECT_H__

#include <drm/amdgpu_drm.h>

#define AMDGPU_BO_INVALID_OFFSET	LONG_MAX
#define AMDGPU_BO_MAX_PLACEMENTS	3

struct amdgpu_bo_param {
	enum ttm_bo_type		type;
	struct reservation_object	*resv;
};
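
/*
 * A minimal sketch of how amdgpu_bo_create() consumes this parameter block
 * (illustrative only; the size, alignment and domain members of the full
 * structure are filled in the same way before the call):
 *
 *	struct amdgpu_bo_param bp = {};
 *	struct amdgpu_bo *bo = NULL;
 *	int r;
 *
 *	bp.type = ttm_bo_type_kernel;
 *	bp.resv = NULL;
 *	r = amdgpu_bo_create(adev, &bp, &bo);
 */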

/* bo virtual addresses in a vm */
struct amdgpu_bo_va_mapping {
	struct amdgpu_bo_va		*bo_va;
	struct list_head		list;
	uint64_t			__subtree_last;
};

/* User space allocated BO in a VM */
struct amdgpu_bo_va {
	struct amdgpu_vm_bo_base	base;

	/* protected by bo being reserved */

	/* all other members protected by the VM PD being reserved */
	struct dma_fence		*last_pt_update;

	/* mappings for this bo_va */
	struct list_head		invalids;
	struct list_head		valids;

	/* If the mappings are cleared or filled */
};

struct amdgpu_bo {
	/* Protected by tbo.reserved */
	u32				preferred_domains;
	struct ttm_place		placements[AMDGPU_BO_MAX_PLACEMENTS];
	struct ttm_placement		placement;
	struct ttm_buffer_object	tbo;
	struct ttm_bo_kmap_obj		kmap;
	unsigned			prime_shared_count;
	/* list of all virtual addresses to which this bo is associated */
	/* Constant after initialization */
	struct drm_gem_object		gem_base;
	struct amdgpu_bo		*parent;
	struct amdgpu_bo		*shadow;

	struct ttm_bo_kmap_obj		dma_buf_vmap;
	struct amdgpu_mn		*mn;

	struct list_head		mn_list;
	struct list_head		shadow_list;

	struct kgd_mem			*kfd_bo;
};

static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo)
{
	return container_of(tbo, struct amdgpu_bo, tbo);
}

/**
 * amdgpu_mem_type_to_domain - return domain corresponding to mem_type
 * @mem_type:	ttm memory type
 *
 * Returns corresponding domain of the ttm mem_type
 */
static inline unsigned amdgpu_mem_type_to_domain(u32 mem_type)
{
	switch (mem_type) {
	case TTM_PL_VRAM:
		return AMDGPU_GEM_DOMAIN_VRAM;
	case TTM_PL_TT:
		return AMDGPU_GEM_DOMAIN_GTT;
	case TTM_PL_SYSTEM:
		return AMDGPU_GEM_DOMAIN_CPU;
	case AMDGPU_PL_GDS:
		return AMDGPU_GEM_DOMAIN_GDS;
	case AMDGPU_PL_GWS:
		return AMDGPU_GEM_DOMAIN_GWS;
	case AMDGPU_PL_OA:
		return AMDGPU_GEM_DOMAIN_OA;
	default:
		break;
	}
	return 0;
}
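
/*
 * Illustrative sketch: a buffer whose TTM placement is currently TTM_PL_VRAM
 * maps back to the VRAM GEM domain:
 *
 *	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
 *	if (domain == AMDGPU_GEM_DOMAIN_VRAM)
 *		... the bo currently lives in VRAM ...
 */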

/**
 * amdgpu_bo_reserve - reserve bo
 * @bo:		bo structure
 * @no_intr:	don't return -ERESTARTSYS on pending signal
 *
 * Returns:
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 */
static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	int r;

	r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			dev_err(adev->dev, "%p reserve failed\n", bo);
		return r;
	}
	return 0;
}

static inline void amdgpu_bo_unreserve(struct amdgpu_bo *bo)
{
	ttm_bo_unreserve(&bo->tbo);
}
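
/*
 * Typical reserve/unreserve pattern around accesses to reservation-protected
 * bo state (illustrative sketch; error handling is the caller's job):
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (r)
 *		return r;
 *	... touch bo state protected by the reservation ...
 *	amdgpu_bo_unreserve(bo);
 */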

static inline unsigned long amdgpu_bo_size(struct amdgpu_bo *bo)
{
	return bo->tbo.num_pages << PAGE_SHIFT;
}

static inline unsigned amdgpu_bo_ngpu_pages(struct amdgpu_bo *bo)
{
	return (bo->tbo.num_pages << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
}

static inline unsigned amdgpu_bo_gpu_page_alignment(struct amdgpu_bo *bo)
{
	return (bo->tbo.mem.page_alignment << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
}
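
/*
 * Worked example (assuming 4 KiB CPU pages and an AMDGPU_GPU_PAGE_SIZE of
 * 4096): a bo backed by 16 CPU pages has amdgpu_bo_size() == 16 << 12 ==
 * 65536 bytes and amdgpu_bo_ngpu_pages() == 65536 / 4096 == 16 GPU pages.
 */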

/**
 * amdgpu_bo_mmap_offset - return mmap offset of bo
 * @bo:	amdgpu object for which we query the offset
 *
 * Returns mmap offset of the object.
 */
static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
{
	return drm_vma_node_offset_addr(&bo->tbo.vma_node);
}

/**
 * amdgpu_bo_gpu_accessible - return whether the bo is currently in memory that
 * is accessible to the GPU.
 */
static inline bool amdgpu_bo_gpu_accessible(struct amdgpu_bo *bo)
{
	switch (bo->tbo.mem.mem_type) {
	case TTM_PL_TT: return amdgpu_gtt_mgr_has_gart_addr(&bo->tbo.mem);
	case TTM_PL_VRAM: return true;
	default: return false;
	}
}

/**
 * amdgpu_bo_in_cpu_visible_vram - check if BO is (partly) in visible VRAM
 */
static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	unsigned fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
	struct drm_mm_node *node = bo->tbo.mem.mm_node;
	unsigned long pages_left;

	if (bo->tbo.mem.mem_type != TTM_PL_VRAM)
		return false;

	for (pages_left = bo->tbo.mem.num_pages; pages_left;
	     pages_left -= node->size, node++)
		if (node->start < fpfn)
			return true;

	return false;
}

/**
 * amdgpu_bo_explicit_sync - return whether the bo is explicitly synced
 */
static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
{
	return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
}

bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
int amdgpu_bo_create(struct amdgpu_device *adev,
		     struct amdgpu_bo_param *bp,
		     struct amdgpu_bo **bo_ptr);
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
			      unsigned long size, int align,
			      u32 domain, struct amdgpu_bo **bo_ptr,
			      u64 *gpu_addr, void **cpu_addr);
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
			    unsigned long size, int align,
			    u32 domain, struct amdgpu_bo **bo_ptr,
			    u64 *gpu_addr, void **cpu_addr);
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
			   void **cpu_addr);
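
/*
 * Usage sketch for a driver-internal buffer (illustrative only; size, align
 * and domain values are placeholders):
 *
 *	struct amdgpu_bo *bo;
 *	u64 gpu_addr;
 *	void *cpu_addr;
 *	int r;
 *
 *	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
 *				    AMDGPU_GEM_DOMAIN_VRAM, &bo,
 *				    &gpu_addr, &cpu_addr);
 *	if (r)
 *		return r;
 *	... use cpu_addr and gpu_addr ...
 *	amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_addr);
 */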
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
void *amdgpu_bo_kptr(struct amdgpu_bo *bo);
void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo);
void amdgpu_bo_unref(struct amdgpu_bo **bo);
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain);
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 min_offset, u64 max_offset);
int amdgpu_bo_unpin(struct amdgpu_bo *bo);
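
/*
 * Pinning and unpinning are done with the bo reserved; a minimal sketch of
 * the common pattern (illustrative only, error handling elided):
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (!r) {
 *		r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
 *		amdgpu_bo_unreserve(bo);
 *	}
 *	...
 *	amdgpu_bo_reserve(bo, true);
 *	amdgpu_bo_unpin(bo);
 *	amdgpu_bo_unreserve(bo);
 */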
int amdgpu_bo_evict_vram(struct amdgpu_device *adev);
int amdgpu_bo_init(struct amdgpu_device *adev);
int amdgpu_bo_late_init(struct amdgpu_device *adev);
void amdgpu_bo_fini(struct amdgpu_device *adev);
int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
			 struct vm_area_struct *vma);
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags);
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags);
int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
			   uint32_t metadata_size, uint64_t flags);
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags);
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   bool evict,
			   struct ttm_mem_reg *new_mem);
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
		     bool shared);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
			       struct amdgpu_ring *ring,
			       struct amdgpu_bo *bo,
			       struct reservation_object *resv,
			       struct dma_fence **fence, bool direct);
int amdgpu_bo_validate(struct amdgpu_bo *bo);
int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  struct amdgpu_bo *bo,
				  struct reservation_object *resv,
				  struct dma_fence **fence,
				  bool direct);
uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
					    uint32_t domain);

static inline uint64_t amdgpu_sa_bo_gpu_addr(struct amdgpu_sa_bo *sa_bo)
{
	return sa_bo->manager->gpu_addr + sa_bo->soffset;
}

static inline void *amdgpu_sa_bo_cpu_addr(struct amdgpu_sa_bo *sa_bo)
{
	return sa_bo->manager->cpu_ptr + sa_bo->soffset;
}

int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
			      struct amdgpu_sa_manager *sa_manager,
			      unsigned size, u32 align, u32 domain);
void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
		     struct amdgpu_sa_bo **sa_bo,
		     unsigned size, unsigned align);
void amdgpu_sa_bo_free(struct amdgpu_device *adev,
		       struct amdgpu_sa_bo **sa_bo,
		       struct dma_fence *fence);
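
/*
 * Suballocation usage sketch (illustrative only): carve a small piece out of
 * a sa_manager that was set up earlier with amdgpu_sa_bo_manager_init(), then
 * resolve its GPU and CPU addresses; the fence passed to amdgpu_sa_bo_free()
 * is whatever fence guards the last use of the suballocation:
 *
 *	struct amdgpu_sa_bo *sa_bo;
 *	int r;
 *
 *	r = amdgpu_sa_bo_new(sa_manager, &sa_bo, 256, 16);
 *	if (!r) {
 *		u64 gpu_addr = amdgpu_sa_bo_gpu_addr(sa_bo);
 *		void *cpu_ptr = amdgpu_sa_bo_cpu_addr(sa_bo);
 *		...
 *		amdgpu_sa_bo_free(adev, &sa_bo, fence);
 *	}
 */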

#if defined(CONFIG_DEBUG_FS)
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
				  struct seq_file *m);
#endif /* CONFIG_DEBUG_FS */

#endif /* __AMDGPU_OBJECT_H__ */