/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#ifndef __AMDGPU_OBJECT_H__
#define __AMDGPU_OBJECT_H__

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"

#define AMDGPU_BO_INVALID_OFFSET	LONG_MAX
/* bo virtual addresses in a vm */
struct amdgpu_bo_va_mapping {
	struct amdgpu_bo_va		*bo_va;
	struct list_head		list;
	struct rb_node			rb;
	uint64_t			start;
	uint64_t			last;
	uint64_t			__subtree_last;
	uint64_t			offset;
	uint64_t			flags;
};
/* User space allocated BO in a VM */
struct amdgpu_bo_va {
	struct amdgpu_vm_bo_base	base;

	/* protected by bo being reserved */
	unsigned			ref_count;

	/* all other members protected by the VM PD being reserved */
	struct dma_fence		*last_pt_update;

	/* mappings for this bo_va */
	struct list_head		invalids;
	struct list_head		valids;

	/* If the mappings are cleared or filled */
	bool				cleared;
};
struct amdgpu_bo {
	/* Protected by tbo.reserved */
	u32				preferred_domains;
	u32				allowed_domains;
	struct ttm_place		placements[AMDGPU_GEM_DOMAIN_MAX + 1];
	struct ttm_placement		placement;
	struct ttm_buffer_object	tbo;
	struct ttm_bo_kmap_obj		kmap;
	u64				flags;
	unsigned			pin_count;
	u64				tiling_flags;
	u64				metadata_flags;
	void				*metadata;
	u32				metadata_size;
	unsigned			prime_shared_count;
	/* list of all virtual address to which this bo is associated to */
	struct list_head		va;
	/* Constant after initialization */
	struct drm_gem_object		gem_base;
	struct amdgpu_bo		*parent;
	struct amdgpu_bo		*shadow;

	struct ttm_bo_kmap_obj		dma_buf_vmap;
	struct amdgpu_mn		*mn;

	union {
		struct list_head	mn_list;
		struct list_head	shadow_list;
	};
};
static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo)
{
	return container_of(tbo, struct amdgpu_bo, tbo);
}
/**
 * amdgpu_mem_type_to_domain - return domain corresponding to mem_type
 * @mem_type:	ttm memory type
 *
 * Returns corresponding domain of the ttm mem_type
 */
static inline unsigned amdgpu_mem_type_to_domain(u32 mem_type)
{
	switch (mem_type) {
	case TTM_PL_VRAM:
		return AMDGPU_GEM_DOMAIN_VRAM;
	case TTM_PL_TT:
		return AMDGPU_GEM_DOMAIN_GTT;
	case TTM_PL_SYSTEM:
		return AMDGPU_GEM_DOMAIN_CPU;
	case AMDGPU_PL_GDS:
		return AMDGPU_GEM_DOMAIN_GDS;
	case AMDGPU_PL_GWS:
		return AMDGPU_GEM_DOMAIN_GWS;
	case AMDGPU_PL_OA:
		return AMDGPU_GEM_DOMAIN_OA;
	default:
		break;
	}
	return 0;
}
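/*
 * Usage sketch (illustrative, not part of this header): this helper is
 * typically used to translate a BO's current TTM placement back into a GEM
 * domain, e.g. to test whether a buffer currently sits in VRAM.  "abo" below
 * is an assumed, already initialized amdgpu_bo pointer:
 *
 *	if (amdgpu_mem_type_to_domain(abo->tbo.mem.mem_type) ==
 *	    AMDGPU_GEM_DOMAIN_VRAM)
 *		vram_bytes += amdgpu_bo_size(abo);
 */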
/**
 * amdgpu_bo_reserve - reserve bo
 * @bo:		bo structure
 * @no_intr:	don't return -ERESTARTSYS on pending signal
 *
 * Returns:
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 */
static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	int r;

	r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			dev_err(adev->dev, "%p reserve failed\n", bo);
		return r;
	}
	return 0;
}
static inline void amdgpu_bo_unreserve(struct amdgpu_bo *bo)
{
	ttm_bo_unreserve(&bo->tbo);
}
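/*
 * Usage sketch (illustrative only): the reservation helpers are normally used
 * in a reserve/access/unreserve pattern.  With no_intr == false the reserve
 * may fail with -ERESTARTSYS, which callers are expected to propagate back to
 * user space.  "bo" is an assumed, already created amdgpu_bo:
 *
 *	int r;
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (unlikely(r != 0))
 *		return r;
 *	... touch fields protected by the reservation, pin/kmap, etc. ...
 *	amdgpu_bo_unreserve(bo);
 */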
static inline unsigned long amdgpu_bo_size(struct amdgpu_bo *bo)
{
	return bo->tbo.num_pages << PAGE_SHIFT;
}

static inline unsigned amdgpu_bo_ngpu_pages(struct amdgpu_bo *bo)
{
	return (bo->tbo.num_pages << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
}

static inline unsigned amdgpu_bo_gpu_page_alignment(struct amdgpu_bo *bo)
{
	return (bo->tbo.mem.page_alignment << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
}
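/*
 * Note (informal, assuming the usual 4 KiB AMDGPU_GPU_PAGE_SIZE): with 4 KiB
 * CPU pages a BO of tbo.num_pages == 4 is 16 KiB, so amdgpu_bo_size() returns
 * 16384 and amdgpu_bo_ngpu_pages() returns 4; on a 64 KiB CPU page kernel the
 * same four CPU pages cover 64 GPU pages, which is why these helpers convert
 * through bytes rather than assuming the two page sizes match.
 */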
/**
 * amdgpu_bo_mmap_offset - return mmap offset of bo
 * @bo:	amdgpu object for which we query the offset
 *
 * Returns mmap offset of the object.
 */
static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
{
	return drm_vma_node_offset_addr(&bo->tbo.vma_node);
}
/**
 * amdgpu_bo_gpu_accessible - return whether the bo is currently in memory that
 * is accessible to the GPU.
 */
static inline bool amdgpu_bo_gpu_accessible(struct amdgpu_bo *bo)
{
	switch (bo->tbo.mem.mem_type) {
	case TTM_PL_TT: return amdgpu_gtt_mgr_has_gart_addr(&bo->tbo.mem);
	case TTM_PL_VRAM: return true;
	default: return false;
	}
}
/**
 * amdgpu_bo_explicit_sync - return whether the bo is explicitly synced
 */
static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
{
	return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
}
int amdgpu_bo_create(struct amdgpu_device *adev,
		     unsigned long size, int byte_align,
		     bool kernel, u32 domain, u64 flags,
		     struct sg_table *sg,
		     struct reservation_object *resv,
		     uint64_t init_value,
		     struct amdgpu_bo **bo_ptr);
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
			      unsigned long size, int align,
			      u32 domain, struct amdgpu_bo **bo_ptr,
			      u64 *gpu_addr, void **cpu_addr);
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
			    unsigned long size, int align,
			    u32 domain, struct amdgpu_bo **bo_ptr,
			    u64 *gpu_addr, void **cpu_addr);
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
			   void **cpu_addr);
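/*
 * Usage sketch (illustrative, the variable names are assumptions): a typical
 * kernel-owned, pinned and CPU-mapped allocation pairs
 * amdgpu_bo_create_kernel() with amdgpu_bo_free_kernel():
 *
 *	struct amdgpu_bo *bo = NULL;
 *	u64 gpu_addr;
 *	void *cpu_ptr;
 *	int r;
 *
 *	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
 *				    AMDGPU_GEM_DOMAIN_VRAM, &bo,
 *				    &gpu_addr, &cpu_ptr);
 *	if (r)
 *		return r;
 *	... use gpu_addr from the GPU and cpu_ptr from the CPU ...
 *	amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_ptr);
 */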
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
void *amdgpu_bo_kptr(struct amdgpu_bo *bo);
void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
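/*
 * Usage sketch (illustrative only): CPU access through the kmap helpers is
 * normally done with the BO reserved.  "bo" is an assumed, already pinned
 * amdgpu_bo:
 *
 *	void *ptr;
 *	int r;
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (r)
 *		return r;
 *	r = amdgpu_bo_kmap(bo, &ptr);
 *	if (!r) {
 *		memset(ptr, 0, amdgpu_bo_size(bo));
 *		amdgpu_bo_kunmap(bo);
 *	}
 *	amdgpu_bo_unreserve(bo);
 */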
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo);
void amdgpu_bo_unref(struct amdgpu_bo **bo);
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr);
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 min_offset, u64 max_offset,
			     u64 *gpu_addr);
int amdgpu_bo_unpin(struct amdgpu_bo *bo);
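/*
 * Usage sketch (illustrative only): pinning is done with the BO reserved and,
 * on success, hands back the buffer's fixed GPU address; every successful pin
 * has to be balanced by an amdgpu_bo_unpin() under a later reservation.
 * "bo" below is an assumed, already created amdgpu_bo:
 *
 *	u64 gpu_addr;
 *	int r;
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (r)
 *		return r;
 *	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, &gpu_addr);
 *	amdgpu_bo_unreserve(bo);
 */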
int amdgpu_bo_evict_vram(struct amdgpu_device *adev);
int amdgpu_bo_init(struct amdgpu_device *adev);
void amdgpu_bo_fini(struct amdgpu_device *adev);
int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
			 struct vm_area_struct *vma);
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags);
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags);
int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
			    uint32_t metadata_size, uint64_t flags);
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags);
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   bool evict,
			   struct ttm_mem_reg *new_mem);
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
		     bool shared);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
			       struct amdgpu_ring *ring,
			       struct amdgpu_bo *bo,
			       struct reservation_object *resv,
			       struct dma_fence **fence, bool direct);
int amdgpu_bo_validate(struct amdgpu_bo *bo);
int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  struct amdgpu_bo *bo,
				  struct reservation_object *resv,
				  struct dma_fence **fence,
				  bool direct);
/*
 * sub allocation
 */

static inline uint64_t amdgpu_sa_bo_gpu_addr(struct amdgpu_sa_bo *sa_bo)
{
	return sa_bo->manager->gpu_addr + sa_bo->soffset;
}

static inline void * amdgpu_sa_bo_cpu_addr(struct amdgpu_sa_bo *sa_bo)
{
	return sa_bo->manager->cpu_ptr + sa_bo->soffset;
}
int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
			      struct amdgpu_sa_manager *sa_manager,
			      unsigned size, u32 align, u32 domain);
void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev,
				 struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
		     struct amdgpu_sa_bo **sa_bo,
		     unsigned size, unsigned align);
void amdgpu_sa_bo_free(struct amdgpu_device *adev,
		       struct amdgpu_sa_bo **sa_bo,
		       struct dma_fence *fence);
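/*
 * Usage sketch (illustrative; "sa_manager", "adev" and "fence" are assumed,
 * already set up objects): the sub-allocator hands out small pieces of one
 * backing BO and takes them back once the fence passed to
 * amdgpu_sa_bo_free() has signaled:
 *
 *	struct amdgpu_sa_bo *sa_bo;
 *	int r;
 *
 *	r = amdgpu_sa_bo_new(sa_manager, &sa_bo, 256, 16);
 *	if (r)
 *		return r;
 *	... fill the piece via amdgpu_sa_bo_cpu_addr(sa_bo), point the GPU at
 *	    amdgpu_sa_bo_gpu_addr(sa_bo), submit work and obtain "fence" ...
 *	amdgpu_sa_bo_free(adev, &sa_bo, fence);
 */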
#if defined(CONFIG_DEBUG_FS)
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
				  struct seq_file *m);
#endif

#endif