/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */
#include <drm/drmP.h>
#include "amdgpu.h"

struct amdgpu_gtt_mgr {
	struct drm_mm mm;
	spinlock_t lock;
	atomic64_t available;
};

struct amdgpu_gtt_node {
	struct drm_mm_node node;
	struct ttm_buffer_object *tbo;
};
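/*
 * Note: mem->mm_node points at an amdgpu_gtt_node, whose first member is
 * the embedded drm_mm_node, so the two can be converted back and forth
 * with container_of(). A node whose start is still AMDGPU_BO_INVALID_OFFSET
 * has its pages accounted in mgr->available but no actual GART address
 * range assigned yet; see amdgpu_gtt_mgr_has_gart_addr() below.
 */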
/**
 * amdgpu_gtt_mgr_init - init GTT manager and DRM MM
 *
 * @man: TTM memory type manager
 * @p_size: maximum size of GTT
 *
 * Allocate and initialize the GTT manager.
 */
static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man,
			       unsigned long p_size)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
	struct amdgpu_gtt_mgr *mgr;
	uint64_t start, size;

	mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
	if (!mgr)
		return -ENOMEM;

	start = AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS;
	size = (adev->gmc.gart_size >> PAGE_SHIFT) - start;
	drm_mm_init(&mgr->mm, start, size);
	spin_lock_init(&mgr->lock);
	atomic64_set(&mgr->available, p_size);
	man->priv = mgr;
	return 0;
}
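/*
 * Layout note: the drm_mm range deliberately starts after the first
 * AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS pages,
 * keeping that window at the start of the GART out of regular
 * allocations (presumably reserved for driver-internal transfers), while
 * mgr->available still tracks the full p_size handed in by TTM.
 */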
/**
 * amdgpu_gtt_mgr_fini - free and destroy GTT manager
 *
 * @man: TTM memory type manager
 *
 * Destroy and free the GTT manager; all ranges are expected to have been
 * freed already.
 */
static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man)
{
	struct amdgpu_gtt_mgr *mgr = man->priv;

	spin_lock(&mgr->lock);
	drm_mm_takedown(&mgr->mm);
	spin_unlock(&mgr->lock);

	kfree(mgr);
	man->priv = NULL;
	return 0;
}
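/*
 * drm_mm_takedown() itself warns if the range manager is not empty, so
 * outstanding allocations at this point indicate a driver bug rather
 * than a condition this function tries to recover from.
 */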
/**
 * amdgpu_gtt_mgr_has_gart_addr - check if mem has address space
 *
 * @mem: the mem object to check
 *
 * Check if a mem object has already address space allocated.
 */
bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem)
{
	struct amdgpu_gtt_node *node = mem->mm_node;

	return (node->node.start != AMDGPU_BO_INVALID_OFFSET);
}
/**
 * amdgpu_gtt_mgr_alloc - allocate new ranges
 *
 * @man: TTM memory type manager
 * @tbo: TTM BO we need this range for
 * @place: placement flags and restrictions
 * @mem: the resulting mem object
 *
 * Allocate the address space for a node.
 */
static int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
				struct ttm_buffer_object *tbo,
				const struct ttm_place *place,
				struct ttm_mem_reg *mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
	struct amdgpu_gtt_mgr *mgr = man->priv;
	struct amdgpu_gtt_node *node = mem->mm_node;
	enum drm_mm_insert_mode mode;
	unsigned long fpfn, lpfn;
	int r;

	if (amdgpu_gtt_mgr_has_gart_addr(mem))
		return 0;

	if (place)
		fpfn = place->fpfn;
	else
		fpfn = 0;

	if (place && place->lpfn)
		lpfn = place->lpfn;
	else
		lpfn = adev->gart.num_cpu_pages;

	mode = DRM_MM_INSERT_BEST;
	if (place && place->flags & TTM_PL_FLAG_TOPDOWN)
		mode = DRM_MM_INSERT_HIGH;

	spin_lock(&mgr->lock);
	r = drm_mm_insert_node_in_range(&mgr->mm, &node->node, mem->num_pages,
					mem->page_alignment, 0, fpfn, lpfn,
					mode);
	spin_unlock(&mgr->lock);

	if (!r)
		mem->start = node->node.start;

	return r;
}
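/*
 * Mode selection sketch: DRM_MM_INSERT_BEST picks the smallest hole that
 * still fits the request, minimizing fragmentation, while
 * DRM_MM_INSERT_HIGH honors TTM_PL_FLAG_TOPDOWN by allocating from the
 * top of the search range downwards.
 */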
/**
 * amdgpu_gtt_mgr_new - allocate a new node
 *
 * @man: TTM memory type manager
 * @tbo: TTM BO we need this range for
 * @place: placement flags and restrictions
 * @mem: the resulting mem object
 *
 * Allocate the node, but don't assign actual GART address space yet
 * unless the placement requires it.
 */
static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
			      struct ttm_buffer_object *tbo,
			      const struct ttm_place *place,
			      struct ttm_mem_reg *mem)
{
	struct amdgpu_gtt_mgr *mgr = man->priv;
	struct amdgpu_gtt_node *node;
	int r;

	spin_lock(&mgr->lock);
	if ((&tbo->mem == mem || tbo->mem.mem_type != TTM_PL_TT) &&
	    atomic64_read(&mgr->available) < mem->num_pages) {
		spin_unlock(&mgr->lock);
		return 0;
	}
	atomic64_sub(mem->num_pages, &mgr->available);
	spin_unlock(&mgr->lock);

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node) {
		r = -ENOMEM;
		goto err_out;
	}

	node->node.start = AMDGPU_BO_INVALID_OFFSET;
	node->node.size = mem->num_pages;
	node->tbo = tbo;
	mem->mm_node = node;

	if (place->fpfn || place->lpfn || place->flags & TTM_PL_FLAG_TOPDOWN) {
		r = amdgpu_gtt_mgr_alloc(man, tbo, place, mem);
		if (unlikely(r)) {
			kfree(node);
			mem->mm_node = NULL;
			/* return 0 so TTM retries with a different placement */
			r = 0;
			goto err_out;
		}
	} else {
		mem->start = node->node.start;
	}

	return 0;

err_out:
	atomic64_add(mem->num_pages, &mgr->available);
	return r;
}
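/*
 * Accounting sketch: mgr->available is debited under the lock before any
 * node is created and credited back on the err_out path, so concurrent
 * callers cannot oversubscribe the GTT even though the actual drm_mm
 * insertion may be deferred until the first GART bind.
 */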
/**
 * amdgpu_gtt_mgr_del - free ranges
 *
 * @man: TTM memory type manager
 * @mem: TTM memory object
 *
 * Free the allocated GTT again.
 */
static void amdgpu_gtt_mgr_del(struct ttm_mem_type_manager *man,
			       struct ttm_mem_reg *mem)
{
	struct amdgpu_gtt_mgr *mgr = man->priv;
	struct amdgpu_gtt_node *node = mem->mm_node;

	if (!node)
		return;

	spin_lock(&mgr->lock);
	if (node->node.start != AMDGPU_BO_INVALID_OFFSET)
		drm_mm_remove_node(&node->node);
	spin_unlock(&mgr->lock);
	atomic64_add(mem->num_pages, &mgr->available);

	kfree(node);
	mem->mm_node = NULL;
}
/**
 * amdgpu_gtt_mgr_usage - return usage of GTT domain
 *
 * @man: TTM memory type manager
 *
 * Return how many bytes are used in the GTT domain.
 */
uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man)
{
	struct amdgpu_gtt_mgr *mgr = man->priv;
	s64 result = man->size - atomic64_read(&mgr->available);

	return (result > 0 ? result : 0) * PAGE_SIZE;
}
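/**
 * amdgpu_gtt_mgr_recover - re-init the GART table
 *
 * @man: TTM memory type manager
 *
 * Rebuild the GART entries for every BO the manager knows about,
 * typically after the table contents were lost (e.g. on GPU reset).
 */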
int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man)
{
	struct amdgpu_gtt_mgr *mgr = man->priv;
	struct amdgpu_gtt_node *node;
	struct drm_mm_node *mm_node;
	int r = 0;

	spin_lock(&mgr->lock);
	drm_mm_for_each_node(mm_node, &mgr->mm) {
		node = container_of(mm_node, struct amdgpu_gtt_node, node);
		r = amdgpu_ttm_recover_gart(node->tbo);
		if (r)
			break;
	}
	spin_unlock(&mgr->lock);

	return r;
}
/**
 * amdgpu_gtt_mgr_debug - dump GTT table
 *
 * @man: TTM memory type manager
 * @printer: DRM printer to use
 *
 * Dump the table content using the given DRM printer.
 */
static void amdgpu_gtt_mgr_debug(struct ttm_mem_type_manager *man,
				 struct drm_printer *printer)
{
	struct amdgpu_gtt_mgr *mgr = man->priv;

	spin_lock(&mgr->lock);
	drm_mm_print(&mgr->mm, printer);
	spin_unlock(&mgr->lock);

	drm_printf(printer, "man size:%llu pages, gtt available:%llu pages, usage:%lluMB\n",
		   man->size, (u64)atomic64_read(&mgr->available),
		   amdgpu_gtt_mgr_usage(man) >> 20);
}
const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func = {
	.init = amdgpu_gtt_mgr_init,
	.takedown = amdgpu_gtt_mgr_fini,
	.get_node = amdgpu_gtt_mgr_new,
	.put_node = amdgpu_gtt_mgr_del,
	.debug = amdgpu_gtt_mgr_debug
};