treewide: remove redundant IS_ERR() before error code check
[linux/fpc-iii.git] / drivers / gpu / drm / amd / amdgpu / amdgpu_vram_mgr.c
blob82a3299e53c042f6c8a5f8e688e0f36a15ca60e4
1 /*
2 * Copyright 2016 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
22 * Authors: Christian König
25 #include "amdgpu.h"
26 #include "amdgpu_vm.h"
27 #include "amdgpu_atomfirmware.h"
28 #include "atom.h"
/*
 * Per-device VRAM manager state, stored in ttm_mem_type_manager.priv.
 */
struct amdgpu_vram_mgr {
	struct drm_mm mm;	/* range allocator handing out VRAM pages */
	spinlock_t lock;	/* protects @mm */
	atomic64_t usage;	/* total bytes currently allocated from VRAM */
	atomic64_t vis_usage;	/* bytes allocated from the CPU-visible part */
};
37 /**
38 * DOC: mem_info_vram_total
40 * The amdgpu driver provides a sysfs API for reporting current total VRAM
41 * available on the device
42 * The file mem_info_vram_total is used for this and returns the total
43 * amount of VRAM in bytes
45 static ssize_t amdgpu_mem_info_vram_total_show(struct device *dev,
46 struct device_attribute *attr, char *buf)
48 struct drm_device *ddev = dev_get_drvdata(dev);
49 struct amdgpu_device *adev = ddev->dev_private;
51 return snprintf(buf, PAGE_SIZE, "%llu\n", adev->gmc.real_vram_size);
54 /**
55 * DOC: mem_info_vis_vram_total
57 * The amdgpu driver provides a sysfs API for reporting current total
58 * visible VRAM available on the device
59 * The file mem_info_vis_vram_total is used for this and returns the total
60 * amount of visible VRAM in bytes
62 static ssize_t amdgpu_mem_info_vis_vram_total_show(struct device *dev,
63 struct device_attribute *attr, char *buf)
65 struct drm_device *ddev = dev_get_drvdata(dev);
66 struct amdgpu_device *adev = ddev->dev_private;
68 return snprintf(buf, PAGE_SIZE, "%llu\n", adev->gmc.visible_vram_size);
71 /**
72 * DOC: mem_info_vram_used
74 * The amdgpu driver provides a sysfs API for reporting current total VRAM
75 * available on the device
76 * The file mem_info_vram_used is used for this and returns the total
77 * amount of currently used VRAM in bytes
79 static ssize_t amdgpu_mem_info_vram_used_show(struct device *dev,
80 struct device_attribute *attr, char *buf)
82 struct drm_device *ddev = dev_get_drvdata(dev);
83 struct amdgpu_device *adev = ddev->dev_private;
85 return snprintf(buf, PAGE_SIZE, "%llu\n",
86 amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]));
89 /**
90 * DOC: mem_info_vis_vram_used
92 * The amdgpu driver provides a sysfs API for reporting current total of
93 * used visible VRAM
94 * The file mem_info_vis_vram_used is used for this and returns the total
95 * amount of currently used visible VRAM in bytes
97 static ssize_t amdgpu_mem_info_vis_vram_used_show(struct device *dev,
98 struct device_attribute *attr, char *buf)
100 struct drm_device *ddev = dev_get_drvdata(dev);
101 struct amdgpu_device *adev = ddev->dev_private;
103 return snprintf(buf, PAGE_SIZE, "%llu\n",
104 amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]));
107 static ssize_t amdgpu_mem_info_vram_vendor(struct device *dev,
108 struct device_attribute *attr,
109 char *buf)
111 struct drm_device *ddev = dev_get_drvdata(dev);
112 struct amdgpu_device *adev = ddev->dev_private;
114 switch (adev->gmc.vram_vendor) {
115 case SAMSUNG:
116 return snprintf(buf, PAGE_SIZE, "samsung\n");
117 case INFINEON:
118 return snprintf(buf, PAGE_SIZE, "infineon\n");
119 case ELPIDA:
120 return snprintf(buf, PAGE_SIZE, "elpida\n");
121 case ETRON:
122 return snprintf(buf, PAGE_SIZE, "etron\n");
123 case NANYA:
124 return snprintf(buf, PAGE_SIZE, "nanya\n");
125 case HYNIX:
126 return snprintf(buf, PAGE_SIZE, "hynix\n");
127 case MOSEL:
128 return snprintf(buf, PAGE_SIZE, "mosel\n");
129 case WINBOND:
130 return snprintf(buf, PAGE_SIZE, "winbond\n");
131 case ESMT:
132 return snprintf(buf, PAGE_SIZE, "esmt\n");
133 case MICRON:
134 return snprintf(buf, PAGE_SIZE, "micron\n");
135 default:
136 return snprintf(buf, PAGE_SIZE, "unknown\n");
/* Read-only sysfs attributes wiring the show callbacks above to the files
 * mem_info_vram_total, mem_info_vis_vram_total, mem_info_vram_used,
 * mem_info_vis_vram_used and mem_info_vram_vendor.
 */
static DEVICE_ATTR(mem_info_vram_total, S_IRUGO,
		   amdgpu_mem_info_vram_total_show, NULL);
static DEVICE_ATTR(mem_info_vis_vram_total, S_IRUGO,
		   amdgpu_mem_info_vis_vram_total_show, NULL);
static DEVICE_ATTR(mem_info_vram_used, S_IRUGO,
		   amdgpu_mem_info_vram_used_show, NULL);
static DEVICE_ATTR(mem_info_vis_vram_used, S_IRUGO,
		   amdgpu_mem_info_vis_vram_used_show, NULL);
static DEVICE_ATTR(mem_info_vram_vendor, S_IRUGO,
		   amdgpu_mem_info_vram_vendor, NULL);
152 * amdgpu_vram_mgr_init - init VRAM manager and DRM MM
154 * @man: TTM memory type manager
155 * @p_size: maximum size of VRAM
157 * Allocate and initialize the VRAM manager.
159 static int amdgpu_vram_mgr_init(struct ttm_mem_type_manager *man,
160 unsigned long p_size)
162 struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
163 struct amdgpu_vram_mgr *mgr;
164 int ret;
166 mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
167 if (!mgr)
168 return -ENOMEM;
170 drm_mm_init(&mgr->mm, 0, p_size);
171 spin_lock_init(&mgr->lock);
172 man->priv = mgr;
174 /* Add the two VRAM-related sysfs files */
175 ret = device_create_file(adev->dev, &dev_attr_mem_info_vram_total);
176 if (ret) {
177 DRM_ERROR("Failed to create device file mem_info_vram_total\n");
178 return ret;
180 ret = device_create_file(adev->dev, &dev_attr_mem_info_vis_vram_total);
181 if (ret) {
182 DRM_ERROR("Failed to create device file mem_info_vis_vram_total\n");
183 return ret;
185 ret = device_create_file(adev->dev, &dev_attr_mem_info_vram_used);
186 if (ret) {
187 DRM_ERROR("Failed to create device file mem_info_vram_used\n");
188 return ret;
190 ret = device_create_file(adev->dev, &dev_attr_mem_info_vis_vram_used);
191 if (ret) {
192 DRM_ERROR("Failed to create device file mem_info_vis_vram_used\n");
193 return ret;
195 ret = device_create_file(adev->dev, &dev_attr_mem_info_vram_vendor);
196 if (ret) {
197 DRM_ERROR("Failed to create device file mem_info_vram_vendor\n");
198 return ret;
201 return 0;
205 * amdgpu_vram_mgr_fini - free and destroy VRAM manager
207 * @man: TTM memory type manager
209 * Destroy and free the VRAM manager, returns -EBUSY if ranges are still
210 * allocated inside it.
212 static int amdgpu_vram_mgr_fini(struct ttm_mem_type_manager *man)
214 struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
215 struct amdgpu_vram_mgr *mgr = man->priv;
217 spin_lock(&mgr->lock);
218 drm_mm_takedown(&mgr->mm);
219 spin_unlock(&mgr->lock);
220 kfree(mgr);
221 man->priv = NULL;
222 device_remove_file(adev->dev, &dev_attr_mem_info_vram_total);
223 device_remove_file(adev->dev, &dev_attr_mem_info_vis_vram_total);
224 device_remove_file(adev->dev, &dev_attr_mem_info_vram_used);
225 device_remove_file(adev->dev, &dev_attr_mem_info_vis_vram_used);
226 device_remove_file(adev->dev, &dev_attr_mem_info_vram_vendor);
227 return 0;
231 * amdgpu_vram_mgr_vis_size - Calculate visible node size
233 * @adev: amdgpu device structure
234 * @node: MM node structure
236 * Calculate how many bytes of the MM node are inside visible VRAM
238 static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
239 struct drm_mm_node *node)
241 uint64_t start = node->start << PAGE_SHIFT;
242 uint64_t end = (node->size + node->start) << PAGE_SHIFT;
244 if (start >= adev->gmc.visible_vram_size)
245 return 0;
247 return (end > adev->gmc.visible_vram_size ?
248 adev->gmc.visible_vram_size : end) - start;
252 * amdgpu_vram_mgr_bo_visible_size - CPU visible BO size
254 * @bo: &amdgpu_bo buffer object (must be in VRAM)
256 * Returns:
257 * How much of the given &amdgpu_bo buffer object lies in CPU visible VRAM.
259 u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)
261 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
262 struct ttm_mem_reg *mem = &bo->tbo.mem;
263 struct drm_mm_node *nodes = mem->mm_node;
264 unsigned pages = mem->num_pages;
265 u64 usage;
267 if (amdgpu_gmc_vram_full_visible(&adev->gmc))
268 return amdgpu_bo_size(bo);
270 if (mem->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
271 return 0;
273 for (usage = 0; nodes && pages; pages -= nodes->size, nodes++)
274 usage += amdgpu_vram_mgr_vis_size(adev, nodes);
276 return usage;
280 * amdgpu_vram_mgr_virt_start - update virtual start address
282 * @mem: ttm_mem_reg to update
283 * @node: just allocated node
285 * Calculate a virtual BO start address to easily check if everything is CPU
286 * accessible.
288 static void amdgpu_vram_mgr_virt_start(struct ttm_mem_reg *mem,
289 struct drm_mm_node *node)
291 unsigned long start;
293 start = node->start + node->size;
294 if (start > mem->num_pages)
295 start -= mem->num_pages;
296 else
297 start = 0;
298 mem->start = max(mem->start, start);
302 * amdgpu_vram_mgr_new - allocate new ranges
304 * @man: TTM memory type manager
305 * @tbo: TTM BO we need this range for
306 * @place: placement flags and restrictions
307 * @mem: the resulting mem object
309 * Allocate VRAM for the given BO.
311 static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
312 struct ttm_buffer_object *tbo,
313 const struct ttm_place *place,
314 struct ttm_mem_reg *mem)
316 struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
317 struct amdgpu_vram_mgr *mgr = man->priv;
318 struct drm_mm *mm = &mgr->mm;
319 struct drm_mm_node *nodes;
320 enum drm_mm_insert_mode mode;
321 unsigned long lpfn, num_nodes, pages_per_node, pages_left;
322 uint64_t vis_usage = 0, mem_bytes, max_bytes;
323 unsigned i;
324 int r;
326 lpfn = place->lpfn;
327 if (!lpfn)
328 lpfn = man->size;
330 max_bytes = adev->gmc.mc_vram_size;
331 if (tbo->type != ttm_bo_type_kernel)
332 max_bytes -= AMDGPU_VM_RESERVED_VRAM;
334 /* bail out quickly if there's likely not enough VRAM for this BO */
335 mem_bytes = (u64)mem->num_pages << PAGE_SHIFT;
336 if (atomic64_add_return(mem_bytes, &mgr->usage) > max_bytes) {
337 atomic64_sub(mem_bytes, &mgr->usage);
338 mem->mm_node = NULL;
339 return 0;
342 if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
343 pages_per_node = ~0ul;
344 num_nodes = 1;
345 } else {
346 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
347 pages_per_node = HPAGE_PMD_NR;
348 #else
349 /* default to 2MB */
350 pages_per_node = (2UL << (20UL - PAGE_SHIFT));
351 #endif
352 pages_per_node = max((uint32_t)pages_per_node, mem->page_alignment);
353 num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node);
356 nodes = kvmalloc_array((uint32_t)num_nodes, sizeof(*nodes),
357 GFP_KERNEL | __GFP_ZERO);
358 if (!nodes) {
359 atomic64_sub(mem_bytes, &mgr->usage);
360 return -ENOMEM;
363 mode = DRM_MM_INSERT_BEST;
364 if (place->flags & TTM_PL_FLAG_TOPDOWN)
365 mode = DRM_MM_INSERT_HIGH;
367 mem->start = 0;
368 pages_left = mem->num_pages;
370 spin_lock(&mgr->lock);
371 for (i = 0; pages_left >= pages_per_node; ++i) {
372 unsigned long pages = rounddown_pow_of_two(pages_left);
374 r = drm_mm_insert_node_in_range(mm, &nodes[i], pages,
375 pages_per_node, 0,
376 place->fpfn, lpfn,
377 mode);
378 if (unlikely(r))
379 break;
381 vis_usage += amdgpu_vram_mgr_vis_size(adev, &nodes[i]);
382 amdgpu_vram_mgr_virt_start(mem, &nodes[i]);
383 pages_left -= pages;
386 for (; pages_left; ++i) {
387 unsigned long pages = min(pages_left, pages_per_node);
388 uint32_t alignment = mem->page_alignment;
390 if (pages == pages_per_node)
391 alignment = pages_per_node;
393 r = drm_mm_insert_node_in_range(mm, &nodes[i],
394 pages, alignment, 0,
395 place->fpfn, lpfn,
396 mode);
397 if (unlikely(r))
398 goto error;
400 vis_usage += amdgpu_vram_mgr_vis_size(adev, &nodes[i]);
401 amdgpu_vram_mgr_virt_start(mem, &nodes[i]);
402 pages_left -= pages;
404 spin_unlock(&mgr->lock);
406 atomic64_add(vis_usage, &mgr->vis_usage);
408 mem->mm_node = nodes;
410 return 0;
412 error:
413 while (i--)
414 drm_mm_remove_node(&nodes[i]);
415 spin_unlock(&mgr->lock);
416 atomic64_sub(mem->num_pages << PAGE_SHIFT, &mgr->usage);
418 kvfree(nodes);
419 return r == -ENOSPC ? 0 : r;
423 * amdgpu_vram_mgr_del - free ranges
425 * @man: TTM memory type manager
426 * @tbo: TTM BO we need this range for
427 * @place: placement flags and restrictions
428 * @mem: TTM memory object
430 * Free the allocated VRAM again.
432 static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man,
433 struct ttm_mem_reg *mem)
435 struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
436 struct amdgpu_vram_mgr *mgr = man->priv;
437 struct drm_mm_node *nodes = mem->mm_node;
438 uint64_t usage = 0, vis_usage = 0;
439 unsigned pages = mem->num_pages;
441 if (!mem->mm_node)
442 return;
444 spin_lock(&mgr->lock);
445 while (pages) {
446 pages -= nodes->size;
447 drm_mm_remove_node(nodes);
448 usage += nodes->size << PAGE_SHIFT;
449 vis_usage += amdgpu_vram_mgr_vis_size(adev, nodes);
450 ++nodes;
452 spin_unlock(&mgr->lock);
454 atomic64_sub(usage, &mgr->usage);
455 atomic64_sub(vis_usage, &mgr->vis_usage);
457 kvfree(mem->mm_node);
458 mem->mm_node = NULL;
462 * amdgpu_vram_mgr_usage - how many bytes are used in this domain
464 * @man: TTM memory type manager
466 * Returns how many bytes are used in this domain.
468 uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man)
470 struct amdgpu_vram_mgr *mgr = man->priv;
472 return atomic64_read(&mgr->usage);
476 * amdgpu_vram_mgr_vis_usage - how many bytes are used in the visible part
478 * @man: TTM memory type manager
480 * Returns how many bytes are used in the visible part of VRAM
482 uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man)
484 struct amdgpu_vram_mgr *mgr = man->priv;
486 return atomic64_read(&mgr->vis_usage);
490 * amdgpu_vram_mgr_debug - dump VRAM table
492 * @man: TTM memory type manager
493 * @printer: DRM printer to use
495 * Dump the table content using printk.
497 static void amdgpu_vram_mgr_debug(struct ttm_mem_type_manager *man,
498 struct drm_printer *printer)
500 struct amdgpu_vram_mgr *mgr = man->priv;
502 spin_lock(&mgr->lock);
503 drm_mm_print(&mgr->mm, printer);
504 spin_unlock(&mgr->lock);
506 drm_printf(printer, "man size:%llu pages, ram usage:%lluMB, vis usage:%lluMB\n",
507 man->size, amdgpu_vram_mgr_usage(man) >> 20,
508 amdgpu_vram_mgr_vis_usage(man) >> 20);
/* TTM memory-type-manager vtable hooking the VRAM manager into TTM. */
const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func = {
	.init = amdgpu_vram_mgr_init,
	.takedown = amdgpu_vram_mgr_fini,
	.get_node = amdgpu_vram_mgr_new,
	.put_node = amdgpu_vram_mgr_del,
	.debug = amdgpu_vram_mgr_debug
};