Merge tag 'block-5.11-2021-01-10' of git://git.kernel.dk/linux-block
[linux/fpc-iii.git] / drivers / gpu / drm / vmwgfx / vmwgfx_thp.c
blob155ca3a5c7e55400596bec864bb0cc1d0ba5aebc
1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /*
3 * Huge page-table-entry support for IO memory.
 * Copyright (C) 2007-2019 Vmware, Inc. All rights reserved.
6 */
7 #include "vmwgfx_drv.h"
8 #include <drm/ttm/ttm_module.h>
9 #include <drm/ttm/ttm_bo_driver.h>
10 #include <drm/ttm/ttm_placement.h>
12 /**
13 * struct vmw_thp_manager - Range manager implementing huge page alignment
15 * @mm: The underlying range manager. Protected by @lock.
16 * @lock: Manager lock.
18 struct vmw_thp_manager {
19 struct ttm_resource_manager manager;
20 struct drm_mm mm;
21 spinlock_t lock;
24 static struct vmw_thp_manager *to_thp_manager(struct ttm_resource_manager *man)
26 return container_of(man, struct vmw_thp_manager, manager);
29 static const struct ttm_resource_manager_func vmw_thp_func;
31 static int vmw_thp_insert_aligned(struct drm_mm *mm, struct drm_mm_node *node,
32 unsigned long align_pages,
33 const struct ttm_place *place,
34 struct ttm_resource *mem,
35 unsigned long lpfn,
36 enum drm_mm_insert_mode mode)
38 if (align_pages >= mem->page_alignment &&
39 (!mem->page_alignment || align_pages % mem->page_alignment == 0)) {
40 return drm_mm_insert_node_in_range(mm, node,
41 mem->num_pages,
42 align_pages, 0,
43 place->fpfn, lpfn, mode);
46 return -ENOSPC;
49 static int vmw_thp_get_node(struct ttm_resource_manager *man,
50 struct ttm_buffer_object *bo,
51 const struct ttm_place *place,
52 struct ttm_resource *mem)
54 struct vmw_thp_manager *rman = to_thp_manager(man);
55 struct drm_mm *mm = &rman->mm;
56 struct drm_mm_node *node;
57 unsigned long align_pages;
58 unsigned long lpfn;
59 enum drm_mm_insert_mode mode = DRM_MM_INSERT_BEST;
60 int ret;
62 node = kzalloc(sizeof(*node), GFP_KERNEL);
63 if (!node)
64 return -ENOMEM;
66 lpfn = place->lpfn;
67 if (!lpfn)
68 lpfn = man->size;
70 mode = DRM_MM_INSERT_BEST;
71 if (place->flags & TTM_PL_FLAG_TOPDOWN)
72 mode = DRM_MM_INSERT_HIGH;
74 spin_lock(&rman->lock);
75 if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)) {
76 align_pages = (HPAGE_PUD_SIZE >> PAGE_SHIFT);
77 if (mem->num_pages >= align_pages) {
78 ret = vmw_thp_insert_aligned(mm, node, align_pages,
79 place, mem, lpfn, mode);
80 if (!ret)
81 goto found_unlock;
85 align_pages = (HPAGE_PMD_SIZE >> PAGE_SHIFT);
86 if (mem->num_pages >= align_pages) {
87 ret = vmw_thp_insert_aligned(mm, node, align_pages, place, mem,
88 lpfn, mode);
89 if (!ret)
90 goto found_unlock;
93 ret = drm_mm_insert_node_in_range(mm, node, mem->num_pages,
94 mem->page_alignment, 0,
95 place->fpfn, lpfn, mode);
96 found_unlock:
97 spin_unlock(&rman->lock);
99 if (unlikely(ret)) {
100 kfree(node);
101 } else {
102 mem->mm_node = node;
103 mem->start = node->start;
106 return ret;
111 static void vmw_thp_put_node(struct ttm_resource_manager *man,
112 struct ttm_resource *mem)
114 struct vmw_thp_manager *rman = to_thp_manager(man);
116 if (mem->mm_node) {
117 spin_lock(&rman->lock);
118 drm_mm_remove_node(mem->mm_node);
119 spin_unlock(&rman->lock);
121 kfree(mem->mm_node);
122 mem->mm_node = NULL;
126 int vmw_thp_init(struct vmw_private *dev_priv)
128 struct vmw_thp_manager *rman;
130 rman = kzalloc(sizeof(*rman), GFP_KERNEL);
131 if (!rman)
132 return -ENOMEM;
134 ttm_resource_manager_init(&rman->manager,
135 dev_priv->vram_size >> PAGE_SHIFT);
137 rman->manager.func = &vmw_thp_func;
138 drm_mm_init(&rman->mm, 0, rman->manager.size);
139 spin_lock_init(&rman->lock);
141 ttm_set_driver_manager(&dev_priv->bdev, TTM_PL_VRAM, &rman->manager);
142 ttm_resource_manager_set_used(&rman->manager, true);
143 return 0;
146 void vmw_thp_fini(struct vmw_private *dev_priv)
148 struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);
149 struct vmw_thp_manager *rman = to_thp_manager(man);
150 struct drm_mm *mm = &rman->mm;
151 int ret;
153 ttm_resource_manager_set_used(man, false);
155 ret = ttm_resource_manager_evict_all(&dev_priv->bdev, man);
156 if (ret)
157 return;
158 spin_lock(&rman->lock);
159 drm_mm_clean(mm);
160 drm_mm_takedown(mm);
161 spin_unlock(&rman->lock);
162 ttm_resource_manager_cleanup(man);
163 ttm_set_driver_manager(&dev_priv->bdev, TTM_PL_VRAM, NULL);
164 kfree(rman);
167 static void vmw_thp_debug(struct ttm_resource_manager *man,
168 struct drm_printer *printer)
170 struct vmw_thp_manager *rman = to_thp_manager(man);
172 spin_lock(&rman->lock);
173 drm_mm_print(&rman->mm, printer);
174 spin_unlock(&rman->lock);
177 static const struct ttm_resource_manager_func vmw_thp_func = {
178 .alloc = vmw_thp_get_node,
179 .free = vmw_thp_put_node,
180 .debug = vmw_thp_debug