// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Huge page-table-entry support for IO memory.
 *
 * Copyright (C) 2007-2019 Vmware, Inc. All rights reserved.
 */
7 #include "vmwgfx_drv.h"
8 #include <drm/ttm/ttm_module.h>
9 #include <drm/ttm/ttm_bo_driver.h>
10 #include <drm/ttm/ttm_placement.h>
13 * struct vmw_thp_manager - Range manager implementing huge page alignment
15 * @mm: The underlying range manager. Protected by @lock.
16 * @lock: Manager lock.
18 struct vmw_thp_manager
{
19 struct ttm_resource_manager manager
;
24 static struct vmw_thp_manager
*to_thp_manager(struct ttm_resource_manager
*man
)
26 return container_of(man
, struct vmw_thp_manager
, manager
);
/* Forward declaration; the function table is defined at the bottom of the file. */
static const struct ttm_resource_manager_func vmw_thp_func;
31 static int vmw_thp_insert_aligned(struct drm_mm
*mm
, struct drm_mm_node
*node
,
32 unsigned long align_pages
,
33 const struct ttm_place
*place
,
34 struct ttm_resource
*mem
,
36 enum drm_mm_insert_mode mode
)
38 if (align_pages
>= mem
->page_alignment
&&
39 (!mem
->page_alignment
|| align_pages
% mem
->page_alignment
== 0)) {
40 return drm_mm_insert_node_in_range(mm
, node
,
43 place
->fpfn
, lpfn
, mode
);
49 static int vmw_thp_get_node(struct ttm_resource_manager
*man
,
50 struct ttm_buffer_object
*bo
,
51 const struct ttm_place
*place
,
52 struct ttm_resource
*mem
)
54 struct vmw_thp_manager
*rman
= to_thp_manager(man
);
55 struct drm_mm
*mm
= &rman
->mm
;
56 struct drm_mm_node
*node
;
57 unsigned long align_pages
;
59 enum drm_mm_insert_mode mode
= DRM_MM_INSERT_BEST
;
62 node
= kzalloc(sizeof(*node
), GFP_KERNEL
);
70 mode
= DRM_MM_INSERT_BEST
;
71 if (place
->flags
& TTM_PL_FLAG_TOPDOWN
)
72 mode
= DRM_MM_INSERT_HIGH
;
74 spin_lock(&rman
->lock
);
75 if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
)) {
76 align_pages
= (HPAGE_PUD_SIZE
>> PAGE_SHIFT
);
77 if (mem
->num_pages
>= align_pages
) {
78 ret
= vmw_thp_insert_aligned(mm
, node
, align_pages
,
79 place
, mem
, lpfn
, mode
);
85 align_pages
= (HPAGE_PMD_SIZE
>> PAGE_SHIFT
);
86 if (mem
->num_pages
>= align_pages
) {
87 ret
= vmw_thp_insert_aligned(mm
, node
, align_pages
, place
, mem
,
93 ret
= drm_mm_insert_node_in_range(mm
, node
, mem
->num_pages
,
94 mem
->page_alignment
, 0,
95 place
->fpfn
, lpfn
, mode
);
97 spin_unlock(&rman
->lock
);
103 mem
->start
= node
->start
;
111 static void vmw_thp_put_node(struct ttm_resource_manager
*man
,
112 struct ttm_resource
*mem
)
114 struct vmw_thp_manager
*rman
= to_thp_manager(man
);
117 spin_lock(&rman
->lock
);
118 drm_mm_remove_node(mem
->mm_node
);
119 spin_unlock(&rman
->lock
);
126 int vmw_thp_init(struct vmw_private
*dev_priv
)
128 struct vmw_thp_manager
*rman
;
130 rman
= kzalloc(sizeof(*rman
), GFP_KERNEL
);
134 ttm_resource_manager_init(&rman
->manager
,
135 dev_priv
->vram_size
>> PAGE_SHIFT
);
137 rman
->manager
.func
= &vmw_thp_func
;
138 drm_mm_init(&rman
->mm
, 0, rman
->manager
.size
);
139 spin_lock_init(&rman
->lock
);
141 ttm_set_driver_manager(&dev_priv
->bdev
, TTM_PL_VRAM
, &rman
->manager
);
142 ttm_resource_manager_set_used(&rman
->manager
, true);
146 void vmw_thp_fini(struct vmw_private
*dev_priv
)
148 struct ttm_resource_manager
*man
= ttm_manager_type(&dev_priv
->bdev
, TTM_PL_VRAM
);
149 struct vmw_thp_manager
*rman
= to_thp_manager(man
);
150 struct drm_mm
*mm
= &rman
->mm
;
153 ttm_resource_manager_set_used(man
, false);
155 ret
= ttm_resource_manager_evict_all(&dev_priv
->bdev
, man
);
158 spin_lock(&rman
->lock
);
161 spin_unlock(&rman
->lock
);
162 ttm_resource_manager_cleanup(man
);
163 ttm_set_driver_manager(&dev_priv
->bdev
, TTM_PL_VRAM
, NULL
);
167 static void vmw_thp_debug(struct ttm_resource_manager
*man
,
168 struct drm_printer
*printer
)
170 struct vmw_thp_manager
*rman
= to_thp_manager(man
);
172 spin_lock(&rman
->lock
);
173 drm_mm_print(&rman
->mm
, printer
);
174 spin_unlock(&rman
->lock
);
177 static const struct ttm_resource_manager_func vmw_thp_func
= {
178 .alloc
= vmw_thp_get_node
,
179 .free
= vmw_thp_put_node
,
180 .debug
= vmw_thp_debug