1 /* SPDX-License-Identifier: GPL-2.0 OR MIT */
2 /**************************************************************************
4 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
15 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
22 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
23 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
25 * USE OR OTHER DEALINGS IN THE SOFTWARE.
27 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
32 #define pr_fmt(fmt) "[TTM] " fmt
34 #include <linux/sched.h>
35 #include <linux/pagemap.h>
36 #include <linux/shmem_fs.h>
37 #include <linux/file.h>
38 #include <drm/drm_cache.h>
39 #include <drm/ttm/ttm_bo_driver.h>
40 #include <drm/ttm/ttm_page_alloc.h>
41 #include <drm/ttm/ttm_set_memory.h>
44 * Allocates a ttm structure for the given BO.
46 int ttm_tt_create(struct ttm_buffer_object
*bo
, bool zero_alloc
)
48 struct ttm_bo_device
*bdev
= bo
->bdev
;
49 uint32_t page_flags
= 0;
51 reservation_object_assert_held(bo
->resv
);
54 page_flags
|= TTM_PAGE_FLAG_DMA32
;
57 page_flags
|= TTM_PAGE_FLAG_NO_RETRY
;
60 case ttm_bo_type_device
:
62 page_flags
|= TTM_PAGE_FLAG_ZERO_ALLOC
;
64 case ttm_bo_type_kernel
:
67 page_flags
|= TTM_PAGE_FLAG_SG
;
71 pr_err("Illegal buffer object type\n");
75 bo
->ttm
= bdev
->driver
->ttm_tt_create(bo
, page_flags
);
76 if (unlikely(bo
->ttm
== NULL
))
83 * Allocates storage for pointers to the pages that back the ttm.
85 static int ttm_tt_alloc_page_directory(struct ttm_tt
*ttm
)
87 ttm
->pages
= kvmalloc_array(ttm
->num_pages
, sizeof(void*),
88 GFP_KERNEL
| __GFP_ZERO
);
94 static int ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt
*ttm
)
96 ttm
->ttm
.pages
= kvmalloc_array(ttm
->ttm
.num_pages
,
97 sizeof(*ttm
->ttm
.pages
) +
98 sizeof(*ttm
->dma_address
),
99 GFP_KERNEL
| __GFP_ZERO
);
102 ttm
->dma_address
= (void *) (ttm
->ttm
.pages
+ ttm
->ttm
.num_pages
);
106 static int ttm_sg_tt_alloc_page_directory(struct ttm_dma_tt
*ttm
)
108 ttm
->dma_address
= kvmalloc_array(ttm
->ttm
.num_pages
,
109 sizeof(*ttm
->dma_address
),
110 GFP_KERNEL
| __GFP_ZERO
);
111 if (!ttm
->dma_address
)
116 static int ttm_tt_set_page_caching(struct page
*p
,
117 enum ttm_caching_state c_old
,
118 enum ttm_caching_state c_new
)
125 if (c_old
!= tt_cached
) {
126 /* p isn't in the default caching state, set it to
127 * writeback first to free its current memtype. */
129 ret
= ttm_set_pages_wb(p
, 1);
135 ret
= ttm_set_pages_wc(p
, 1);
136 else if (c_new
== tt_uncached
)
137 ret
= ttm_set_pages_uc(p
, 1);
143 * Change caching policy for the linear kernel map
144 * for range of pages in a ttm.
147 static int ttm_tt_set_caching(struct ttm_tt
*ttm
,
148 enum ttm_caching_state c_state
)
151 struct page
*cur_page
;
154 if (ttm
->caching_state
== c_state
)
157 if (ttm
->state
== tt_unpopulated
) {
158 /* Change caching but don't populate */
159 ttm
->caching_state
= c_state
;
163 if (ttm
->caching_state
== tt_cached
)
164 drm_clflush_pages(ttm
->pages
, ttm
->num_pages
);
166 for (i
= 0; i
< ttm
->num_pages
; ++i
) {
167 cur_page
= ttm
->pages
[i
];
168 if (likely(cur_page
!= NULL
)) {
169 ret
= ttm_tt_set_page_caching(cur_page
,
172 if (unlikely(ret
!= 0))
177 ttm
->caching_state
= c_state
;
182 for (j
= 0; j
< i
; ++j
) {
183 cur_page
= ttm
->pages
[j
];
184 if (likely(cur_page
!= NULL
)) {
185 (void)ttm_tt_set_page_caching(cur_page
, c_state
,
193 int ttm_tt_set_placement_caching(struct ttm_tt
*ttm
, uint32_t placement
)
195 enum ttm_caching_state state
;
197 if (placement
& TTM_PL_FLAG_WC
)
199 else if (placement
& TTM_PL_FLAG_UNCACHED
)
204 return ttm_tt_set_caching(ttm
, state
);
206 EXPORT_SYMBOL(ttm_tt_set_placement_caching
);
208 void ttm_tt_destroy(struct ttm_tt
*ttm
)
215 if (ttm
->state
== tt_unbound
)
216 ttm_tt_unpopulate(ttm
);
218 if (!(ttm
->page_flags
& TTM_PAGE_FLAG_PERSISTENT_SWAP
) &&
220 fput(ttm
->swap_storage
);
222 ttm
->swap_storage
= NULL
;
223 ttm
->func
->destroy(ttm
);
226 void ttm_tt_init_fields(struct ttm_tt
*ttm
, struct ttm_buffer_object
*bo
,
229 ttm
->bdev
= bo
->bdev
;
230 ttm
->num_pages
= bo
->num_pages
;
231 ttm
->caching_state
= tt_cached
;
232 ttm
->page_flags
= page_flags
;
233 ttm
->state
= tt_unpopulated
;
234 ttm
->swap_storage
= NULL
;
238 int ttm_tt_init(struct ttm_tt
*ttm
, struct ttm_buffer_object
*bo
,
241 ttm_tt_init_fields(ttm
, bo
, page_flags
);
243 if (ttm_tt_alloc_page_directory(ttm
)) {
245 pr_err("Failed allocating page table\n");
250 EXPORT_SYMBOL(ttm_tt_init
);
252 void ttm_tt_fini(struct ttm_tt
*ttm
)
257 EXPORT_SYMBOL(ttm_tt_fini
);
259 int ttm_dma_tt_init(struct ttm_dma_tt
*ttm_dma
, struct ttm_buffer_object
*bo
,
262 struct ttm_tt
*ttm
= &ttm_dma
->ttm
;
264 ttm_tt_init_fields(ttm
, bo
, page_flags
);
266 INIT_LIST_HEAD(&ttm_dma
->pages_list
);
267 if (ttm_dma_tt_alloc_page_directory(ttm_dma
)) {
269 pr_err("Failed allocating page table\n");
274 EXPORT_SYMBOL(ttm_dma_tt_init
);
276 int ttm_sg_tt_init(struct ttm_dma_tt
*ttm_dma
, struct ttm_buffer_object
*bo
,
279 struct ttm_tt
*ttm
= &ttm_dma
->ttm
;
282 ttm_tt_init_fields(ttm
, bo
, page_flags
);
284 INIT_LIST_HEAD(&ttm_dma
->pages_list
);
285 if (page_flags
& TTM_PAGE_FLAG_SG
)
286 ret
= ttm_sg_tt_alloc_page_directory(ttm_dma
);
288 ret
= ttm_dma_tt_alloc_page_directory(ttm_dma
);
291 pr_err("Failed allocating page table\n");
296 EXPORT_SYMBOL(ttm_sg_tt_init
);
298 void ttm_dma_tt_fini(struct ttm_dma_tt
*ttm_dma
)
300 struct ttm_tt
*ttm
= &ttm_dma
->ttm
;
305 kvfree(ttm_dma
->dma_address
);
307 ttm_dma
->dma_address
= NULL
;
309 EXPORT_SYMBOL(ttm_dma_tt_fini
);
311 void ttm_tt_unbind(struct ttm_tt
*ttm
)
315 if (ttm
->state
== tt_bound
) {
316 ret
= ttm
->func
->unbind(ttm
);
318 ttm
->state
= tt_unbound
;
322 int ttm_tt_bind(struct ttm_tt
*ttm
, struct ttm_mem_reg
*bo_mem
,
323 struct ttm_operation_ctx
*ctx
)
330 if (ttm
->state
== tt_bound
)
333 ret
= ttm_tt_populate(ttm
, ctx
);
337 ret
= ttm
->func
->bind(ttm
, bo_mem
);
338 if (unlikely(ret
!= 0))
341 ttm
->state
= tt_bound
;
345 EXPORT_SYMBOL(ttm_tt_bind
);
347 int ttm_tt_swapin(struct ttm_tt
*ttm
)
349 struct address_space
*swap_space
;
350 struct file
*swap_storage
;
351 struct page
*from_page
;
352 struct page
*to_page
;
356 swap_storage
= ttm
->swap_storage
;
357 BUG_ON(swap_storage
== NULL
);
359 swap_space
= swap_storage
->f_mapping
;
361 for (i
= 0; i
< ttm
->num_pages
; ++i
) {
362 gfp_t gfp_mask
= mapping_gfp_mask(swap_space
);
364 gfp_mask
|= (ttm
->page_flags
& TTM_PAGE_FLAG_NO_RETRY
? __GFP_RETRY_MAYFAIL
: 0);
365 from_page
= shmem_read_mapping_page_gfp(swap_space
, i
, gfp_mask
);
367 if (IS_ERR(from_page
)) {
368 ret
= PTR_ERR(from_page
);
371 to_page
= ttm
->pages
[i
];
372 if (unlikely(to_page
== NULL
))
375 copy_highpage(to_page
, from_page
);
379 if (!(ttm
->page_flags
& TTM_PAGE_FLAG_PERSISTENT_SWAP
))
381 ttm
->swap_storage
= NULL
;
382 ttm
->page_flags
&= ~TTM_PAGE_FLAG_SWAPPED
;
389 int ttm_tt_swapout(struct ttm_tt
*ttm
, struct file
*persistent_swap_storage
)
391 struct address_space
*swap_space
;
392 struct file
*swap_storage
;
393 struct page
*from_page
;
394 struct page
*to_page
;
398 BUG_ON(ttm
->state
!= tt_unbound
&& ttm
->state
!= tt_unpopulated
);
399 BUG_ON(ttm
->caching_state
!= tt_cached
);
401 if (!persistent_swap_storage
) {
402 swap_storage
= shmem_file_setup("ttm swap",
403 ttm
->num_pages
<< PAGE_SHIFT
,
405 if (IS_ERR(swap_storage
)) {
406 pr_err("Failed allocating swap storage\n");
407 return PTR_ERR(swap_storage
);
410 swap_storage
= persistent_swap_storage
;
413 swap_space
= swap_storage
->f_mapping
;
415 for (i
= 0; i
< ttm
->num_pages
; ++i
) {
416 gfp_t gfp_mask
= mapping_gfp_mask(swap_space
);
418 gfp_mask
|= (ttm
->page_flags
& TTM_PAGE_FLAG_NO_RETRY
? __GFP_RETRY_MAYFAIL
: 0);
420 from_page
= ttm
->pages
[i
];
421 if (unlikely(from_page
== NULL
))
424 to_page
= shmem_read_mapping_page_gfp(swap_space
, i
, gfp_mask
);
425 if (IS_ERR(to_page
)) {
426 ret
= PTR_ERR(to_page
);
429 copy_highpage(to_page
, from_page
);
430 set_page_dirty(to_page
);
431 mark_page_accessed(to_page
);
435 ttm_tt_unpopulate(ttm
);
436 ttm
->swap_storage
= swap_storage
;
437 ttm
->page_flags
|= TTM_PAGE_FLAG_SWAPPED
;
438 if (persistent_swap_storage
)
439 ttm
->page_flags
|= TTM_PAGE_FLAG_PERSISTENT_SWAP
;
443 if (!persistent_swap_storage
)
449 static void ttm_tt_add_mapping(struct ttm_tt
*ttm
)
453 if (ttm
->page_flags
& TTM_PAGE_FLAG_SG
)
456 for (i
= 0; i
< ttm
->num_pages
; ++i
)
457 ttm
->pages
[i
]->mapping
= ttm
->bdev
->dev_mapping
;
460 int ttm_tt_populate(struct ttm_tt
*ttm
, struct ttm_operation_ctx
*ctx
)
464 if (ttm
->state
!= tt_unpopulated
)
467 if (ttm
->bdev
->driver
->ttm_tt_populate
)
468 ret
= ttm
->bdev
->driver
->ttm_tt_populate(ttm
, ctx
);
470 ret
= ttm_pool_populate(ttm
, ctx
);
472 ttm_tt_add_mapping(ttm
);
476 static void ttm_tt_clear_mapping(struct ttm_tt
*ttm
)
479 struct page
**page
= ttm
->pages
;
481 if (ttm
->page_flags
& TTM_PAGE_FLAG_SG
)
484 for (i
= 0; i
< ttm
->num_pages
; ++i
) {
485 (*page
)->mapping
= NULL
;
486 (*page
++)->index
= 0;
490 void ttm_tt_unpopulate(struct ttm_tt
*ttm
)
492 if (ttm
->state
== tt_unpopulated
)
495 ttm_tt_clear_mapping(ttm
);
496 if (ttm
->bdev
->driver
->ttm_tt_unpopulate
)
497 ttm
->bdev
->driver
->ttm_tt_unpopulate(ttm
);
499 ttm_pool_unpopulate(ttm
);