/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/swap.h>
#include "drm_cache.h"
#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
static int ttm_tt_swapin(struct ttm_tt *ttm);
/**
 * Allocates storage for pointers to the pages that back the ttm.
 *
 * Uses kmalloc if possible. Otherwise falls back to vmalloc.
 */
static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
	ttm->pages = NULL;

	if (size <= PAGE_SIZE)
		ttm->pages = kzalloc(size, GFP_KERNEL);

	if (!ttm->pages) {
		ttm->pages = vmalloc_user(size);
		if (ttm->pages)
			ttm->page_flags |= TTM_PAGE_FLAG_VMALLOC;
	}
}
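/**
 * Frees the page directory, using vfree() or kfree() to match
 * whichever allocator ttm_tt_alloc_page_directory() ended up using,
 * as recorded in TTM_PAGE_FLAG_VMALLOC.
 */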
static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
{
	if (ttm->page_flags & TTM_PAGE_FLAG_VMALLOC) {
		vfree(ttm->pages);
		ttm->page_flags &= ~TTM_PAGE_FLAG_VMALLOC;
	} else {
		kfree(ttm->pages);
	}
	ttm->pages = NULL;
}
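/**
 * Allocates a single page for the ttm, translating TTM page flags
 * into GFP flags. Note that __GFP_HIGHMEM is only applied when the
 * page need not be addressable below 4GB: DMA32 pages cannot come
 * from highmem.
 */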
static struct page *ttm_tt_alloc_page(unsigned page_flags)
{
	gfp_t gfp_flags = GFP_USER;

	if (page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;

	if (page_flags & TTM_PAGE_FLAG_DMA32)
		gfp_flags |= __GFP_DMA32;
	else
		gfp_flags |= __GFP_HIGHMEM;

	return alloc_page(gfp_flags);
}
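/**
 * Releases pages previously pinned with get_user_pages(). Pages that
 * were both mapped for write and marked dirty are flagged dirty before
 * being unpinned, so their contents survive reclaim.
 */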
static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
{
	int write;
	int dirty;
	struct page *page;
	int i;
	struct ttm_backend *be = ttm->be;

	BUG_ON(!(ttm->page_flags & TTM_PAGE_FLAG_USER));
	write = ((ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0);
	dirty = ((ttm->page_flags & TTM_PAGE_FLAG_USER_DIRTY) != 0);

	if (be)
		be->func->clear(be);

	for (i = 0; i < ttm->num_pages; ++i) {
		page = ttm->pages[i];
		if (page == NULL)
			continue;

		if (page == ttm->dummy_read_page) {
			BUG_ON(write);
			continue;
		}

		if (write && dirty && !PageReserved(page))
			set_page_dirty_lock(page);

		ttm->pages[i] = NULL;
		ttm_mem_global_free(ttm->glob->mem_glob, PAGE_SIZE);
		put_page(page);
	}
	ttm->state = tt_unpopulated;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
}
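/**
 * On-demand allocation of a single page, accounted against the global
 * memory limit. Highmem pages are filled in from the top of the page
 * array and lomem pages from the bottom, keeping the two ranges apart.
 */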
static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
	struct page *p;
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
	int ret;

	while (NULL == (p = ttm->pages[index])) {
		p = ttm_tt_alloc_page(ttm->page_flags);

		if (!p)
			return NULL;

		ret = ttm_mem_global_alloc_page(mem_glob, p, false, false);
		if (unlikely(ret != 0))
			goto out_err;

		if (PageHighMem(p))
			ttm->pages[--ttm->first_himem_page] = p;
		else
			ttm->pages[++ttm->last_lomem_page] = p;
	}
	return p;
out_err:
	put_page(p);
	return NULL;
}
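/**
 * Public wrapper around __ttm_tt_get_page() that first swaps the ttm
 * back in if its pages are currently in swap storage.
 */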
struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
	int ret;

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0))
			return NULL;
	}
	return __ttm_tt_get_page(ttm, index);
}
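/**
 * Makes sure all pages of the ttm are present and hands the complete
 * page array to the backend. Afterwards the ttm is in the tt_unbound
 * state and ready for ttm_tt_bind().
 */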
int ttm_tt_populate(struct ttm_tt *ttm)
{
	struct page *page;
	unsigned long i;
	struct ttm_backend *be;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0))
			return ret;
	}

	be = ttm->be;

	for (i = 0; i < ttm->num_pages; ++i) {
		page = __ttm_tt_get_page(ttm, i);
		if (!page)
			return -ENOMEM;
	}

	be->func->populate(be, ttm->num_pages, ttm->pages,
			   ttm->dummy_read_page);
	ttm->state = tt_unbound;
	return 0;
}
EXPORT_SYMBOL(ttm_tt_populate);
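/*
 * Per-page caching transition. Only x86 has the memtype machinery
 * behind set_pages_wb()/set_memory_wc()/set_pages_uc(); on other
 * architectures this is a no-op.
 */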
#ifdef CONFIG_X86
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_old,
					  enum ttm_caching_state c_new)
{
	int ret = 0;

	if (PageHighMem(p))
		return 0;

	if (c_old != tt_cached) {
		/* p isn't in the default caching state, set it to
		 * writeback first to free its current memtype. */

		ret = set_pages_wb(p, 1);
		if (ret)
			return ret;
	}

	if (c_new == tt_wc)
		ret = set_memory_wc((unsigned long) page_address(p), 1);
	else if (c_new == tt_uncached)
		ret = set_pages_uc(p, 1);

	return ret;
}
#else /* CONFIG_X86 */
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_old,
					  enum ttm_caching_state c_new)
{
	return 0;
}
#endif /* CONFIG_X86 */
/*
 * Change caching policy for the linear kernel map
 * for range of pages in a ttm.
 */
static int ttm_tt_set_caching(struct ttm_tt *ttm,
			      enum ttm_caching_state c_state)
{
	int i, j;
	struct page *cur_page;
	int ret;

	if (ttm->caching_state == c_state)
		return 0;

	if (c_state != tt_cached) {
		ret = ttm_tt_populate(ttm);
		if (unlikely(ret != 0))
			return ret;
	}

	if (ttm->caching_state == tt_cached)
		drm_clflush_pages(ttm->pages, ttm->num_pages);

	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		if (likely(cur_page != NULL)) {
			ret = ttm_tt_set_page_caching(cur_page,
						      ttm->caching_state,
						      c_state);
			if (unlikely(ret != 0))
				goto out_err;
		}
	}

	ttm->caching_state = c_state;

	return 0;

out_err:
	for (j = 0; j < i; ++j) {
		cur_page = ttm->pages[j];
		if (likely(cur_page != NULL)) {
			(void)ttm_tt_set_page_caching(cur_page, c_state,
						      ttm->caching_state);
		}
	}

	return ret;
}
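/**
 * Maps TTM placement caching flags (TTM_PL_FLAG_WC / _UNCACHED) onto
 * a ttm_caching_state and applies it; plain cached is the default.
 */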
int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
	enum ttm_caching_state state;

	if (placement & TTM_PL_FLAG_WC)
		state = tt_wc;
	else if (placement & TTM_PL_FLAG_UNCACHED)
		state = tt_uncached;
	else
		state = tt_cached;

	return ttm_tt_set_caching(ttm, state);
}
EXPORT_SYMBOL(ttm_tt_set_placement_caching);
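/*
 * Returns driver-allocated (non-user) pages to the page allocator.
 * Caching is restored to the default cached state first, since pages
 * should not be handed back with a non-default memtype.
 */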
static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
{
	int i;
	struct page *cur_page;
	struct ttm_backend *be = ttm->be;

	if (be)
		be->func->clear(be);
	(void)ttm_tt_set_caching(ttm, tt_cached);
	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		ttm->pages[i] = NULL;
		if (cur_page) {
			if (page_count(cur_page) != 1)
				printk(KERN_ERR TTM_PFX
				       "Erroneous page count. "
				       "Leaking pages.\n");
			ttm_mem_global_free_page(ttm->glob->mem_glob,
						 cur_page);
			__free_page(cur_page);
		}
	}
	ttm->state = tt_unpopulated;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
}
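/*
 * Full teardown: backend, pages (user-pinned or driver-allocated),
 * the page directory, and any non-persistent swap storage still
 * attached to the ttm.
 */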
void ttm_tt_destroy(struct ttm_tt *ttm)
{
	struct ttm_backend *be;

	if (unlikely(ttm == NULL))
		return;

	be = ttm->be;
	if (likely(be != NULL)) {
		be->func->destroy(be);
		ttm->be = NULL;
	}

	if (likely(ttm->pages != NULL)) {
		if (ttm->page_flags & TTM_PAGE_FLAG_USER)
			ttm_tt_free_user_pages(ttm);
		else
			ttm_tt_free_alloced_pages(ttm);

		ttm_tt_free_page_directory(ttm);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP) &&
	    ttm->swap_storage)
		fput(ttm->swap_storage);

	kfree(ttm);
}
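/**
 * Populates the ttm with pinned user pages from the range
 * [start, start + num_pages) of the task's address space. The pages
 * are accounted as lowmem against the global memory limit.
 */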
int ttm_tt_set_user(struct ttm_tt *ttm,
		    struct task_struct *tsk,
		    unsigned long start, unsigned long num_pages)
{
	struct mm_struct *mm = tsk->mm;
	int ret;
	int write = (ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0;
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;

	BUG_ON(num_pages != ttm->num_pages);
	BUG_ON((ttm->page_flags & TTM_PAGE_FLAG_USER) == 0);

	/**
	 * Account user pages as lowmem pages for now.
	 */

	ret = ttm_mem_global_alloc(mem_glob, num_pages * PAGE_SIZE,
				   false, false);
	if (unlikely(ret != 0))
		return ret;

	down_read(&mm->mmap_sem);
	ret = get_user_pages(tsk, mm, start, num_pages,
			     write, 0, ttm->pages, NULL);
	up_read(&mm->mmap_sem);

	if (ret != num_pages && write) {
		ttm_tt_free_user_pages(ttm);
		ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE);
		return -ENOMEM;
	}

	ttm->tsk = tsk;
	ttm->start = start;
	ttm->state = tt_unbound;

	return 0;
}
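/*
 * Allocates and initializes a ttm_tt for @size bytes, rounded up to
 * whole pages. The ttm starts out unpopulated and cached, with the
 * backend supplied by the driver's create_ttm_backend_entry() hook.
 * Typical lifecycle, as suggested by the calls in this file: create,
 * then populate/set_user, bind, possibly swapout/swapin, and destroy.
 */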
struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
			     uint32_t page_flags, struct page *dummy_read_page)
{
	struct ttm_bo_driver *bo_driver = bdev->driver;
	struct ttm_tt *ttm;

	if (!bo_driver)
		return NULL;

	ttm = kzalloc(sizeof(*ttm), GFP_KERNEL);
	if (!ttm)
		return NULL;

	ttm->glob = bdev->glob;
	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;

	ttm->dummy_read_page = dummy_read_page;

	ttm_tt_alloc_page_directory(ttm);
	if (ttm->pages == NULL) {
		ttm_tt_destroy(ttm);
		printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
		return NULL;
	}
	ttm->be = bo_driver->create_ttm_backend_entry(bdev);
	if (ttm->be == NULL) {
		ttm_tt_destroy(ttm);
		printk(KERN_ERR TTM_PFX "Failed creating ttm backend entry\n");
		return NULL;
	}
	ttm->state = tt_unpopulated;
	return ttm;
}
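/*
 * Unbinds the backend if currently bound; the pages stay populated.
 */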
void ttm_tt_unbind(struct ttm_tt *ttm)
{
	int ret;
	struct ttm_backend *be = ttm->be;

	if (ttm->state == tt_bound) {
		ret = be->func->unbind(be);
		BUG_ON(ret);
		ttm->state = tt_unbound;
	}
}
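/*
 * Populates the ttm if necessary and asks the backend to bind it to
 * bo_mem. User ttms are conservatively marked dirty on every bind.
 */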
int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	int ret = 0;
	struct ttm_backend *be;

	if (!ttm)
		return -EINVAL;

	if (ttm->state == tt_bound)
		return 0;

	be = ttm->be;

	ret = ttm_tt_populate(ttm);
	if (ret)
		return ret;

	ret = be->func->bind(be, bo_mem);
	if (ret) {
		printk(KERN_ERR TTM_PFX "Couldn't bind backend.\n");
		return ret;
	}

	ttm->state = tt_bound;

	if (ttm->page_flags & TTM_PAGE_FLAG_USER)
		ttm->page_flags |= TTM_PAGE_FLAG_USER_DIRTY;
	return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);
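/*
 * Brings a swapped-out ttm back: user ttms are simply re-pinned with
 * ttm_tt_set_user(), while driver-allocated ttms are copied back page
 * by page from the shmem swap object.
 */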
static int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	void *from_virtual;
	void *to_virtual;
	int i;
	int ret = -ENOMEM;

	if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
		ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start,
				      ttm->num_pages);
		if (unlikely(ret != 0))
			return ret;

		ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
		return 0;
	}

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = read_mapping_page(swap_space, i, NULL);
		if (IS_ERR(from_page)) {
			ret = PTR_ERR(from_page);
			goto out_err;
		}
		to_page = __ttm_tt_get_page(ttm, i);
		if (unlikely(to_page == NULL))
			goto out_err;

		preempt_disable();
		from_virtual = kmap_atomic(from_page, KM_USER0);
		to_virtual = kmap_atomic(to_page, KM_USER1);
		memcpy(to_virtual, from_virtual, PAGE_SIZE);
		kunmap_atomic(to_virtual, KM_USER1);
		kunmap_atomic(from_virtual, KM_USER0);
		preempt_enable();
		page_cache_release(from_page);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP))
		fput(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

	return 0;
out_err:
	ttm_tt_free_alloced_pages(ttm);
	return ret;
}
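/*
 * Copies the ttm's pages into swap storage (a private shmem file
 * unless a persistent one is supplied) and frees the originals.
 * The ttm must be unbound and in the default cached state.
 */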
int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	void *from_virtual;
	void *to_virtual;
	int i;
	int ret = -ENOMEM;

	BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
	BUG_ON(ttm->caching_state != tt_cached);

	/*
	 * For user buffers, just unpin the pages, as there should be
	 * vma references.
	 */

	if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
		ttm_tt_free_user_pages(ttm);
		ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
		ttm->swap_storage = NULL;
		return 0;
	}

	if (!persistant_swap_storage) {
		swap_storage = shmem_file_setup("ttm swap",
						ttm->num_pages << PAGE_SHIFT,
						0);
		if (unlikely(IS_ERR(swap_storage))) {
			printk(KERN_ERR "Failed allocating swap storage.\n");
			return PTR_ERR(swap_storage);
		}
	} else
		swap_storage = persistant_swap_storage;

	swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;
		to_page = read_mapping_page(swap_space, i, NULL);
		if (unlikely(IS_ERR(to_page))) {
			ret = PTR_ERR(to_page);
			goto out_err;
		}
		preempt_disable();
		from_virtual = kmap_atomic(from_page, KM_USER0);
		to_virtual = kmap_atomic(to_page, KM_USER1);
		memcpy(to_virtual, from_virtual, PAGE_SIZE);
		kunmap_atomic(to_virtual, KM_USER1);
		kunmap_atomic(from_virtual, KM_USER0);
		preempt_enable();
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
		page_cache_release(to_page);
	}

	ttm_tt_free_alloced_pages(ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
	if (persistant_swap_storage)
		ttm->page_flags |= TTM_PAGE_FLAG_PERSISTANT_SWAP;

	return 0;
out_err:
	if (!persistant_swap_storage)
		fput(swap_storage);

	return ret;
}