/* drivers/gpu/drm/ttm/ttm_tt.c */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/export.h>
#include "drm_cache.h"
#include "drm_mem_util.h"
#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include "ttm/ttm_page_alloc.h"
/**
 * Allocates storage for pointers to the pages that back the ttm.
 */
static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(void*));
}

static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
	ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages, sizeof(void*));
	ttm->dma_address = drm_calloc_large(ttm->ttm.num_pages,
					    sizeof(*ttm->dma_address));
}
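/*
 * Note: drm_calloc_large() returns zeroed storage and falls back to
 * vmalloc for directories larger than a page, so big ttms do not
 * depend on contiguous kernel memory.  The dma variant keeps a second,
 * parallel array so that dma_address[i] always corresponds to pages[i]
 * once the populate callback fills them in.
 */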
#ifdef CONFIG_X86
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_old,
					  enum ttm_caching_state c_new)
{
	int ret = 0;

	if (PageHighMem(p))
		return 0;

	if (c_old != tt_cached) {
		/* p isn't in the default caching state, set it to
		 * writeback first to free its current memtype. */
		ret = set_pages_wb(p, 1);
		if (ret)
			return ret;
	}

	if (c_new == tt_wc)
		ret = set_memory_wc((unsigned long) page_address(p), 1);
	else if (c_new == tt_uncached)
		ret = set_pages_uc(p, 1);

	return ret;
}
#else /* CONFIG_X86 */
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_old,
					  enum ttm_caching_state c_new)
{
	return 0;
}
#endif /* CONFIG_X86 */
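/*
 * The transition above always passes through write-back: moving a page
 * straight from one non-default state to another (e.g. wc -> uc) would
 * try to reserve a new memtype while the old one is still held, so
 * set_pages_wb() is used first to release the current memtype, and
 * only then is the new wc/uc attribute applied.
 */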
/*
 * Change the caching policy for the linear kernel map
 * for a range of pages in a ttm.
 */

static int ttm_tt_set_caching(struct ttm_tt *ttm,
			      enum ttm_caching_state c_state)
{
	int i, j;
	struct page *cur_page;
	int ret;

	if (ttm->caching_state == c_state)
		return 0;

	if (ttm->state == tt_unpopulated) {
		/* Change caching but don't populate */
		ttm->caching_state = c_state;
		return 0;
	}

	if (ttm->caching_state == tt_cached)
		drm_clflush_pages(ttm->pages, ttm->num_pages);

	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		if (likely(cur_page != NULL)) {
			ret = ttm_tt_set_page_caching(cur_page,
						      ttm->caching_state,
						      c_state);
			if (unlikely(ret != 0))
				goto out_err;
		}
	}

	ttm->caching_state = c_state;

	return 0;

out_err:
	/* Roll the already-converted pages [0, i) back to the old state. */
	for (j = 0; j < i; ++j) {
		cur_page = ttm->pages[j];
		if (likely(cur_page != NULL)) {
			(void)ttm_tt_set_page_caching(cur_page, c_state,
						      ttm->caching_state);
		}
	}

	return ret;
}
int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
	enum ttm_caching_state state;

	if (placement & TTM_PL_FLAG_WC)
		state = tt_wc;
	else if (placement & TTM_PL_FLAG_UNCACHED)
		state = tt_uncached;
	else
		state = tt_cached;

	return ttm_tt_set_caching(ttm, state);
}
EXPORT_SYMBOL(ttm_tt_set_placement_caching);
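/*
 * Example (sketch): callers pass the raw placement flags, and the
 * mapping is strictly prioritized; TTM_PL_FLAG_WC wins if both WC and
 * UNCACHED are set:
 *
 *	ret = ttm_tt_set_placement_caching(ttm, TTM_PL_FLAG_WC);
 *	// equivalent to ttm_tt_set_caching(ttm, tt_wc)
 */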
void ttm_tt_destroy(struct ttm_tt *ttm)
{
	if (unlikely(ttm == NULL))
		return;

	if (ttm->state == tt_bound) {
		ttm_tt_unbind(ttm);
	}

	if (likely(ttm->pages != NULL)) {
		ttm->bdev->driver->ttm_tt_unpopulate(ttm);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
	    ttm->swap_storage)
		fput(ttm->swap_storage);

	ttm->swap_storage = NULL;
	ttm->func->destroy(ttm);
}
int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
		unsigned long size, uint32_t page_flags,
		struct page *dummy_read_page)
{
	ttm->bdev = bdev;
	ttm->glob = bdev->glob;
	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;
	ttm->dummy_read_page = dummy_read_page;
	ttm->state = tt_unpopulated;
	ttm->swap_storage = NULL;

	ttm_tt_alloc_page_directory(ttm);
	if (!ttm->pages) {
		ttm_tt_destroy(ttm);
		printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_tt_init);
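/*
 * Usage sketch (hypothetical driver code; my_ttm_tt and my_backend_func
 * are illustrative names, not part of this file): a driver's
 * ttm_tt_create() hook typically embeds struct ttm_tt in its own type,
 * points ->func at its backend ops and then calls ttm_tt_init():
 *
 *	struct my_ttm_tt {
 *		struct ttm_tt ttm;
 *	};
 *
 *	static struct ttm_tt *my_ttm_tt_create(struct ttm_bo_device *bdev,
 *					       unsigned long size,
 *					       uint32_t page_flags,
 *					       struct page *dummy_read_page)
 *	{
 *		struct my_ttm_tt *tt = kzalloc(sizeof(*tt), GFP_KERNEL);
 *
 *		if (!tt)
 *			return NULL;
 *		tt->ttm.func = &my_backend_func;  // hypothetical ops table
 *		if (ttm_tt_init(&tt->ttm, bdev, size, page_flags,
 *				dummy_read_page)) {
 *			kfree(tt);
 *			return NULL;
 *		}
 *		return &tt->ttm;
 *	}
 */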
void ttm_tt_fini(struct ttm_tt *ttm)
{
	drm_free_large(ttm->pages);
	ttm->pages = NULL;
}
EXPORT_SYMBOL(ttm_tt_fini);
int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
		    unsigned long size, uint32_t page_flags,
		    struct page *dummy_read_page)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;

	ttm->bdev = bdev;
	ttm->glob = bdev->glob;
	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;
	ttm->dummy_read_page = dummy_read_page;
	ttm->state = tt_unpopulated;
	ttm->swap_storage = NULL;

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	ttm_dma_tt_alloc_page_directory(ttm_dma);
	if (!ttm->pages || !ttm_dma->dma_address) {
		ttm_tt_destroy(ttm);
		printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_dma_tt_init);
void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;

	drm_free_large(ttm->pages);
	ttm->pages = NULL;
	drm_free_large(ttm_dma->dma_address);
	ttm_dma->dma_address = NULL;
}
EXPORT_SYMBOL(ttm_dma_tt_fini);
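/*
 * The dma variant is intended for drivers whose populate hook uses the
 * coherent page pool (ttm_dma_populate()/ttm_dma_unpopulate() in
 * ttm_page_alloc_dma.c), which fills pages[] and the matching
 * dma_address[] entries together.
 */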
void ttm_tt_unbind(struct ttm_tt *ttm)
{
	int ret;

	if (ttm->state == tt_bound) {
		ret = ttm->func->unbind(ttm);
		BUG_ON(ret);
		ttm->state = tt_unbound;
	}
}

int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	int ret = 0;

	if (!ttm)
		return -EINVAL;

	if (ttm->state == tt_bound)
		return 0;

	ret = ttm->bdev->driver->ttm_tt_populate(ttm);
	if (ret)
		return ret;

	ret = ttm->func->bind(ttm, bo_mem);
	if (unlikely(ret != 0))
		return ret;

	ttm->state = tt_bound;

	return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);
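/*
 * State machine implemented by bind/unbind above:
 *
 *	tt_unpopulated --populate--> tt_unbound --bind--> tt_bound
 *	tt_bound --unbind--> tt_unbound
 *
 * ttm_tt_bind() populates on demand, so a caller only needs a
 * successfully initialized ttm; binding an already bound ttm is a
 * no-op that returns 0.
 */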
int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	void *from_virtual;
	void *to_virtual;
	int i;
	int ret = -ENOMEM;

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = shmem_read_mapping_page(swap_space, i);
		if (IS_ERR(from_page)) {
			ret = PTR_ERR(from_page);
			goto out_err;
		}
		to_page = ttm->pages[i];
		if (unlikely(to_page == NULL))
			goto out_err;

		preempt_disable();
		from_virtual = kmap_atomic(from_page, KM_USER0);
		to_virtual = kmap_atomic(to_page, KM_USER1);
		memcpy(to_virtual, from_virtual, PAGE_SIZE);
		kunmap_atomic(to_virtual, KM_USER1);
		kunmap_atomic(from_virtual, KM_USER0);
		preempt_enable();
		page_cache_release(from_page);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
		fput(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

	return 0;
out_err:
	return ret;
}
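/*
 * Swap-in copies each page back from the shmem object through
 * short-lived atomic mappings, so highmem pages on both sides are
 * handled without permanent kernel mappings; the backing file is then
 * dropped unless it is caller-provided persistent storage.
 */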
int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	void *from_virtual;
	void *to_virtual;
	int i;
	int ret = -ENOMEM;

	BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
	BUG_ON(ttm->caching_state != tt_cached);

	if (!persistent_swap_storage) {
		swap_storage = shmem_file_setup("ttm swap",
						ttm->num_pages << PAGE_SHIFT,
						0);
		if (unlikely(IS_ERR(swap_storage))) {
			printk(KERN_ERR "Failed allocating swap storage.\n");
			return PTR_ERR(swap_storage);
		}
	} else
		swap_storage = persistent_swap_storage;

	swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;
		to_page = shmem_read_mapping_page(swap_space, i);
		if (unlikely(IS_ERR(to_page))) {
			ret = PTR_ERR(to_page);
			goto out_err;
		}
		preempt_disable();
		from_virtual = kmap_atomic(from_page, KM_USER0);
		to_virtual = kmap_atomic(to_page, KM_USER1);
		memcpy(to_virtual, from_virtual, PAGE_SIZE);
		kunmap_atomic(to_virtual, KM_USER1);
		kunmap_atomic(from_virtual, KM_USER0);
		preempt_enable();
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
		page_cache_release(to_page);
	}

	ttm->bdev->driver->ttm_tt_unpopulate(ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
	if (persistent_swap_storage)
		ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;

	return 0;
out_err:
	if (!persistent_swap_storage)
		fput(swap_storage);

	return ret;
}
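/*
 * Swap-out mirrors swap-in: the ttm's pages are copied into an
 * anonymous shmem file ("ttm swap") that the core VM can page out like
 * ordinary page cache, after which the driver's ttm_tt_unpopulate()
 * hook releases the original pages.  A caller-supplied
 * persistent_swap_storage file is reused instead and, being flagged
 * TTM_PAGE_FLAG_PERSISTENT_SWAP, is not dropped when the ttm is
 * destroyed.
 */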