/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <ttm/ttm_module.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/mm.h>
#include <linux/pfn_t.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/uaccess.h>

#define TTM_BO_VM_NUM_PREFAULT 16
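
/*
 * Wait for a pipelined buffer move to complete before servicing a fault.
 * When the fault allows retries, the wait happens with mmap_sem dropped
 * and VM_FAULT_RETRY is returned instead of blocking the faulting task.
 * Returns 0 once the buffer is idle, or a VM_FAULT_ code otherwise.
 */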
static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
				struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	int ret = 0;

	if (likely(!bo->moving))
		goto out_unlock;

	/*
	 * Quick non-stalling check for idle.
	 */
	if (fence_is_signaled(bo->moving))
		goto out_clear;

	/*
	 * If possible, avoid waiting for GPU with mmap_sem
	 * held.
	 */
	if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
		ret = VM_FAULT_RETRY;
		if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
			goto out_unlock;

		ttm_bo_reference(bo);
		up_read(&vma->vm_mm->mmap_sem);
		(void) fence_wait(bo->moving, true);
		ttm_bo_unreserve(bo);
		ttm_bo_unref(&bo);
		goto out_unlock;
	}

	/*
	 * Ordinary wait.
	 */
	ret = fence_wait(bo->moving, true);
	if (unlikely(ret != 0)) {
		ret = (ret != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
			VM_FAULT_NOPAGE;
		goto out_unlock;
	}

out_clear:
	fence_put(bo->moving);
	bo->moving = NULL;

out_unlock:
	return ret;
}
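
/*
 * Fault handler for TTM buffer object mappings: trylock the reservation to
 * avoid inverting the mmap_sem vs. bo::reserve lock order, wait for any
 * pending pipelined move, then insert the PFN for the faulting page and
 * speculatively prefault up to TTM_BO_VM_NUM_PREFAULT further pages.
 */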
static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
	    vma->vm_private_data;
	struct ttm_bo_device *bdev = bo->bdev;
	unsigned long page_offset;
	unsigned long page_last;
	unsigned long pfn;
	struct ttm_tt *ttm = NULL;
	struct page *page;
	int ret;
	int i;
	unsigned long address = (unsigned long)vmf->virtual_address;
	int retval = VM_FAULT_NOPAGE;
	struct ttm_mem_type_manager *man =
		&bdev->man[bo->mem.mem_type];
	struct vm_area_struct cvma;

	/*
	 * Work around locking order reversal in fault / nopfn
	 * between mmap_sem and bo_reserve: Perform a trylock operation
	 * for reserve, and if it fails, retry the fault after waiting
	 * for the buffer to become unreserved.
	 */
	ret = ttm_bo_reserve(bo, true, true, NULL);
	if (unlikely(ret != 0)) {
		if (ret != -EBUSY)
			return VM_FAULT_NOPAGE;

		if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
			if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
				ttm_bo_reference(bo);
				up_read(&vma->vm_mm->mmap_sem);
				(void) ttm_bo_wait_unreserved(bo);
				ttm_bo_unref(&bo);
			}

			return VM_FAULT_RETRY;
		}

		/*
		 * If we'd want to change locking order to
		 * mmap_sem -> bo::reserve, we'd use a blocking reserve here
		 * instead of retrying the fault...
		 */
		return VM_FAULT_NOPAGE;
	}

	/*
	 * Refuse to fault imported pages. This should be handled
	 * (if at all) by redirecting mmap to the exporter.
	 */
	if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
		retval = VM_FAULT_SIGBUS;
		goto out_unlock;
	}

	if (bdev->driver->fault_reserve_notify) {
		ret = bdev->driver->fault_reserve_notify(bo);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
		case -ERESTARTSYS:
			retval = VM_FAULT_NOPAGE;
			goto out_unlock;
		default:
			retval = VM_FAULT_SIGBUS;
			goto out_unlock;
		}
	}

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */
	ret = ttm_bo_vm_fault_idle(bo, vma, vmf);
	if (unlikely(ret != 0)) {
		retval = ret;

		if (retval == VM_FAULT_RETRY &&
		    !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
			/* The BO has already been unreserved. */
			return retval;
		}

		goto out_unlock;
	}

	ret = ttm_mem_io_lock(man, true);
	if (unlikely(ret != 0)) {
		retval = VM_FAULT_NOPAGE;
		goto out_unlock;
	}
	ret = ttm_mem_io_reserve_vm(bo);
	if (unlikely(ret != 0)) {
		retval = VM_FAULT_SIGBUS;
		goto out_io_unlock;
	}

	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
		vma->vm_pgoff - drm_vma_node_start(&bo->vma_node);
	page_last = vma_pages(vma) + vma->vm_pgoff -
		drm_vma_node_start(&bo->vma_node);

	if (unlikely(page_offset >= bo->num_pages)) {
		retval = VM_FAULT_SIGBUS;
		goto out_io_unlock;
	}

	/*
	 * Make a local vma copy to modify the page_prot member
	 * and vm_flags if necessary. The vma parameter is protected
	 * by mmap_sem in write mode.
	 */
	cvma = *vma;
	cvma.vm_page_prot = vm_get_page_prot(cvma.vm_flags);

	if (bo->mem.bus.is_iomem) {
		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
						cvma.vm_page_prot);
	} else {
		ttm = bo->ttm;
		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
						cvma.vm_page_prot);

		/* Allocate all pages at once, the most common usage */
		if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
			retval = VM_FAULT_OOM;
			goto out_io_unlock;
		}
	}

	/*
	 * Speculatively prefault a number of pages. Only error on
	 * first page.
	 */
	for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
		if (bo->mem.bus.is_iomem)
			pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset;
		else {
			page = ttm->pages[page_offset];
			if (unlikely(!page && i == 0)) {
				retval = VM_FAULT_OOM;
				goto out_io_unlock;
			} else if (unlikely(!page)) {
				break;
			}
			page->mapping = vma->vm_file->f_mapping;
			page->index = drm_vma_node_start(&bo->vma_node) +
				page_offset;
			pfn = page_to_pfn(page);
		}

		if (vma->vm_flags & VM_MIXEDMAP)
			ret = vm_insert_mixed(&cvma, address,
					      __pfn_to_pfn_t(pfn, PFN_DEV));
		else
			ret = vm_insert_pfn(&cvma, address, pfn);

		/*
		 * Somebody beat us to this PTE, or we prefaulted into an
		 * already populated PTE, or hit a prefaulting error.
		 */
		if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
			break;
		else if (unlikely(ret != 0)) {
			retval =
			    (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
			goto out_io_unlock;
		}

		address += PAGE_SIZE;
		if (unlikely(++page_offset >= page_last))
			break;
	}
out_io_unlock:
	ttm_mem_io_unlock(man);
out_unlock:
	ttm_bo_unreserve(bo);
	return retval;
}
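
/*
 * vm_operations open/close: take and drop a buffer object reference as the
 * VMA is duplicated or torn down, so the mapping keeps the bo alive for its
 * own lifetime.
 */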
static void ttm_bo_vm_open(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo =
	    (struct ttm_buffer_object *)vma->vm_private_data;

	WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);

	(void)ttm_bo_reference(bo);
}

static void ttm_bo_vm_close(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data;

	ttm_bo_unref(&bo);
	vma->vm_private_data = NULL;
}
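
/*
 * VMA callbacks used for all TTM buffer object mappings.
 */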
static const struct vm_operations_struct ttm_bo_vm_ops = {
	.fault = ttm_bo_vm_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close
};
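
/*
 * Look up the buffer object backing an mmap offset range in the device's
 * vma_manager and take a reference on it.  Returns NULL if no such object
 * exists or if it is already being destroyed.
 */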
static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
						  unsigned long offset,
						  unsigned long pages)
{
	struct drm_vma_offset_node *node;
	struct ttm_buffer_object *bo = NULL;

	drm_vma_offset_lock_lookup(&bdev->vma_manager);

	node = drm_vma_offset_lookup_locked(&bdev->vma_manager, offset, pages);
	if (likely(node)) {
		bo = container_of(node, struct ttm_buffer_object, vma_node);
		if (!kref_get_unless_zero(&bo->kref))
			bo = NULL;
	}

	drm_vma_offset_unlock_lookup(&bdev->vma_manager);

	if (!bo)
		pr_err("Could not find buffer object to map\n");

	return bo;
}
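
/*
 * mmap entry point for TTM-based drivers: resolve the buffer object from
 * the mmap offset, let the driver verify access rights, and set up the VMA
 * to fault through ttm_bo_vm_ops.
 */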
int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
		struct ttm_bo_device *bdev)
{
	struct ttm_bo_driver *driver;
	struct ttm_buffer_object *bo;
	int ret;

	bo = ttm_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
	if (unlikely(!bo))
		return -EINVAL;

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}
	ret = driver->verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;

	vma->vm_ops = &ttm_bo_vm_ops;

	/*
	 * Note: We're transferring the bo reference to
	 * vma->vm_private_data here.
	 */
	vma->vm_private_data = bo;

	/*
	 * We'd like to use VM_PFNMAP on shared mappings, where
	 * (vma->vm_flags & VM_SHARED) != 0, for performance reasons,
	 * but for some reason VM_PFNMAP + x86 PAT + write-combine is very
	 * bad for performance. Until that has been sorted out, use
	 * VM_MIXEDMAP on all mappings. See freedesktop.org bug #75719
	 */
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
	return 0;
out_unref:
	ttm_bo_unref(&bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mmap);
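
/*
 * Set up a VMA to map a single, already looked-up buffer object, as used
 * for fbdev emulation.  The mmap offset must be zero since no vma_manager
 * lookup is performed.
 */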
int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
	if (vma->vm_pgoff != 0)
		return -EACCES;

	vma->vm_ops = &ttm_bo_vm_ops;
	vma->vm_private_data = ttm_bo_reference(bo);
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_flags |= VM_IO | VM_DONTEXPAND;
	return 0;
}
EXPORT_SYMBOL(ttm_fbdev_mmap);