/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
#define pr_fmt(fmt) "[TTM] " fmt

#include <ttm/ttm_module.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/uaccess.h>

#define TTM_BO_VM_NUM_PREFAULT 16
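
/*
 * Sizing note (illustrative arithmetic, not from the original source): with
 * 4 KiB pages, prefaulting 16 pages maps up to 64 KiB around the faulting
 * address, trading a slightly longer single fault for fewer faults on
 * linearly accessed mappings.
 */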

static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
	    vma->vm_private_data;
	struct ttm_bo_device *bdev = bo->bdev;
	unsigned long page_offset;
	unsigned long page_last;
	unsigned long pfn;
	struct ttm_tt *ttm = NULL;
	struct page *page;
	int ret;
	int i;
	unsigned long address = (unsigned long)vmf->virtual_address;
	int retval = VM_FAULT_NOPAGE;
	struct ttm_mem_type_manager *man =
		&bdev->man[bo->mem.mem_type];

	/*
	 * Work around locking order reversal in fault / nopfn
	 * between mmap_sem and bo_reserve: Perform a trylock operation
	 * for reserve, and if it fails, retry the fault after scheduling.
	 */

	ret = ttm_bo_reserve(bo, true, true, false, 0);
	if (unlikely(ret != 0)) {
		if (ret == -EBUSY)
			set_need_resched();
		return VM_FAULT_NOPAGE;
	}

	if (bdev->driver->fault_reserve_notify) {
		ret = bdev->driver->fault_reserve_notify(bo);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
			set_need_resched();
		case -ERESTARTSYS:
			retval = VM_FAULT_NOPAGE;
			goto out_unlock;
		default:
			retval = VM_FAULT_SIGBUS;
			goto out_unlock;
		}
	}
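
	/*
	 * In the switch above, VM_FAULT_NOPAGE makes the VM retry the
	 * access later without installing a PTE (appropriate for the
	 * transient -EBUSY / -ERESTARTSYS cases), while VM_FAULT_SIGBUS
	 * is reserved for hard errors that a retry cannot cure.
	 */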

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */

	spin_lock(&bdev->fence_lock);
	if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
		ret = ttm_bo_wait(bo, false, true, false);
		spin_unlock(&bdev->fence_lock);
		if (unlikely(ret != 0)) {
			retval = (ret != -ERESTARTSYS) ?
			    VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
			goto out_unlock;
		}
	} else
		spin_unlock(&bdev->fence_lock);

	ret = ttm_mem_io_lock(man, true);
	if (unlikely(ret != 0)) {
		retval = VM_FAULT_NOPAGE;
		goto out_unlock;
	}
	ret = ttm_mem_io_reserve_vm(bo);
	if (unlikely(ret != 0)) {
		retval = VM_FAULT_SIGBUS;
		goto out_io_unlock;
	}

	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
		vma->vm_pgoff - drm_vma_node_start(&bo->vma_node);
	page_last = vma_pages(vma) + vma->vm_pgoff -
		drm_vma_node_start(&bo->vma_node);
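
	/*
	 * Worked example (illustrative numbers, not from this file): with
	 * 4 KiB pages and vm_pgoff equal to drm_vma_node_start(), a fault
	 * at address == vm_start + 0x3000 gives page_offset == 3, i.e. the
	 * fourth page of the buffer object.
	 */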

	if (unlikely(page_offset >= bo->num_pages)) {
		retval = VM_FAULT_SIGBUS;
		goto out_io_unlock;
	}

	/*
	 * Strictly, we're not allowed to modify vma->vm_page_prot here,
	 * since the mmap_sem is only held in read mode. However, we
	 * modify only the caching bits of vma->vm_page_prot and
	 * consider those bits protected by
	 * the bo->mutex, as we should be the only writers.
	 * There shouldn't really be any readers of these bits except
	 * within vm_insert_mixed()? fork?
	 *
	 * TODO: Add a list of vmas to the bo, and change the
	 * vma->vm_page_prot when the object changes caching policy, with
	 * the correct locks held.
	 */
	if (bo->mem.bus.is_iomem) {
		vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
						vma->vm_page_prot);
	} else {
		ttm = bo->ttm;
		vma->vm_page_prot = (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
		    vm_get_page_prot(vma->vm_flags) :
		    ttm_io_prot(bo->mem.placement, vma->vm_page_prot);

		/* Allocate all pages at once, the most common usage. */
		if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
			retval = VM_FAULT_OOM;
			goto out_io_unlock;
		}
	}

	/*
	 * Speculatively prefault a number of pages. Only error on
	 * first page.
	 */

	for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
		if (bo->mem.bus.is_iomem)
			pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >>
			       PAGE_SHIFT) + page_offset;
		else {
			page = ttm->pages[page_offset];
			if (unlikely(!page && i == 0)) {
				retval = VM_FAULT_OOM;
				goto out_io_unlock;
			} else if (unlikely(!page)) {
				break;
			}
			pfn = page_to_pfn(page);
		}

		ret = vm_insert_mixed(vma, address, pfn);
		/*
		 * Somebody beat us to this PTE or prefaulting to
		 * an already populated PTE, or prefaulting error.
		 */

		if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
			break;
		else if (unlikely(ret != 0)) {
			retval =
			    (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
			goto out_io_unlock;
		}

		address += PAGE_SIZE;
		if (unlikely(++page_offset >= page_last))
			break;
	}
out_io_unlock:
	ttm_mem_io_unlock(man);
out_unlock:
	ttm_bo_unreserve(bo);
	return retval;
}

static void ttm_bo_vm_open(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo =
	    (struct ttm_buffer_object *)vma->vm_private_data;

	(void)ttm_bo_reference(bo);
}

static void ttm_bo_vm_close(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo =
	    (struct ttm_buffer_object *)vma->vm_private_data;

	ttm_bo_unref(&bo);
	vma->vm_private_data = NULL;
}

static const struct vm_operations_struct ttm_bo_vm_ops = {
	.fault = ttm_bo_vm_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close
};
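
/*
 * Lifecycle note: the VM invokes .open whenever a vma is duplicated (fork,
 * vma split) and .close when a mapping goes away, so the pairing above keeps
 * one bo reference per live mapping:
 *
 *	fork()          -> ttm_bo_vm_open()  -> ttm_bo_reference(bo)
 *	munmap()/exit() -> ttm_bo_vm_close() -> ttm_bo_unref(&bo)
 */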

static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
						  unsigned long offset,
						  unsigned long pages)
{
	struct drm_vma_offset_node *node;
	struct ttm_buffer_object *bo = NULL;

	drm_vma_offset_lock_lookup(&bdev->vma_manager);

	node = drm_vma_offset_lookup_locked(&bdev->vma_manager, offset, pages);
	if (likely(node)) {
		bo = container_of(node, struct ttm_buffer_object, vma_node);
		if (!kref_get_unless_zero(&bo->kref))
			bo = NULL;
	}

	drm_vma_offset_unlock_lookup(&bdev->vma_manager);

	if (!bo)
		pr_err("Could not find buffer object to map\n");

	return bo;
}

int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
		struct ttm_bo_device *bdev)
{
	struct ttm_bo_driver *driver;
	struct ttm_buffer_object *bo;
	int ret;

	bo = ttm_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
	if (unlikely(!bo))
		return -EINVAL;

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}
	ret = driver->verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;

	vma->vm_ops = &ttm_bo_vm_ops;

	/*
	 * Note: We're transferring the bo reference to
	 * vma->vm_private_data here.
	 */

	vma->vm_private_data = bo;
	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
	return 0;
out_unref:
	ttm_bo_unref(&bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mmap);
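
/*
 * Usage sketch (hypothetical driver; the foo_* names are illustrative and
 * not part of TTM): a TTM-based DRM driver typically forwards its
 * file_operations .mmap hook straight to ttm_bo_mmap(), passing its own
 * struct ttm_bo_device:
 *
 *	static int foo_mmap(struct file *filp, struct vm_area_struct *vma)
 *	{
 *		struct drm_file *file_priv = filp->private_data;
 *		struct foo_device *fdev =
 *			file_priv->minor->dev->dev_private;
 *
 *		return ttm_bo_mmap(filp, vma, &fdev->bdev);
 *	}
 */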

int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
	if (vma->vm_pgoff != 0)
		return -EACCES;

	vma->vm_ops = &ttm_bo_vm_ops;
	vma->vm_private_data = ttm_bo_reference(bo);
	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
	return 0;
}
EXPORT_SYMBOL(ttm_fbdev_mmap);
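
/*
 * Usage sketch (hypothetical; foo_* names are illustrative): unlike
 * ttm_bo_mmap(), ttm_fbdev_mmap() maps one already-known bo, e.g. the
 * fbdev scanout buffer, from a struct fb_ops .fb_mmap hook:
 *
 *	static int foo_fb_mmap(struct fb_info *info,
 *			       struct vm_area_struct *vma)
 *	{
 *		struct foo_fbdev *fbdev = info->par;
 *
 *		return ttm_fbdev_mmap(vma, fbdev->bo);
 *	}
 */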