/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <ttm/ttm_module.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/uaccess.h>

#define TTM_BO_VM_NUM_PREFAULT 16
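
/*
 * Look up the buffer object whose address-space node covers the page
 * range [page_start, page_start + num_pages) in the device's
 * address-space rb-tree. Returns the matching object or NULL if no
 * object fully covers the range. Caller must hold bdev->vm_lock.
 */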
static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
                                                     unsigned long page_start,
                                                     unsigned long num_pages)
{
        struct rb_node *cur = bdev->addr_space_rb.rb_node;
        unsigned long cur_offset;
        struct ttm_buffer_object *bo;
        struct ttm_buffer_object *best_bo = NULL;

        while (likely(cur != NULL)) {
                bo = rb_entry(cur, struct ttm_buffer_object, vm_rb);
                cur_offset = bo->vm_node->start;
                if (page_start >= cur_offset) {
                        /* Candidate match; keep searching right for a closer one. */
                        cur = cur->rb_right;
                        best_bo = bo;
                        if (page_start == cur_offset)
                                break;
                } else
                        cur = cur->rb_left;
        }

        if (unlikely(best_bo == NULL))
                return NULL;

        /* The object must cover the whole requested page range. */
        if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
                     (page_start + num_pages)))
                return NULL;

        return best_bo;
}

static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
            vma->vm_private_data;
        struct ttm_bo_device *bdev = bo->bdev;
        unsigned long bus_base;
        unsigned long bus_offset;
        unsigned long bus_size;
        unsigned long page_offset;
        unsigned long page_last;
        unsigned long pfn;
        struct ttm_tt *ttm = NULL;
        struct page *page;
        int ret;
        int i;
        bool is_iomem;
        unsigned long address = (unsigned long)vmf->virtual_address;
        int retval = VM_FAULT_NOPAGE;

        /*
         * Work around locking order reversal in fault / nopfn
         * between mmap_sem and bo_reserve: Perform a trylock operation
         * for reserve, and if it fails, retry the fault after scheduling.
         */

        ret = ttm_bo_reserve(bo, true, true, false, 0);
        if (unlikely(ret != 0)) {
                if (ret == -EBUSY)
                        set_need_resched();
                return VM_FAULT_NOPAGE;
        }

        /*
         * Give the driver a chance to act on the reserved object before
         * the fault is serviced.
         */
        if (bdev->driver->fault_reserve_notify)
                bdev->driver->fault_reserve_notify(bo);

        /*
         * Wait for buffer data in transit, due to a pipelined
         * move.
         */

        spin_lock(&bo->lock);
        if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
                ret = ttm_bo_wait(bo, false, true, false);
                spin_unlock(&bo->lock);
                if (unlikely(ret != 0)) {
                        retval = (ret != -ERESTARTSYS) ?
                            VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
                        goto out_unlock;
                }
        } else
                spin_unlock(&bo->lock);

        ret = ttm_bo_pci_offset(bdev, &bo->mem, &bus_base, &bus_offset,
                                &bus_size);
        if (unlikely(ret != 0)) {
                retval = VM_FAULT_SIGBUS;
                goto out_unlock;
        }

        is_iomem = (bus_size != 0);
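
        /*
         * A nonzero bus_size means the buffer is visible through the
         * device aperture, so PTEs are built from pfn arithmetic on the
         * bus address; otherwise pages are taken from the ttm_tt
         * backing store below.
         */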

        page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
            bo->vm_node->start - vma->vm_pgoff;
        page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) +
            bo->vm_node->start - vma->vm_pgoff;

        if (unlikely(page_offset >= bo->num_pages)) {
                retval = VM_FAULT_SIGBUS;
                goto out_unlock;
        }

        /*
         * Strictly, we're not allowed to modify vma->vm_page_prot here,
         * since the mmap_sem is only held in read mode. However, we
         * modify only the caching bits of vma->vm_page_prot and
         * consider those bits protected by
         * the bo->mutex, as we should be the only writers.
         * There shouldn't really be any readers of these bits except
         * within vm_insert_mixed()? fork?
         *
         * TODO: Add a list of vmas to the bo, and change the
         * vma->vm_page_prot when the object changes caching policy, with
         * the correct locks held.
         */

        if (is_iomem) {
                vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
                                                vma->vm_page_prot);
        } else {
                ttm = bo->ttm;
                /* Cached placements keep the VMA's default protection. */
                vma->vm_page_prot = (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
                    vm_get_page_prot(vma->vm_flags) :
                    ttm_io_prot(bo->mem.placement, vma->vm_page_prot);
        }

        /*
         * Speculatively prefault a number of pages. Only error on
         * first page.
         */

        for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {

                if (is_iomem)
                        pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) +
                            page_offset;
                else {
                        page = ttm_tt_get_page(ttm, page_offset);
                        if (unlikely(!page && i == 0)) {
                                retval = VM_FAULT_OOM;
                                goto out_unlock;
                        } else if (unlikely(!page)) {
                                break;
                        }
                        pfn = page_to_pfn(page);
                }

                ret = vm_insert_mixed(vma, address, pfn);

                /*
                 * Somebody beat us to this PTE or prefaulting to
                 * an already populated PTE, or prefaulting error.
                 */

                if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
                        break;
                else if (unlikely(ret != 0)) {
                        retval =
                            (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
                        goto out_unlock;
                }

                address += PAGE_SIZE;
                if (unlikely(++page_offset >= page_last))
                        break;
        }

out_unlock:
        ttm_bo_unreserve(bo);
        return retval;
}
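
/*
 * The VMA open/close callbacks keep the buffer-object reference count
 * balanced: every additional VMA over the mapping (e.g. on fork or a
 * VMA split) takes a reference, and teardown drops it again.
 */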

static void ttm_bo_vm_open(struct vm_area_struct *vma)
{
        struct ttm_buffer_object *bo =
            (struct ttm_buffer_object *)vma->vm_private_data;

        (void)ttm_bo_reference(bo);
}

static void ttm_bo_vm_close(struct vm_area_struct *vma)
{
        struct ttm_buffer_object *bo =
            (struct ttm_buffer_object *)vma->vm_private_data;

        ttm_bo_unref(&bo);
        vma->vm_private_data = NULL;
}

static const struct vm_operations_struct ttm_bo_vm_ops = {
        .fault = ttm_bo_vm_fault,
        .open = ttm_bo_vm_open,
        .close = ttm_bo_vm_close
};
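
/*
 * Set up a VMA mapping of a buffer object. The object is looked up
 * under bdev->vm_lock and referenced before the lock is dropped, so it
 * cannot disappear while access rights are verified and the VMA is
 * wired up to ttm_bo_vm_ops.
 */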
int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
                struct ttm_bo_device *bdev)
{
        struct ttm_bo_driver *driver;
        struct ttm_buffer_object *bo;
        int ret;

        read_lock(&bdev->vm_lock);
        bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff,
                                 (vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
        if (likely(bo != NULL))
                ttm_bo_reference(bo);
        read_unlock(&bdev->vm_lock);

        if (unlikely(bo == NULL)) {
                printk(KERN_ERR TTM_PFX
                       "Could not find buffer object to map.\n");
                return -EINVAL;
        }

        driver = bo->bdev->driver;
        if (unlikely(!driver->verify_access)) {
                ret = -EPERM;
                goto out_unref;
        }
        ret = driver->verify_access(bo, filp);
        if (unlikely(ret != 0))
                goto out_unref;

        vma->vm_ops = &ttm_bo_vm_ops;

        /*
         * Note: We're transferring the bo reference to
         * vma->vm_private_data here.
         */

        vma->vm_private_data = bo;
        vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
        return 0;
out_unref:
        ttm_bo_unref(&bo);
        return ret;
}
EXPORT_SYMBOL(ttm_bo_mmap);

int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
        if (vma->vm_pgoff != 0)
                return -EACCES;

        vma->vm_ops = &ttm_bo_vm_ops;
        vma->vm_private_data = ttm_bo_reference(bo);
        vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
        return 0;
}
EXPORT_SYMBOL(ttm_fbdev_mmap);
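
/*
 * Read/write access to a buffer object through a device file offset:
 * the object is looked up by its device address-space offset, the
 * affected page range is kmapped, and data is copied to or from user
 * space while the object is reserved.
 */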

ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
                  const char __user *wbuf, char __user *rbuf, size_t count,
                  loff_t *f_pos, bool write)
{
        struct ttm_buffer_object *bo;
        struct ttm_bo_driver *driver;
        struct ttm_bo_kmap_obj map;
        unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
        unsigned long kmap_offset;
        unsigned long kmap_end;
        unsigned long kmap_num;
        size_t io_size;
        unsigned int page_offset;
        char *virtual;
        int ret;
        bool no_wait = false;
        bool dummy;

        read_lock(&bdev->vm_lock);
        bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
        if (likely(bo != NULL))
                ttm_bo_reference(bo);
        read_unlock(&bdev->vm_lock);

        if (unlikely(bo == NULL))
                return -EFAULT;

        driver = bo->bdev->driver;
        if (unlikely(!driver->verify_access)) {
                ret = -EPERM;
                goto out_unref;
        }

        ret = driver->verify_access(bo, filp);
        if (unlikely(ret != 0))
                goto out_unref;

        kmap_offset = dev_offset - bo->vm_node->start;
        if (unlikely(kmap_offset >= bo->num_pages)) {
                ret = -EFBIG;
                goto out_unref;
        }

        /* Clamp the I/O size to what remains of the object past f_pos. */
        page_offset = *f_pos & ~PAGE_MASK;
        io_size = bo->num_pages - kmap_offset;
        io_size = (io_size << PAGE_SHIFT) - page_offset;
        if (count < io_size)
                io_size = count;

        kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
        kmap_num = kmap_end - kmap_offset + 1;

        ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
        switch (ret) {
        case 0:
                break;
        case -ERESTARTSYS:
                ret = -EINTR;
                goto out_unref;
        case -EBUSY:
                ret = -EAGAIN;
                goto out_unref;
        default:
                goto out_unref;
        }

        ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
        if (unlikely(ret != 0)) {
                ttm_bo_unreserve(bo);
                goto out_unref;
        }

        virtual = ttm_kmap_obj_virtual(&map, &dummy);
        virtual += page_offset;

        if (write)
                ret = copy_from_user(virtual, wbuf, io_size);
        else
                ret = copy_to_user(rbuf, virtual, io_size);

        ttm_bo_kunmap(&map);
        ttm_bo_unreserve(bo);
        ttm_bo_unref(&bo);

        if (unlikely(ret != 0))
                return -EFBIG;

        *f_pos += io_size;

        return io_size;
out_unref:
        ttm_bo_unref(&bo);
        return ret;
}
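
/*
 * Same I/O path as ttm_bo_io, but for callers that already hold the
 * buffer object, so no device-offset lookup is needed and *f_pos is
 * interpreted as an offset into the object itself.
 */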

ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
                        char __user *rbuf, size_t count, loff_t *f_pos,
                        bool write)
{
        struct ttm_bo_kmap_obj map;
        unsigned long kmap_offset;
        unsigned long kmap_end;
        unsigned long kmap_num;
        size_t io_size;
        unsigned int page_offset;
        char *virtual;
        int ret;
        bool no_wait = false;
        bool dummy;

        kmap_offset = (*f_pos >> PAGE_SHIFT);
        if (unlikely(kmap_offset >= bo->num_pages))
                return -EFBIG;

        /* Clamp the I/O size to what remains of the object past f_pos. */
        page_offset = *f_pos & ~PAGE_MASK;
        io_size = bo->num_pages - kmap_offset;
        io_size = (io_size << PAGE_SHIFT) - page_offset;
        if (count < io_size)
                io_size = count;

        kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
        kmap_num = kmap_end - kmap_offset + 1;

        ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
        switch (ret) {
        case 0:
                break;
        case -ERESTARTSYS:
                return -EINTR;
        case -EBUSY:
                return -EAGAIN;
        default:
                return ret;
        }

        ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
        if (unlikely(ret != 0)) {
                ttm_bo_unreserve(bo);
                return ret;
        }

        virtual = ttm_kmap_obj_virtual(&map, &dummy);
        virtual += page_offset;

        if (write)
                ret = copy_from_user(virtual, wbuf, io_size);
        else
                ret = copy_to_user(rbuf, virtual, io_size);

        ttm_bo_kunmap(&map);
        ttm_bo_unreserve(bo);
        ttm_bo_unref(&bo);

        if (unlikely(ret != 0))
                return -EFBIG;

        *f_pos += io_size;

        return io_size;
}