/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <ttm/ttm_module.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/uaccess.h>

#define TTM_BO_VM_NUM_PREFAULT 16
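
/*
 * Look up the buffer object whose address space node covers the page range
 * [page_start, page_start + num_pages) in bdev's address space rb-tree.
 * Called with bdev->vm_lock held. Returns NULL if no single object fully
 * covers the requested range.
 */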
static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
						     unsigned long page_start,
						     unsigned long num_pages)
{
	struct rb_node *cur = bdev->addr_space_rb.rb_node;
	unsigned long cur_offset;
	struct ttm_buffer_object *bo;
	struct ttm_buffer_object *best_bo = NULL;

	while (likely(cur != NULL)) {
		bo = rb_entry(cur, struct ttm_buffer_object, vm_rb);
		cur_offset = bo->vm_node->start;
		if (page_start >= cur_offset) {
			/* Candidate found; keep searching right for a closer one. */
			cur = cur->rb_right;
			best_bo = bo;
			if (page_start == cur_offset)
				break;
		} else
			cur = cur->rb_left;
	}

	if (unlikely(best_bo == NULL))
		return NULL;

	/* Reject the match if the object doesn't cover the whole range. */
	if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
		     (page_start + num_pages)))
		return NULL;

	return best_bo;
}
static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
	    vma->vm_private_data;
	struct ttm_bo_device *bdev = bo->bdev;
	unsigned long page_offset;
	unsigned long page_last;
	unsigned long pfn;
	struct ttm_tt *ttm = NULL;
	struct page *page;
	int ret;
	int i;
	unsigned long address = (unsigned long)vmf->virtual_address;
	int retval = VM_FAULT_NOPAGE;
	struct ttm_mem_type_manager *man =
	    &bdev->man[bo->mem.mem_type];

	/*
	 * Work around locking order reversal in fault / nopfn
	 * between mmap_sem and bo_reserve: Perform a trylock operation
	 * for reserve, and if it fails, retry the fault after scheduling.
	 */

	ret = ttm_bo_reserve(bo, true, true, false, 0);
	if (unlikely(ret != 0)) {
		if (ret == -EBUSY)
			set_need_resched();
		return VM_FAULT_NOPAGE;
	}

	if (bdev->driver->fault_reserve_notify) {
		ret = bdev->driver->fault_reserve_notify(bo);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
			set_need_resched();
			/* fall through: retry the fault */
		case -ERESTARTSYS:
			retval = VM_FAULT_NOPAGE;
			goto out_unlock;
		default:
			retval = VM_FAULT_SIGBUS;
			goto out_unlock;
		}
	}

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */

	spin_lock(&bdev->fence_lock);
	if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
		ret = ttm_bo_wait(bo, false, true, false);
		spin_unlock(&bdev->fence_lock);
		if (unlikely(ret != 0)) {
			retval = (ret != -ERESTARTSYS) ?
			    VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
			goto out_unlock;
		}
	} else
		spin_unlock(&bdev->fence_lock);

	ret = ttm_mem_io_lock(man, true);
	if (unlikely(ret != 0)) {
		retval = VM_FAULT_NOPAGE;
		goto out_unlock;
	}
	ret = ttm_mem_io_reserve_vm(bo);
	if (unlikely(ret != 0)) {
		retval = VM_FAULT_SIGBUS;
		goto out_io_unlock;
	}

	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
	    bo->vm_node->start - vma->vm_pgoff;
	page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) +
	    bo->vm_node->start - vma->vm_pgoff;

	if (unlikely(page_offset >= bo->num_pages)) {
		retval = VM_FAULT_SIGBUS;
		goto out_io_unlock;
	}

	/*
	 * Strictly, we're not allowed to modify vma->vm_page_prot here,
	 * since the mmap_sem is only held in read mode. However, we
	 * modify only the caching bits of vma->vm_page_prot and
	 * consider those bits protected by
	 * the bo->mutex, as we should be the only writers.
	 * There shouldn't really be any readers of these bits except
	 * within vm_insert_mixed()? fork?
	 *
	 * TODO: Add a list of vmas to the bo, and change the
	 * vma->vm_page_prot when the object changes caching policy, with
	 * the correct locks held.
	 */
	if (bo->mem.bus.is_iomem) {
		vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
						vma->vm_page_prot);
	} else {
		ttm = bo->ttm;
		vma->vm_page_prot = (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
		    vm_get_page_prot(vma->vm_flags) :
		    ttm_io_prot(bo->mem.placement, vma->vm_page_prot);
	}

	/*
	 * Speculatively prefault a number of pages. Only error on
	 * first page.
	 */

	for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
		if (bo->mem.bus.is_iomem)
			pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >>
			       PAGE_SHIFT) + page_offset;
		else {
			page = ttm_tt_get_page(ttm, page_offset);
			if (unlikely(!page && i == 0)) {
				retval = VM_FAULT_OOM;
				goto out_io_unlock;
			} else if (unlikely(!page)) {
				break;
			}
			pfn = page_to_pfn(page);
		}

		ret = vm_insert_mixed(vma, address, pfn);
		/*
		 * Somebody beat us to this PTE or prefaulting to
		 * an already populated PTE, or prefaulting error.
		 */

		if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
			break;
		else if (unlikely(ret != 0)) {
			retval =
			    (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
			goto out_io_unlock;
		}

		address += PAGE_SIZE;
		if (unlikely(++page_offset >= page_last))
			break;
	}
out_io_unlock:
	ttm_mem_io_unlock(man);
out_unlock:
	ttm_bo_unreserve(bo);
	return retval;
}
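
/*
 * open/close keep the bo reference count in step with the vma lifetime:
 * a vma duplicated on fork() or split takes an extra reference, and
 * unmapping drops it again.
 */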
static void ttm_bo_vm_open(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo =
	    (struct ttm_buffer_object *)vma->vm_private_data;

	(void)ttm_bo_reference(bo);
}

static void ttm_bo_vm_close(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo =
	    (struct ttm_buffer_object *)vma->vm_private_data;

	ttm_bo_unref(&bo);
	vma->vm_private_data = NULL;
}

static const struct vm_operations_struct ttm_bo_vm_ops = {
	.fault = ttm_bo_vm_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close
};
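
/*
 * ttm_bo_mmap - mmap entry point for a whole ttm device address space.
 * Resolves vma->vm_pgoff to a buffer object, asks the driver to verify
 * access rights, then wires up ttm_bo_vm_ops. On success the lookup
 * reference is handed over to vma->vm_private_data.
 */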
int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
		struct ttm_bo_device *bdev)
{
	struct ttm_bo_driver *driver;
	struct ttm_buffer_object *bo;
	int ret;

	read_lock(&bdev->vm_lock);
	bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff,
				 (vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
	if (likely(bo != NULL))
		ttm_bo_reference(bo);
	read_unlock(&bdev->vm_lock);

	if (unlikely(bo == NULL)) {
		printk(KERN_ERR TTM_PFX
		       "Could not find buffer object to map.\n");
		return -EINVAL;
	}

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}
	ret = driver->verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;

	vma->vm_ops = &ttm_bo_vm_ops;

	/*
	 * Note: We're transferring the bo reference to
	 * vma->vm_private_data here.
	 */

	vma->vm_private_data = bo;
	vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
	return 0;
out_unref:
	ttm_bo_unref(&bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mmap);
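
/*
 * Usage sketch (illustrative only; the "foo_*" names are assumptions, not
 * part of this file): a TTM-based driver typically implements its
 * file_operations .mmap hook as a thin wrapper that forwards to
 * ttm_bo_mmap() with the driver's own struct ttm_bo_device:
 *
 *	static int foo_mmap(struct file *filp, struct vm_area_struct *vma)
 *	{
 *		struct foo_device *fdev = foo_device_from_file(filp);
 *
 *		return ttm_bo_mmap(filp, vma, &fdev->bdev);
 *	}
 */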

int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
	if (vma->vm_pgoff != 0)
		return -EACCES;

	vma->vm_ops = &ttm_bo_vm_ops;
	vma->vm_private_data = ttm_bo_reference(bo);
	vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
	return 0;
}
EXPORT_SYMBOL(ttm_fbdev_mmap);
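
/*
 * ttm_bo_io - read from or write to a buffer object through its device
 * address space offset. Exactly one of @wbuf (write) and @rbuf (read) is
 * used, selected by @write. At most one kmappable chunk is transferred per
 * call; the number of bytes copied is returned and *f_pos advanced.
 */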
ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
		  const char __user *wbuf, char __user *rbuf, size_t count,
		  loff_t *f_pos, bool write)
{
	struct ttm_buffer_object *bo;
	struct ttm_bo_driver *driver;
	struct ttm_bo_kmap_obj map;
	unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
	unsigned long kmap_offset;
	unsigned long kmap_end;
	unsigned long kmap_num;
	size_t io_size;
	unsigned int page_offset;
	char *virtual;
	int ret;
	bool no_wait = false;
	bool dummy;

	read_lock(&bdev->vm_lock);
	bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
	if (likely(bo != NULL))
		ttm_bo_reference(bo);
	read_unlock(&bdev->vm_lock);

	if (unlikely(bo == NULL))
		return -EFAULT;

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}

	ret = driver->verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;

	kmap_offset = dev_offset - bo->vm_node->start;
	if (unlikely(kmap_offset >= bo->num_pages)) {
		ret = -EFBIG;
		goto out_unref;
	}

	page_offset = *f_pos & ~PAGE_MASK;
	io_size = bo->num_pages - kmap_offset;
	io_size = (io_size << PAGE_SHIFT) - page_offset;

	if (count < io_size)
		io_size = count;

	kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
	kmap_num = kmap_end - kmap_offset + 1;

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
	switch (ret) {
	case 0:
		break;
	case -EBUSY:
		ret = -EAGAIN;
		goto out_unref;
	default:
		goto out_unref;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0)) {
		ttm_bo_unreserve(bo);
		goto out_unref;
	}

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	virtual += page_offset;

	if (write)
		ret = copy_from_user(virtual, wbuf, io_size);
	else
		ret = copy_to_user(rbuf, virtual, io_size);

	ttm_bo_kunmap(&map);
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	if (unlikely(ret != 0))
		return -EFBIG;

	*f_pos += io_size;

	return io_size;
out_unref:
	ttm_bo_unref(&bo);
	return ret;
}
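
/*
 * ttm_bo_fbdev_io - same read/write helper as ttm_bo_io(), but for a
 * single, already known buffer object (e.g. an fbdev frontbuffer) rather
 * than an offset into a device address space.
 */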
ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
			char __user *rbuf, size_t count, loff_t *f_pos,
			bool write)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_end;
	unsigned long kmap_num;
	size_t io_size;
	unsigned int page_offset;
	char *virtual;
	int ret;
	bool no_wait = false;
	bool dummy;

	kmap_offset = (*f_pos >> PAGE_SHIFT);
	if (unlikely(kmap_offset >= bo->num_pages))
		return -EFBIG;

	page_offset = *f_pos & ~PAGE_MASK;
	io_size = bo->num_pages - kmap_offset;
	io_size = (io_size << PAGE_SHIFT) - page_offset;

	if (count < io_size)
		io_size = count;

	kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
	kmap_num = kmap_end - kmap_offset + 1;

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
	switch (ret) {
	case 0:
		break;
	case -EBUSY:
		return -EAGAIN;
	default:
		return ret;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0)) {
		ttm_bo_unreserve(bo);
		return ret;
	}

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	virtual += page_offset;

	if (write)
		ret = copy_from_user(virtual, wbuf, io_size);
	else
		ret = copy_to_user(rbuf, virtual, io_size);

	ttm_bo_kunmap(&map);
	ttm_bo_unreserve(bo);

	if (unlikely(ret != 0))
		return -EFBIG;

	*f_pos += io_size;

	return io_size;
}
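
/*
 * Usage sketch (illustrative only; the "foo_*" names are assumptions, not
 * part of this file): an fbdev driver backed by a TTM buffer object could
 * implement its .fb_read hook by forwarding to ttm_bo_fbdev_io() in read
 * mode:
 *
 *	static ssize_t foo_fb_read(struct fb_info *info, char __user *buf,
 *				   size_t count, loff_t *ppos)
 *	{
 *		struct foo_fb_par *par = info->par;
 *
 *		return ttm_bo_fbdev_io(par->bo, NULL, buf, count, ppos, false);
 *	}
 */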