/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
#define pr_fmt(fmt) "[TTM] " fmt

#include <ttm/ttm_module.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/uaccess.h>

#define TTM_BO_VM_NUM_PREFAULT 16
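
/*
 * Look up the buffer object whose address-space node covers the page
 * range [page_start, page_start + num_pages) in the device's RB tree.
 * Returns NULL if no single object fully covers the range. Callers
 * hold bdev->vm_lock for reading.
 */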
static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
						     unsigned long page_start,
						     unsigned long num_pages)
{
	struct rb_node *cur = bdev->addr_space_rb.rb_node;
	unsigned long cur_offset;
	struct ttm_buffer_object *bo;
	struct ttm_buffer_object *best_bo = NULL;

	while (likely(cur != NULL)) {
		bo = rb_entry(cur, struct ttm_buffer_object, vm_rb);
		cur_offset = bo->vm_node->start;
		if (page_start >= cur_offset) {
			/* Candidate: remember it and keep searching right. */
			cur = cur->rb_right;
			best_bo = bo;
			if (page_start == cur_offset)
				break;
		} else
			cur = cur->rb_left;
	}

	if (unlikely(best_bo == NULL))
		return NULL;

	/* The best candidate must cover the whole requested range. */
	if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
		     (page_start + num_pages)))
		return NULL;

	return best_bo;
}
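
/*
 * Page fault handler for a TTM bo mapping: translates the faulting
 * address into a page of the bo and speculatively inserts up to
 * TTM_BO_VM_NUM_PREFAULT PTEs.
 */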
static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
	    vma->vm_private_data;
	struct ttm_bo_device *bdev = bo->bdev;
	unsigned long page_offset;
	unsigned long page_last;
	unsigned long pfn;
	struct ttm_tt *ttm = NULL;
	struct page *page;
	int ret;
	int i;
	unsigned long address = (unsigned long)vmf->virtual_address;
	int retval = VM_FAULT_NOPAGE;
	struct ttm_mem_type_manager *man =
	    &bdev->man[bo->mem.mem_type];
	/*
	 * Work around locking order reversal in fault / nopfn
	 * between mmap_sem and bo_reserve: Perform a trylock operation
	 * for reserve, and if it fails, retry the fault after scheduling.
	 */

	ret = ttm_bo_reserve(bo, true, true, false, 0);
	if (unlikely(ret != 0)) {
		if (ret == -EBUSY)
			set_need_resched();
		return VM_FAULT_NOPAGE;
	}
	if (bdev->driver->fault_reserve_notify) {
		ret = bdev->driver->fault_reserve_notify(bo);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
			set_need_resched();
			/* fall through */
		case -ERESTARTSYS:
			retval = VM_FAULT_NOPAGE;
			goto out_unlock;
		default:
			retval = VM_FAULT_SIGBUS;
			goto out_unlock;
		}
	}
	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */

	spin_lock(&bdev->fence_lock);
	if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
		ret = ttm_bo_wait(bo, false, true, false);
		spin_unlock(&bdev->fence_lock);
		if (unlikely(ret != 0)) {
			retval = (ret != -ERESTARTSYS) ?
			    VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
			goto out_unlock;
		}
	} else
		spin_unlock(&bdev->fence_lock);
	ret = ttm_mem_io_lock(man, true);
	if (unlikely(ret != 0)) {
		retval = VM_FAULT_NOPAGE;
		goto out_unlock;
	}
	ret = ttm_mem_io_reserve_vm(bo);
	if (unlikely(ret != 0)) {
		retval = VM_FAULT_SIGBUS;
		goto out_io_unlock;
	}
	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
	    bo->vm_node->start - vma->vm_pgoff;
	page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) +
	    bo->vm_node->start - vma->vm_pgoff;

	if (unlikely(page_offset >= bo->num_pages)) {
		retval = VM_FAULT_SIGBUS;
		goto out_io_unlock;
	}
	/*
	 * Strictly, we're not allowed to modify vma->vm_page_prot here,
	 * since the mmap_sem is only held in read mode. However, we
	 * modify only the caching bits of vma->vm_page_prot and
	 * consider those bits protected by
	 * the bo->mutex, as we should be the only writers.
	 * There shouldn't really be any readers of these bits except
	 * within vm_insert_mixed()? fork?
	 *
	 * TODO: Add a list of vmas to the bo, and change the
	 * vma->vm_page_prot when the object changes caching policy, with
	 * the correct locks held.
	 */
	if (bo->mem.bus.is_iomem) {
		vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
						vma->vm_page_prot);
	} else {
		ttm = bo->ttm;
		vma->vm_page_prot = (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
		    vm_get_page_prot(vma->vm_flags) :
		    ttm_io_prot(bo->mem.placement, vma->vm_page_prot);

		/* Allocate all pages at once, the most common usage. */
		if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
			retval = VM_FAULT_OOM;
			goto out_io_unlock;
		}
	}
	/*
	 * Speculatively prefault a number of pages. Only error on
	 * first page.
	 */

	for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
		if (bo->mem.bus.is_iomem)
			pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >>
			       PAGE_SHIFT) + page_offset;
		else {
			page = ttm->pages[page_offset];
			if (unlikely(!page && i == 0)) {
				retval = VM_FAULT_OOM;
				goto out_io_unlock;
			} else if (unlikely(!page)) {
				break;
			}
			pfn = page_to_pfn(page);
		}

		ret = vm_insert_mixed(vma, address, pfn);

		/*
		 * Somebody beat us to this PTE or prefaulting to
		 * an already populated PTE, or prefaulting error.
		 */

		if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
			break;
		else if (unlikely(ret != 0)) {
			retval =
			    (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
			goto out_io_unlock;
		}

		address += PAGE_SIZE;
		if (unlikely(++page_offset >= page_last))
			break;
	}
out_io_unlock:
	ttm_mem_io_unlock(man);
out_unlock:
	ttm_bo_unreserve(bo);
	return retval;
}
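
/*
 * vm_open is called when a VMA carrying a bo mapping is duplicated
 * (e.g. on fork or split); it takes an extra bo reference that the
 * matching ttm_bo_vm_close() drops.
 */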
static void ttm_bo_vm_open(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo =
	    (struct ttm_buffer_object *)vma->vm_private_data;

	(void)ttm_bo_reference(bo);
}
static void ttm_bo_vm_close(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo =
	    (struct ttm_buffer_object *)vma->vm_private_data;

	ttm_bo_unref(&bo);
	vma->vm_private_data = NULL;
}
static const struct vm_operations_struct ttm_bo_vm_ops = {
	.fault = ttm_bo_vm_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close
};
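
/*
 * Set up a userspace mapping of a buffer object: look the bo up by
 * vma->vm_pgoff, let the driver verify access rights, then install
 * ttm_bo_vm_ops and transfer the looked-up reference to the VMA.
 */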
int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
		struct ttm_bo_device *bdev)
{
	struct ttm_bo_driver *driver;
	struct ttm_buffer_object *bo;
	int ret;

	read_lock(&bdev->vm_lock);
	bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff,
				 (vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
	if (likely(bo != NULL))
		ttm_bo_reference(bo);
	read_unlock(&bdev->vm_lock);

	if (unlikely(bo == NULL)) {
		pr_err("Could not find buffer object to map\n");
		return -EINVAL;
	}

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}
	ret = driver->verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;

	vma->vm_ops = &ttm_bo_vm_ops;

	/*
	 * Note: We're transferring the bo reference to
	 * vma->vm_private_data here.
	 */

	vma->vm_private_data = bo;
	vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
	return 0;
out_unref:
	ttm_bo_unref(&bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mmap);
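
/*
 * A driver typically calls ttm_bo_mmap() from its file_operations mmap
 * callback. A minimal sketch, assuming a hypothetical foo_device that
 * embeds the ttm_bo_device:
 *
 *	static int foo_mmap(struct file *filp, struct vm_area_struct *vma)
 *	{
 *		struct foo_device *fdev = foo_device_from_file(filp);
 *
 *		return ttm_bo_mmap(filp, vma, &fdev->bdev);
 *	}
 */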
int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
	if (vma->vm_pgoff != 0)
		return -EACCES;

	vma->vm_ops = &ttm_bo_vm_ops;
	vma->vm_private_data = ttm_bo_reference(bo);
	vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
	return 0;
}
EXPORT_SYMBOL(ttm_fbdev_mmap);
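
/*
 * read(2)/write(2)-style access to a buffer object addressed by device
 * offset (*f_pos): look the bo up, verify access, kmap the affected
 * page range and copy to or from the user buffer.
 */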
ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
		  const char __user *wbuf, char __user *rbuf, size_t count,
		  loff_t *f_pos, bool write)
{
	struct ttm_buffer_object *bo;
	struct ttm_bo_driver *driver;
	struct ttm_bo_kmap_obj map;
	unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
	unsigned long kmap_offset;
	unsigned long kmap_end;
	unsigned long kmap_num;
	size_t io_size;
	unsigned int page_offset;
	char *virtual;
	int ret;
	bool no_wait = false;
	bool dummy;
	read_lock(&bdev->vm_lock);
	bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
	if (likely(bo != NULL))
		ttm_bo_reference(bo);
	read_unlock(&bdev->vm_lock);

	if (unlikely(bo == NULL))
		return -EFAULT;

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}

	ret = driver->verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;

	kmap_offset = dev_offset - bo->vm_node->start;
	if (unlikely(kmap_offset >= bo->num_pages)) {
		ret = -EFBIG;
		goto out_unref;
	}

	page_offset = *f_pos & ~PAGE_MASK;
	io_size = bo->num_pages - kmap_offset;
	io_size = (io_size << PAGE_SHIFT) - page_offset;
	if (count < io_size)
		io_size = count;

	kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
	kmap_num = kmap_end - kmap_offset + 1;

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);

	switch (ret) {
	case 0:
		break;
	case -EBUSY:
		ret = -EAGAIN;
		goto out_unref;
	default:
		goto out_unref;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0)) {
		ttm_bo_unreserve(bo);
		goto out_unref;
	}

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	virtual += page_offset;

	if (write)
		ret = copy_from_user(virtual, wbuf, io_size);
	else
		ret = copy_to_user(rbuf, virtual, io_size);

	ttm_bo_kunmap(&map);
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	if (unlikely(ret != 0))
		return -EFBIG;

	*f_pos += io_size;

	return io_size;
out_unref:
	ttm_bo_unref(&bo);
	return ret;
}
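
/*
 * Same as ttm_bo_io(), but for a bo the caller already holds (e.g. an
 * fbdev front buffer), so the address-space lookup and access
 * verification are skipped.
 */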
ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
			char __user *rbuf, size_t count, loff_t *f_pos,
			bool write)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_end;
	unsigned long kmap_num;
	size_t io_size;
	unsigned int page_offset;
	char *virtual;
	int ret;
	bool no_wait = false;
	bool dummy;
	kmap_offset = (*f_pos >> PAGE_SHIFT);
	if (unlikely(kmap_offset >= bo->num_pages))
		return -EFBIG;

	page_offset = *f_pos & ~PAGE_MASK;
	io_size = bo->num_pages - kmap_offset;
	io_size = (io_size << PAGE_SHIFT) - page_offset;
	if (count < io_size)
		io_size = count;

	kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
	kmap_num = kmap_end - kmap_offset + 1;

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);

	switch (ret) {
	case 0:
		break;
	case -EBUSY:
		return -EAGAIN;
	default:
		return ret;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0)) {
		ttm_bo_unreserve(bo);
		return ret;
	}

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	virtual += page_offset;

	if (write)
		ret = copy_from_user(virtual, wbuf, io_size);
	else
		ret = copy_to_user(rbuf, virtual, io_size);

	ttm_bo_kunmap(&map);
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	if (unlikely(ret != 0))
		return ret;

	*f_pos += io_size;

	return io_size;
}