/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
#define pr_fmt(fmt) "[TTM] " fmt
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/mm.h>
#include <linux/pfn_t.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/mem_encrypt.h>
#define TTM_BO_VM_NUM_PREFAULT 16
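
/*
 * The fault handler below speculatively inserts up to
 * TTM_BO_VM_NUM_PREFAULT consecutive PTEs per fault, so a linear walk
 * over a mapping takes roughly one fault per 16 pages instead of one per
 * page. Only the page actually faulted on may fail the fault; an error
 * while prefaulting any later page just ends the batch early.
 */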
static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
				struct vm_fault *vmf)
{
	int ret = 0;

	if (likely(!bo->moving))
		goto out_unlock;

	/*
	 * Quick non-stalling check for idle.
	 */
	if (dma_fence_is_signaled(bo->moving))
		goto out_clear;

	/*
	 * If possible, avoid waiting for GPU with mmap_sem
	 * held.
	 */
	if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
		ret = VM_FAULT_RETRY;
		if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
			goto out_unlock;

		ttm_bo_reference(bo);
		up_read(&vmf->vma->vm_mm->mmap_sem);
		(void) dma_fence_wait(bo->moving, true);
		ttm_bo_unreserve(bo);
		ttm_bo_unref(&bo);
		goto out_unlock;
	}

	/*
	 * Ordinary wait.
	 */
	ret = dma_fence_wait(bo->moving, true);
	if (unlikely(ret != 0)) {
		ret = (ret != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
			VM_FAULT_NOPAGE;
		goto out_unlock;
	}

out_clear:
	dma_fence_put(bo->moving);
	bo->moving = NULL;

out_unlock:
	return ret;
}
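
/*
 * ttm_bo_vm_fault_idle() returns 0 once the BO is idle (dropping
 * bo->moving), VM_FAULT_RETRY when the caller should retry the fault
 * (having released mmap_sem and the reservation, unless
 * FAULT_FLAG_RETRY_NOWAIT was set), or VM_FAULT_SIGBUS / VM_FAULT_NOPAGE
 * if the wait itself failed.
 */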
static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo,
				       unsigned long page_offset)
{
	struct ttm_bo_device *bdev = bo->bdev;

	if (bdev->driver->io_mem_pfn)
		return bdev->driver->io_mem_pfn(bo, page_offset);

	return ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT)
		+ page_offset;
}
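
/*
 * Worked example for the default path above (values are made up for
 * illustration; real ones come from the driver's io_mem_reserve()):
 * with bus.base = 0xe0000000, bus.offset = 0x100000 and PAGE_SHIFT = 12,
 * page 3 of the BO maps to PFN ((0xe0000000 + 0x100000) >> 12) + 3
 * = 0xe0103.
 */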
static int ttm_bo_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
	    vma->vm_private_data;
	struct ttm_bo_device *bdev = bo->bdev;
	unsigned long page_offset;
	unsigned long page_last;
	unsigned long pfn;
	struct ttm_tt *ttm = NULL;
	struct page *page;
	int ret;
	int i;
	unsigned long address = vmf->address;
	int retval = VM_FAULT_NOPAGE;
	struct ttm_mem_type_manager *man =
		&bdev->man[bo->mem.mem_type];
	struct vm_area_struct cvma;
	/*
	 * Work around locking order reversal in fault / nopfn
	 * between mmap_sem and bo_reserve: Perform a trylock operation
	 * for reserve, and if it fails, retry the fault after waiting
	 * for the buffer to become unreserved.
	 */
	ret = ttm_bo_reserve(bo, true, true, NULL);
	if (unlikely(ret != 0)) {
		if (ret != -EBUSY)
			return VM_FAULT_NOPAGE;

		if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
			if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
				ttm_bo_reference(bo);
				up_read(&vmf->vma->vm_mm->mmap_sem);
				(void) ttm_bo_wait_unreserved(bo);
				ttm_bo_unref(&bo);
			}

			return VM_FAULT_RETRY;
		}

		/*
		 * If we'd want to change locking order to
		 * mmap_sem -> bo::reserve, we'd use a blocking reserve here
		 * instead of retrying the fault...
		 */
		return VM_FAULT_NOPAGE;
	}
	/*
	 * Refuse to fault imported pages. This should be handled
	 * (if at all) by redirecting mmap to the exporter.
	 */
	if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
		retval = VM_FAULT_SIGBUS;
		goto out_unlock;
	}

	if (bdev->driver->fault_reserve_notify) {
		ret = bdev->driver->fault_reserve_notify(bo);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
		case -ERESTARTSYS:
			retval = VM_FAULT_NOPAGE;
			goto out_unlock;
		default:
			retval = VM_FAULT_SIGBUS;
			goto out_unlock;
		}
	}
	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */
	ret = ttm_bo_vm_fault_idle(bo, vmf);
	if (unlikely(ret != 0)) {
		retval = ret;

		if (retval == VM_FAULT_RETRY &&
		    !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
			/* The BO has already been unreserved. */
			return retval;
		}

		goto out_unlock;
	}

	ret = ttm_mem_io_lock(man, true);
	if (unlikely(ret != 0)) {
		retval = VM_FAULT_NOPAGE;
		goto out_unlock;
	}
	ret = ttm_mem_io_reserve_vm(bo);
	if (unlikely(ret != 0)) {
		retval = VM_FAULT_SIGBUS;
		goto out_io_unlock;
	}
	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
		vma->vm_pgoff - drm_vma_node_start(&bo->vma_node);
	page_last = vma_pages(vma) + vma->vm_pgoff -
		drm_vma_node_start(&bo->vma_node);

	if (unlikely(page_offset >= bo->num_pages)) {
		retval = VM_FAULT_SIGBUS;
		goto out_io_unlock;
	}
	/*
	 * Make a local vma copy to modify the page_prot member
	 * and vm_flags if necessary. The vma parameter is protected
	 * by mmap_sem in write mode.
	 */
	cvma = *vma;
	cvma.vm_page_prot = vm_get_page_prot(cvma.vm_flags);

	if (bo->mem.bus.is_iomem) {
		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
						cvma.vm_page_prot);
	} else {
		struct ttm_operation_ctx ctx = {
			.interruptible = false,
			.no_wait_gpu = false
		};

		ttm = bo->ttm;
		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
						cvma.vm_page_prot);

		/* Allocate all pages at once, most common usage */
		if (ttm->bdev->driver->ttm_tt_populate(ttm, &ctx)) {
			retval = VM_FAULT_OOM;
			goto out_io_unlock;
		}
	}
	/*
	 * Speculatively prefault a number of pages. Only error on
	 * first page.
	 */
	for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
		if (bo->mem.bus.is_iomem) {
			/* Iomem should not be marked encrypted */
			cvma.vm_page_prot = pgprot_decrypted(cvma.vm_page_prot);
			pfn = ttm_bo_io_mem_pfn(bo, page_offset);
		} else {
			page = ttm->pages[page_offset];
			if (unlikely(!page && i == 0)) {
				retval = VM_FAULT_OOM;
				goto out_io_unlock;
			} else if (unlikely(!page)) {
				break;
			}
			page->mapping = vma->vm_file->f_mapping;
			page->index = drm_vma_node_start(&bo->vma_node) +
				page_offset;
			pfn = page_to_pfn(page);
		}

		if (vma->vm_flags & VM_MIXEDMAP)
			ret = vm_insert_mixed(&cvma, address,
					__pfn_to_pfn_t(pfn, PFN_DEV));
		else
			ret = vm_insert_pfn(&cvma, address, pfn);

		/*
		 * Somebody beat us to this PTE or prefaulting to
		 * an already populated PTE, or prefaulting error.
		 */
		if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
			break;
		else if (unlikely(ret != 0)) {
			retval =
			    (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
			goto out_io_unlock;
		}

		address += PAGE_SIZE;
		if (unlikely(++page_offset >= page_last))
			break;
	}
out_io_unlock:
	ttm_mem_io_unlock(man);
out_unlock:
	ttm_bo_unreserve(bo);
	return retval;
}
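
/*
 * Summary of the fault path above: trylock the reservation (retrying the
 * fault on contention to avoid inverting the mmap_sem -> bo::reserve
 * locking order), wait for any pipelined move to finish, then insert up
 * to TTM_BO_VM_NUM_PREFAULT PTEs through vm_insert_mixed() or
 * vm_insert_pfn() using a local vma copy whose vm_page_prot matches the
 * BO's current placement.
 */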
static void ttm_bo_vm_open(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo =
	    (struct ttm_buffer_object *)vma->vm_private_data;

	WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);

	(void)ttm_bo_reference(bo);
}
static void ttm_bo_vm_close(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo =
	    (struct ttm_buffer_object *)vma->vm_private_data;

	ttm_bo_unref(&bo);
	vma->vm_private_data = NULL;
}
static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
				 unsigned long offset,
				 uint8_t *buf, int len, int write)
{
	unsigned long page = offset >> PAGE_SHIFT;
	unsigned long bytes_left = len;
	int ret;

	/* Copy a page at a time, that way no extra virtual address
	 * mapping is needed
	 */
	offset -= page << PAGE_SHIFT;
	do {
		unsigned long bytes = min(bytes_left, PAGE_SIZE - offset);
		struct ttm_bo_kmap_obj map;
		void *ptr;
		bool is_iomem;

		ret = ttm_bo_kmap(bo, page, 1, &map);
		if (ret)
			return ret;

		ptr = (uint8_t *)ttm_kmap_obj_virtual(&map, &is_iomem) + offset;
		WARN_ON_ONCE(is_iomem);
		if (write)
			memcpy(ptr, buf, bytes);
		else
			memcpy(buf, ptr, bytes);
		ttm_bo_kunmap(&map);

		page++;
		buf += bytes;
		bytes_left -= bytes;
		offset = 0;
	} while (bytes_left);

	return len;
}
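
/*
 * On success the kmap-based helper above returns len, matching the
 * number-of-bytes convention of the vm_operations_struct ->access() hook
 * it serves.
 */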
static int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
			    void *buf, int len, int write)
{
	unsigned long offset = (addr) - vma->vm_start;
	struct ttm_buffer_object *bo = vma->vm_private_data;
	int ret;

	if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->num_pages)
		return -EIO;

	ret = ttm_bo_reserve(bo, true, false, NULL);
	if (ret)
		return ret;

	switch (bo->mem.mem_type) {
	case TTM_PL_SYSTEM:
		if (unlikely(bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
			ret = ttm_tt_swapin(bo->ttm);
			if (unlikely(ret != 0))
				return ret;
		}
		/* fall through */
	case TTM_PL_TT:
		ret = ttm_bo_vm_access_kmap(bo, offset, buf, len, write);
		break;
	default:
		if (bo->bdev->driver->access_memory)
			ret = bo->bdev->driver->access_memory(
				bo, offset, buf, len, write);
		else
			ret = -EIO;
	}

	ttm_bo_unreserve(bo);

	return ret;
}
static const struct vm_operations_struct ttm_bo_vm_ops = {
	.fault = ttm_bo_vm_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close,
	.access = ttm_bo_vm_access
};
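
/*
 * These operations back every mapping set up by ttm_bo_mmap() and
 * ttm_fbdev_mmap() below. The .access hook is what lets
 * access_process_vm() users such as ptrace() and /proc/<pid>/mem read
 * and write BO contents even though the pages sit behind a VM_IO
 * mapping.
 */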
static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
						  unsigned long offset,
						  unsigned long pages)
{
	struct drm_vma_offset_node *node;
	struct ttm_buffer_object *bo = NULL;

	drm_vma_offset_lock_lookup(&bdev->vma_manager);

	node = drm_vma_offset_lookup_locked(&bdev->vma_manager, offset, pages);
	if (likely(node)) {
		bo = container_of(node, struct ttm_buffer_object, vma_node);
		if (!kref_get_unless_zero(&bo->kref))
			bo = NULL;
	}

	drm_vma_offset_unlock_lookup(&bdev->vma_manager);

	if (!bo)
		pr_err("Could not find buffer object to map\n");

	return bo;
}
int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
		struct ttm_bo_device *bdev)
{
	struct ttm_bo_driver *driver;
	struct ttm_buffer_object *bo;
	int ret;

	bo = ttm_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
	if (unlikely(!bo))
		return -EINVAL;

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}
	ret = driver->verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;

	vma->vm_ops = &ttm_bo_vm_ops;

	/*
	 * Note: We're transferring the bo reference to
	 * vma->vm_private_data here.
	 */
	vma->vm_private_data = bo;

	/*
	 * We'd like to use VM_PFNMAP on shared mappings, where
	 * (vma->vm_flags & VM_SHARED) != 0, for performance reasons,
	 * but for some reason VM_PFNMAP + x86 PAT + write-combine is very
	 * bad for performance. Until that has been sorted out, use
	 * VM_MIXEDMAP on all mappings. See freedesktop.org bug #75719
	 */
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
	return 0;
out_unref:
	ttm_bo_unref(&bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mmap);
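
/*
 * A typical driver calls ttm_bo_mmap() from its file_operations::mmap
 * hook. Sketch only; the mydrv_* names are hypothetical:
 *
 *	static int mydrv_mmap(struct file *filp, struct vm_area_struct *vma)
 *	{
 *		struct mydrv_device *mdev = filp->private_data;
 *
 *		return ttm_bo_mmap(filp, vma, &mdev->bdev);
 *	}
 */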
int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
	if (vma->vm_pgoff != 0)
		return -EACCES;

	vma->vm_ops = &ttm_bo_vm_ops;
	vma->vm_private_data = ttm_bo_reference(bo);
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_flags |= VM_IO | VM_DONTEXPAND;
	return 0;
}
EXPORT_SYMBOL(ttm_fbdev_mmap);
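
/*
 * Unlike ttm_bo_mmap(), ttm_fbdev_mmap() skips the vma_manager lookup and
 * verify_access() check: the caller already holds a reference to the BO,
 * and the mapping must start at offset zero. It also leaves VM_DONTDUMP
 * unset, so fbdev framebuffer contents may appear in core dumps.
 */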