/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/mm.h>
#include <linux/pfn_t.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/mem_encrypt.h>

#define TTM_BO_VM_NUM_PREFAULT 16
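
/*
 * Helper for ttm_bo_vm_fault(): wait for a pending (pipelined) move of the
 * buffer object to finish before the fault proceeds. When the fault may be
 * retried, mmap_sem and the reservation are dropped around the wait so the
 * sleep does not stall the whole address space.
 */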
static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
				struct vm_fault *vmf)
{
	vm_fault_t ret = 0;
	int err = 0;

	if (likely(!bo->moving))
		goto out_unlock;

	/*
	 * Quick non-stalling check for idle.
	 */
	if (dma_fence_is_signaled(bo->moving))
		goto out_clear;

	/*
	 * If possible, avoid waiting for GPU with mmap_sem
	 * held.
	 */
	if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
		ret = VM_FAULT_RETRY;
		if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
			goto out_unlock;

		ttm_bo_get(bo);
		up_read(&vmf->vma->vm_mm->mmap_sem);
		(void) dma_fence_wait(bo->moving, true);
		reservation_object_unlock(bo->resv);
		ttm_bo_put(bo);
		goto out_unlock;
	}

	/*
	 * Ordinary wait.
	 */
	err = dma_fence_wait(bo->moving, true);
	if (unlikely(err != 0)) {
		ret = (err != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
			VM_FAULT_NOPAGE;
		goto out_unlock;
	}

out_clear:
	dma_fence_put(bo->moving);
	bo->moving = NULL;

out_unlock:
	return ret;
}
static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo,
				       unsigned long page_offset)
{
	struct ttm_bo_device *bdev = bo->bdev;

	if (bdev->driver->io_mem_pfn)
		return bdev->driver->io_mem_pfn(bo, page_offset);

	return ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT)
		+ page_offset;
}
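
/*
 * Main fault handler: reserves the buffer object, waits for pending moves,
 * resolves the faulting page and speculatively prefaults up to
 * TTM_BO_VM_NUM_PREFAULT pages into the VMA.
 */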
static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
	    vma->vm_private_data;
	struct ttm_bo_device *bdev = bo->bdev;
	unsigned long page_offset;
	unsigned long page_last;
	unsigned long pfn;
	struct ttm_tt *ttm = NULL;
	struct page *page;
	int err;
	int i;
	vm_fault_t ret = VM_FAULT_NOPAGE;
	unsigned long address = vmf->address;
	struct ttm_mem_type_manager *man =
		&bdev->man[bo->mem.mem_type];
	struct vm_area_struct cvma;

	/*
	 * Work around locking order reversal in fault / nopfn
	 * between mmap_sem and bo_reserve: Perform a trylock operation
	 * for reserve, and if it fails, retry the fault after waiting
	 * for the buffer to become unreserved.
	 */
	if (unlikely(!reservation_object_trylock(bo->resv))) {
		if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
			if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
				ttm_bo_get(bo);
				up_read(&vmf->vma->vm_mm->mmap_sem);
				(void) ttm_bo_wait_unreserved(bo);
				ttm_bo_put(bo);
			}

			return VM_FAULT_RETRY;
		}

		/*
		 * If we'd want to change locking order to
		 * mmap_sem -> bo::reserve, we'd use a blocking reserve here
		 * instead of retrying the fault...
		 */
		return VM_FAULT_NOPAGE;
	}

	/*
	 * Refuse to fault imported pages. This should be handled
	 * (if at all) by redirecting mmap to the exporter.
	 */
	if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
		ret = VM_FAULT_SIGBUS;
		goto out_unlock;
	}

	if (bdev->driver->fault_reserve_notify) {
		struct dma_fence *moving = dma_fence_get(bo->moving);

		err = bdev->driver->fault_reserve_notify(bo);
		switch (err) {
		case 0:
			break;
		case -EBUSY:
		case -ERESTARTSYS:
			ret = VM_FAULT_NOPAGE;
			goto out_unlock;
		default:
			ret = VM_FAULT_SIGBUS;
			goto out_unlock;
		}

		if (bo->moving != moving) {
			spin_lock(&bdev->glob->lru_lock);
			ttm_bo_move_to_lru_tail(bo, NULL);
			spin_unlock(&bdev->glob->lru_lock);
		}
		dma_fence_put(moving);
	}

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */
	ret = ttm_bo_vm_fault_idle(bo, vmf);
	if (unlikely(ret != 0)) {
		if (ret == VM_FAULT_RETRY &&
		    !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
			/* The BO has already been unreserved. */
			return ret;
		}

		goto out_unlock;
	}

	err = ttm_mem_io_lock(man, true);
	if (unlikely(err != 0)) {
		ret = VM_FAULT_NOPAGE;
		goto out_unlock;
	}
	err = ttm_mem_io_reserve_vm(bo);
	if (unlikely(err != 0)) {
		ret = VM_FAULT_SIGBUS;
		goto out_io_unlock;
	}

	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
		vma->vm_pgoff - drm_vma_node_start(&bo->vma_node);
	page_last = vma_pages(vma) + vma->vm_pgoff -
		drm_vma_node_start(&bo->vma_node);

	if (unlikely(page_offset >= bo->num_pages)) {
		ret = VM_FAULT_SIGBUS;
		goto out_io_unlock;
	}

	/*
	 * Make a local vma copy to modify the page_prot member
	 * and vm_flags if necessary. The vma parameter is protected
	 * by mmap_sem in write mode.
	 */
	cvma = *vma;
	cvma.vm_page_prot = vm_get_page_prot(cvma.vm_flags);

	if (bo->mem.bus.is_iomem) {
		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
						cvma.vm_page_prot);
	} else {
		struct ttm_operation_ctx ctx = {
			.interruptible = false,
			.no_wait_gpu = false,
			.flags = TTM_OPT_FLAG_FORCE_ALLOC
		};

		ttm = bo->ttm;
		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
						cvma.vm_page_prot);

		/* Allocate all page at once, most common usage */
		if (ttm_tt_populate(ttm, &ctx)) {
			ret = VM_FAULT_OOM;
			goto out_io_unlock;
		}
	}

	/*
	 * Speculatively prefault a number of pages. Only error on
	 * first page.
	 */
	for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
		if (bo->mem.bus.is_iomem) {
			/* Iomem should not be marked encrypted */
			cvma.vm_page_prot = pgprot_decrypted(cvma.vm_page_prot);
			pfn = ttm_bo_io_mem_pfn(bo, page_offset);
		} else {
			page = ttm->pages[page_offset];
			if (unlikely(!page && i == 0)) {
				ret = VM_FAULT_OOM;
				goto out_io_unlock;
			} else if (unlikely(!page)) {
				break;
			}
			page->index = drm_vma_node_start(&bo->vma_node) +
				page_offset;
			pfn = page_to_pfn(page);
		}

		if (vma->vm_flags & VM_MIXEDMAP)
			ret = vmf_insert_mixed(&cvma, address,
					__pfn_to_pfn_t(pfn, PFN_DEV));
		else
			ret = vmf_insert_pfn(&cvma, address, pfn);

		/*
		 * Somebody beat us to this PTE or prefaulting to
		 * an already populated PTE, or prefaulting error.
		 */
		if (unlikely((ret == VM_FAULT_NOPAGE && i > 0)))
			break;
		else if (unlikely(ret & VM_FAULT_ERROR))
			goto out_io_unlock;

		address += PAGE_SIZE;
		if (unlikely(++page_offset >= page_last))
			break;
	}
	ret = VM_FAULT_NOPAGE;
out_io_unlock:
	ttm_mem_io_unlock(man);
out_unlock:
	reservation_object_unlock(bo->resv);
	return ret;
}
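
/*
 * VMA open/close callbacks: ttm_bo_mmap() transfers a buffer object reference
 * to vma->vm_private_data; these take an extra reference when the VMA is
 * duplicated (fork, split) and drop it when the VMA is torn down.
 */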
static void ttm_bo_vm_open(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo =
	    (struct ttm_buffer_object *)vma->vm_private_data;

	WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);

	ttm_bo_get(bo);
}

static void ttm_bo_vm_close(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data;

	ttm_bo_put(bo);
	vma->vm_private_data = NULL;
}
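
/*
 * Kmap-based helper for ttm_bo_vm_access() below: copies data between @buf and
 * the buffer object one page at a time, so no long-lived kernel mapping of the
 * whole object is needed.
 */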
static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
				 unsigned long offset,
				 uint8_t *buf, int len, int write)
{
	unsigned long page = offset >> PAGE_SHIFT;
	unsigned long bytes_left = len;
	int ret;

	/* Copy a page at a time, that way no extra virtual address
	 * mapping is needed
	 */
	offset -= page << PAGE_SHIFT;
	do {
		unsigned long bytes = min(bytes_left, PAGE_SIZE - offset);
		struct ttm_bo_kmap_obj map;
		void *ptr;
		bool is_iomem;

		ret = ttm_bo_kmap(bo, page, 1, &map);
		if (ret)
			return ret;

		ptr = (uint8_t *)ttm_kmap_obj_virtual(&map, &is_iomem) + offset;
		WARN_ON_ONCE(is_iomem);
		if (write)
			memcpy(ptr, buf, bytes);
		else
			memcpy(buf, ptr, bytes);
		ttm_bo_kunmap(&map);

		page++;
		buf += bytes;
		bytes_left -= bytes;
		offset = 0;
	} while (bytes_left);

	return len;
}
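
/*
 * vm_operations_struct::access handler, used by ptrace and /proc/<pid>/mem to
 * peek and poke a mapped buffer object. Dispatches on the current placement:
 * system/TT memory goes through the kmap helper above, other placements go
 * through the driver's optional access_memory() hook.
 */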
static int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
			    void *buf, int len, int write)
{
	unsigned long offset = (addr) - vma->vm_start;
	struct ttm_buffer_object *bo = vma->vm_private_data;
	int ret;

	if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->num_pages)
		return -EIO;

	ret = ttm_bo_reserve(bo, true, false, NULL);
	if (ret)
		return ret;

	switch (bo->mem.mem_type) {
	case TTM_PL_SYSTEM:
		if (unlikely(bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
			ret = ttm_tt_swapin(bo->ttm);
			if (unlikely(ret != 0))
				return ret;
		}
		/* fall through */
	case TTM_PL_TT:
		ret = ttm_bo_vm_access_kmap(bo, offset, buf, len, write);
		break;
	default:
		if (bo->bdev->driver->access_memory)
			ret = bo->bdev->driver->access_memory(
				bo, offset, buf, len, write);
		else
			ret = -EIO;
	}

	ttm_bo_unreserve(bo);

	return ret;
}

static const struct vm_operations_struct ttm_bo_vm_ops = {
	.fault = ttm_bo_vm_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close,
	.access = ttm_bo_vm_access
};
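
/*
 * Translate an mmap offset (in pages) into the buffer object that owns that
 * range in the device's vma_manager, taking a reference on it. Returns NULL
 * if no object covers the range or the object is about to be destroyed.
 */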
static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
						  unsigned long offset,
						  unsigned long pages)
{
	struct drm_vma_offset_node *node;
	struct ttm_buffer_object *bo = NULL;

	drm_vma_offset_lock_lookup(&bdev->vma_manager);

	node = drm_vma_offset_lookup_locked(&bdev->vma_manager, offset, pages);
	if (likely(node)) {
		bo = container_of(node, struct ttm_buffer_object, vma_node);
		bo = ttm_bo_get_unless_zero(bo);
	}

	drm_vma_offset_unlock_lookup(&bdev->vma_manager);

	if (!bo)
		pr_err("Could not find buffer object to map\n");

	return bo;
}
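
/*
 * Set up a userspace mapping of a buffer object: look the object up by mmap
 * offset, let the driver verify access rights for @filp, and install
 * ttm_bo_vm_ops on the VMA.
 */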
int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
		struct ttm_bo_device *bdev)
{
	struct ttm_bo_driver *driver;
	struct ttm_buffer_object *bo;
	int ret;

	bo = ttm_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
	if (unlikely(!bo))
		return -EINVAL;

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}
	ret = driver->verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;

	vma->vm_ops = &ttm_bo_vm_ops;

	/*
	 * Note: We're transferring the bo reference to
	 * vma->vm_private_data here.
	 */
	vma->vm_private_data = bo;

	/*
	 * We'd like to use VM_PFNMAP on shared mappings, where
	 * (vma->vm_flags & VM_SHARED) != 0, for performance reasons,
	 * but for some reason VM_PFNMAP + x86 PAT + write-combine is very
	 * bad for performance. Until that has been sorted out, use
	 * VM_MIXEDMAP on all mappings. See freedesktop.org bug #75719
	 */
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
	return 0;
out_unref:
	ttm_bo_put(bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mmap);
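
/*
 * Like ttm_bo_mmap(), but for a buffer object already known to the caller
 * (typically the driver's fbdev emulation), so no offset lookup or
 * verify_access() check is performed.
 */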
int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
	if (vma->vm_pgoff != 0)
		return -EACCES;

	ttm_bo_get(bo);

	vma->vm_ops = &ttm_bo_vm_ops;
	vma->vm_private_data = bo;
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_flags |= VM_IO | VM_DONTEXPAND;
	return 0;
}
EXPORT_SYMBOL(ttm_fbdev_mmap);