/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/mm.h>
#include <linux/pfn_t.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/mem_encrypt.h>

/*
 * Number of pages speculatively mapped per fault; the fault handler
 * populates up to this many PTEs in one go to amortize the fault cost.
 */
#define TTM_BO_VM_NUM_PREFAULT 16
static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
				struct vm_fault *vmf)
{
	vm_fault_t ret = 0;
	int err = 0;

	if (likely(!bo->moving))
		goto out_unlock;

	/*
	 * Quick non-stalling check for idle.
	 */
	if (dma_fence_is_signaled(bo->moving))
		goto out_clear;

	/*
	 * If possible, avoid waiting for GPU with mmap_sem
	 * held.
	 */
	if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
		ret = VM_FAULT_RETRY;
		if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
			goto out_unlock;

		ttm_bo_get(bo);
		up_read(&vmf->vma->vm_mm->mmap_sem);
		(void) dma_fence_wait(bo->moving, true);
		ttm_bo_unreserve(bo);
		ttm_bo_put(bo);
		goto out_unlock;
	}

	/*
	 * Ordinary wait.
	 */
	err = dma_fence_wait(bo->moving, true);
	if (unlikely(err != 0)) {
		ret = (err != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
			VM_FAULT_NOPAGE;
		goto out_unlock;
	}

out_clear:
	dma_fence_put(bo->moving);
	bo->moving = NULL;

out_unlock:
	return ret;
}
static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo,
				       unsigned long page_offset)
{
	struct ttm_bo_device *bdev = bo->bdev;

	if (bdev->driver->io_mem_pfn)
		return bdev->driver->io_mem_pfn(bo, page_offset);

	return ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT)
		+ page_offset;
}
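/*
 * Illustrative only; the numbers below are assumptions, not taken from any
 * real device. With bus.base = 0xd0000000, bus.offset = 0x00100000 and 4 KiB
 * pages (PAGE_SHIFT == 12), the default path above yields
 * (0xd0000000 + 0x00100000) >> 12 = 0xd0100 for the first pfn, to which
 * page_offset is then added for each page of the mapping.
 */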
static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
	    vma->vm_private_data;
	struct ttm_bo_device *bdev = bo->bdev;
	unsigned long page_offset;
	unsigned long page_last;
	unsigned long pfn;
	struct ttm_tt *ttm = NULL;
	struct page *page;
	int err;
	int i;
	vm_fault_t ret = VM_FAULT_NOPAGE;
	unsigned long address = vmf->address;
	struct ttm_mem_type_manager *man =
		&bdev->man[bo->mem.mem_type];
	struct vm_area_struct cvma;

	/*
	 * Work around locking order reversal in fault / nopfn
	 * between mmap_sem and bo_reserve: Perform a trylock operation
	 * for reserve, and if it fails, retry the fault after waiting
	 * for the buffer to become unreserved.
	 */
	err = ttm_bo_reserve(bo, true, true, NULL);
	if (unlikely(err != 0)) {
		if (err != -EBUSY)
			return VM_FAULT_NOPAGE;

		if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
			if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
				ttm_bo_get(bo);
				up_read(&vmf->vma->vm_mm->mmap_sem);
				(void) ttm_bo_wait_unreserved(bo);
				ttm_bo_put(bo);
			}

			return VM_FAULT_RETRY;
		}

		/*
		 * If we'd want to change locking order to
		 * mmap_sem -> bo::reserve, we'd use a blocking reserve here
		 * instead of retrying the fault...
		 */
		return VM_FAULT_NOPAGE;
	}

	/*
	 * Refuse to fault imported pages. This should be handled
	 * (if at all) by redirecting mmap to the exporter.
	 */
	if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
		ret = VM_FAULT_SIGBUS;
		goto out_unlock;
	}

	if (bdev->driver->fault_reserve_notify) {
		err = bdev->driver->fault_reserve_notify(bo);
		switch (err) {
		case 0:
			break;
		case -EBUSY:
		case -ERESTARTSYS:
			ret = VM_FAULT_NOPAGE;
			goto out_unlock;
		default:
			ret = VM_FAULT_SIGBUS;
			goto out_unlock;
		}
	}

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */
	ret = ttm_bo_vm_fault_idle(bo, vmf);
	if (unlikely(ret != 0)) {
		if (ret == VM_FAULT_RETRY &&
		    !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
			/* The BO has already been unreserved. */
			return ret;
		}

		goto out_unlock;
	}

	err = ttm_mem_io_lock(man, true);
	if (unlikely(err != 0)) {
		ret = VM_FAULT_NOPAGE;
		goto out_unlock;
	}
	err = ttm_mem_io_reserve_vm(bo);
	if (unlikely(err != 0)) {
		ret = VM_FAULT_SIGBUS;
		goto out_io_unlock;
	}

	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
		vma->vm_pgoff - drm_vma_node_start(&bo->vma_node);
	page_last = vma_pages(vma) + vma->vm_pgoff -
		drm_vma_node_start(&bo->vma_node);

	if (unlikely(page_offset >= bo->num_pages)) {
		ret = VM_FAULT_SIGBUS;
		goto out_io_unlock;
	}

	/*
	 * Make a local vma copy to modify the page_prot member
	 * and vm_flags if necessary. The vma parameter is protected
	 * by mmap_sem in write mode.
	 */
	cvma = *vma;
	cvma.vm_page_prot = vm_get_page_prot(cvma.vm_flags);

	if (bo->mem.bus.is_iomem) {
		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
						cvma.vm_page_prot);
	} else {
		struct ttm_operation_ctx ctx = {
			.interruptible = false,
			.no_wait_gpu = false,
			.flags = TTM_OPT_FLAG_FORCE_ALLOC
		};

		ttm = bo->ttm;
		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
						cvma.vm_page_prot);

		/* Allocate all pages at once, the most common usage */
		if (ttm_tt_populate(ttm, &ctx)) {
			ret = VM_FAULT_OOM;
			goto out_io_unlock;
		}
	}

	/*
	 * Speculatively prefault a number of pages. Only error on
	 * first page.
	 */
	for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
		if (bo->mem.bus.is_iomem) {
			/* Iomem should not be marked encrypted */
			cvma.vm_page_prot = pgprot_decrypted(cvma.vm_page_prot);
			pfn = ttm_bo_io_mem_pfn(bo, page_offset);
		} else {
			page = ttm->pages[page_offset];
			if (unlikely(!page && i == 0)) {
				ret = VM_FAULT_OOM;
				goto out_io_unlock;
			} else if (unlikely(!page)) {
				break;
			}
			page->index = drm_vma_node_start(&bo->vma_node) +
				page_offset;
			pfn = page_to_pfn(page);
		}

		if (vma->vm_flags & VM_MIXEDMAP)
			ret = vmf_insert_mixed(&cvma, address,
					__pfn_to_pfn_t(pfn, PFN_DEV));
		else
			ret = vmf_insert_pfn(&cvma, address, pfn);

		/*
		 * Somebody beat us to this PTE, we prefaulted an already
		 * populated PTE, or prefaulting hit an error.
		 */
		if (unlikely((ret == VM_FAULT_NOPAGE && i > 0)))
			break;
		else if (unlikely(ret & VM_FAULT_ERROR))
			goto out_io_unlock;

		address += PAGE_SIZE;
		if (unlikely(++page_offset >= page_last))
			break;
	}
	ret = VM_FAULT_NOPAGE;
out_io_unlock:
	ttm_mem_io_unlock(man);
out_unlock:
	ttm_bo_unreserve(bo);
	return ret;
}
static void ttm_bo_vm_open(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo =
	    (struct ttm_buffer_object *)vma->vm_private_data;

	WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);

	ttm_bo_get(bo);
}
static void ttm_bo_vm_close(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo =
	    (struct ttm_buffer_object *)vma->vm_private_data;

	ttm_bo_put(bo);
	vma->vm_private_data = NULL;
}
static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
				 unsigned long offset,
				 uint8_t *buf, int len, int write)
{
	unsigned long page = offset >> PAGE_SHIFT;
	unsigned long bytes_left = len;
	int ret;

	/* Copy a page at a time, that way no extra virtual address
	 * mapping is needed
	 */
	offset -= page << PAGE_SHIFT;
	do {
		unsigned long bytes = min(bytes_left, PAGE_SIZE - offset);
		struct ttm_bo_kmap_obj map;
		void *ptr;
		bool is_iomem;

		ret = ttm_bo_kmap(bo, page, 1, &map);
		if (ret)
			return ret;

		ptr = (uint8_t *)ttm_kmap_obj_virtual(&map, &is_iomem) + offset;
		WARN_ON_ONCE(is_iomem);
		if (write)
			memcpy(ptr, buf, bytes);
		else
			memcpy(buf, ptr, bytes);
		ttm_bo_kunmap(&map);

		page++;
		buf += bytes;
		bytes_left -= bytes;
		offset = 0;
	} while (bytes_left);

	return len;
}
static int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
			    void *buf, int len, int write)
{
	unsigned long offset = addr - vma->vm_start;
	struct ttm_buffer_object *bo = vma->vm_private_data;
	int ret;

	if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->num_pages)
		return -EIO;

	ret = ttm_bo_reserve(bo, true, false, NULL);
	if (ret)
		return ret;

	switch (bo->mem.mem_type) {
	case TTM_PL_SYSTEM:
		if (unlikely(bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
			ret = ttm_tt_swapin(bo->ttm);
			if (unlikely(ret != 0))
				goto out_unreserve; /* don't leak the reservation */
		}
		/* fall through */
	case TTM_PL_TT:
		ret = ttm_bo_vm_access_kmap(bo, offset, buf, len, write);
		break;
	default:
		if (bo->bdev->driver->access_memory)
			ret = bo->bdev->driver->access_memory(
				bo, offset, buf, len, write);
		else
			ret = -EIO;
	}

out_unreserve:
	ttm_bo_unreserve(bo);

	return ret;
}
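/*
 * Note: the .access handler above is what allows access_process_vm() users
 * such as ptrace() and /proc/<pid>/mem to read or write through a TTM
 * mapping, since these VMAs are VM_IO and cannot be followed with
 * get_user_pages().
 */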
static const struct vm_operations_struct ttm_bo_vm_ops = {
	.fault = ttm_bo_vm_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close,
	.access = ttm_bo_vm_access
};
static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
						  unsigned long offset,
						  unsigned long pages)
{
	struct drm_vma_offset_node *node;
	struct ttm_buffer_object *bo = NULL;

	drm_vma_offset_lock_lookup(&bdev->vma_manager);

	node = drm_vma_offset_lookup_locked(&bdev->vma_manager, offset, pages);
	if (likely(node)) {
		bo = container_of(node, struct ttm_buffer_object, vma_node);
		if (!kref_get_unless_zero(&bo->kref))
			bo = NULL;
	}

	drm_vma_offset_unlock_lookup(&bdev->vma_manager);

	if (!bo)
		pr_err("Could not find buffer object to map\n");

	return bo;
}
int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
		struct ttm_bo_device *bdev)
{
	struct ttm_bo_driver *driver;
	struct ttm_buffer_object *bo;
	int ret;

	bo = ttm_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
	if (unlikely(!bo))
		return -EINVAL;

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}
	ret = driver->verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;

	vma->vm_ops = &ttm_bo_vm_ops;

	/*
	 * Note: We're transferring the bo reference to
	 * vma->vm_private_data here.
	 */
	vma->vm_private_data = bo;

	/*
	 * We'd like to use VM_PFNMAP on shared mappings, where
	 * (vma->vm_flags & VM_SHARED) != 0, for performance reasons,
	 * but for some reason VM_PFNMAP + x86 PAT + write-combine is very
	 * bad for performance. Until that has been sorted out, use
	 * VM_MIXEDMAP on all mappings. See freedesktop.org bug #75719
	 */
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
	return 0;
out_unref:
	ttm_bo_put(bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mmap);
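/*
 * A minimal sketch of how a driver is expected to wire this up from its
 * file_operations mmap callback. The "foo" names below are hypothetical
 * and only illustrate the call into ttm_bo_mmap(); the driver's
 * verify_access() hook in struct ttm_bo_driver decides whether the file
 * may map the looked-up buffer object:
 *
 *	static int foo_mmap(struct file *filp, struct vm_area_struct *vma)
 *	{
 *		struct foo_device *dev = foo_dev_from_file(filp);
 *
 *		return ttm_bo_mmap(filp, vma, &dev->bdev);
 *	}
 */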
int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
	if (vma->vm_pgoff != 0)
		return -EACCES;

	ttm_bo_get(bo);

	vma->vm_ops = &ttm_bo_vm_ops;
	vma->vm_private_data = bo;
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_flags |= VM_IO | VM_DONTEXPAND;
	return 0;
}
EXPORT_SYMBOL(ttm_fbdev_mmap);