/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <linux/anon_inodes.h>
#include <linux/mman.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>

#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"

#include "i915_drv.h"
#include "i915_gem_gtt.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
#include "i915_gem_mman.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"
#include "i915_vma.h"
static bool
__vma_matches(struct vm_area_struct *vma, struct file *filp,
              unsigned long addr, unsigned long size)
{
        if (vma->vm_file != filp)
                return false;

        return vma->vm_start == addr &&
               (vma->vm_end - vma->vm_start) == PAGE_ALIGN(size);
}
/**
 * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
 *                       it is mapped to.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: GEM object info
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 *
 * IMPORTANT:
 *
 * DRM driver writers who look at this function as an example for how to do GEM
 * mmap support, please don't implement mmap support like here. The modern way
 * to implement DRM mmap support is with an mmap offset ioctl (like
 * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
 * That way debug tooling like valgrind will understand what's going on; hiding
 * the mmap call in a driver private ioctl will break that. The i915 driver only
 * does cpu mmaps this way because we didn't know better.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file)
{
        struct drm_i915_gem_mmap *args = data;
        struct drm_i915_gem_object *obj;
        unsigned long addr;

        if (args->flags & ~(I915_MMAP_WC))
                return -EINVAL;

        if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
                return -ENODEV;

        obj = i915_gem_object_lookup(file, args->handle);
        if (!obj)
                return -ENOENT;

        /* prime objects have no backing filp to GEM mmap
         * pages from.
         */
        if (!obj->base.filp) {
                addr = -ENXIO;
                goto err;
        }

        if (range_overflows(args->offset, args->size, (u64)obj->base.size)) {
                addr = -EINVAL;
                goto err;
        }

        addr = vm_mmap(obj->base.filp, 0, args->size,
                       PROT_READ | PROT_WRITE, MAP_SHARED,
                       args->offset);
        if (IS_ERR_VALUE(addr))
                goto err;

        if (args->flags & I915_MMAP_WC) {
                struct mm_struct *mm = current->mm;
                struct vm_area_struct *vma;

                if (down_write_killable(&mm->mmap_sem)) {
                        addr = -EINTR;
                        goto err;
                }
                vma = find_vma(mm, addr);
                if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
                        vma->vm_page_prot =
                                pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
                else
                        addr = -ENOMEM;
                up_write(&mm->mmap_sem);
                if (IS_ERR_VALUE(addr))
                        goto err;
        }
        i915_gem_object_put(obj);

        args->addr_ptr = (u64)addr;

        return 0;

err:
        i915_gem_object_put(obj);
        return addr;
}
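
/*
 * Illustrative userspace sketch (untested, not from this file) of the legacy
 * flow serviced by i915_gem_mmap_ioctl() above; drm_fd and bo_handle are
 * assumed to come from an earlier open() and GEM create ioctl:
 *
 *	#include <stdint.h>
 *	#include <sys/ioctl.h>
 *	#include <drm/i915_drm.h>
 *
 *	static void *legacy_mmap_bo(int drm_fd, __u32 bo_handle, __u64 size)
 *	{
 *		struct drm_i915_gem_mmap arg = {
 *			.handle = bo_handle,
 *			.size = size,
 *			.flags = 0, // or I915_MMAP_WC on PAT-capable CPUs
 *		};
 *
 *		if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP, &arg))
 *			return NULL;
 *
 *		return (void *)(uintptr_t)arg.addr_ptr;
 *	}
 *
 * As the comment above stresses, new userspace should instead use
 * DRM_IOCTL_I915_GEM_MMAP_OFFSET followed by mmap() on the DRM fd (see
 * i915_gem_mmap_offset_ioctl() below).
 */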
static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
{
        return i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT;
}
/**
 * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
 *
 * A history of the GTT mmap interface:
 *
 * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to
 *     be aligned and suitable for fencing, and still fit into the available
 *     mappable space left by the pinned display objects. A classic problem
 *     we called the page-fault-of-doom where we would ping-pong between
 *     two objects that could not fit inside the GTT and so the memcpy
 *     would page one object in at the expense of the other between every
 *     access.
 *
 * 1 - Objects can be any size, and have any compatible fencing (X, Y, or none
 *     as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the
 *     object is too large for the available space (or simply too large
 *     for the mappable aperture!), a view is created instead and faulted
 *     into userspace. (This view is aligned and sized appropriately for
 *     fenced access.)
 *
 * 2 - Recognise WC as a separate cache domain so that we can flush the
 *     delayed writes via GTT before performing direct access via WC.
 *
 * 3 - Remove implicit set-domain(GTT) and synchronisation on initial
 *     pagefault; swapin remains transparent.
 *
 * 4 - Support multiple fault handlers per object depending on the object's
 *     backing storage (a.k.a. MMAP_OFFSET).
 *
 * Restrictions:
 *
 *  * snoopable objects cannot be accessed via the GTT. It can cause machine
 *    hangs on some architectures, corruption on others. An attempt to service
 *    a GTT page fault from a snoopable object will generate a SIGBUS.
 *
 *  * the object must be able to fit into RAM (physical memory, though not
 *    limited to the mappable aperture).
 *
 *  * a new GTT page fault will synchronize rendering from the GPU and flush
 *    all data to system memory. Subsequent access will not be synchronized.
 *
 *  * all mappings are revoked on runtime device suspend.
 *
 *  * there are only 8, 16 or 32 fence registers to share between all users
 *    (older machines require fence register for display and blitter access
 *    as well). Contention of the fence registers will cause the previous users
 *    to be unmapped and any new access will generate new page faults.
 *
 *  * running out of memory while servicing a fault may generate a SIGBUS,
 *    rather than the expected SIGSEGV.
 */
int i915_gem_mmap_gtt_version(void)
{
        return 4;
}
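
/*
 * Illustrative userspace sketch (untested, not from this file) of how the
 * version reported above can be queried through GETPARAM; drm_fd is assumed
 * to be an already-open i915 device node:
 *
 *	#include <sys/ioctl.h>
 *	#include <drm/i915_drm.h>
 *
 *	static int query_mmap_gtt_version(int drm_fd)
 *	{
 *		int value = 0;
 *		struct drm_i915_getparam gp = {
 *			.param = I915_PARAM_MMAP_GTT_VERSION,
 *			.value = &value,
 *		};
 *
 *		if (ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp))
 *			return -1;
 *
 *		return value;
 *	}
 */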
static inline struct i915_ggtt_view
compute_partial_view(const struct drm_i915_gem_object *obj,
                     pgoff_t page_offset,
                     unsigned int chunk)
{
        struct i915_ggtt_view view;

        if (i915_gem_object_is_tiled(obj))
                chunk = roundup(chunk, tile_row_pages(obj));

        view.type = I915_GGTT_VIEW_PARTIAL;
        view.partial.offset = rounddown(page_offset, chunk);
        view.partial.size =
                min_t(unsigned int, chunk,
                      (obj->base.size >> PAGE_SHIFT) - view.partial.offset);

        /* If the partial covers the entire object, just create a normal VMA. */
        if (chunk >= obj->base.size >> PAGE_SHIFT)
                view.type = I915_GGTT_VIEW_NORMAL;

        return view;
}
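
/*
 * Worked example for compute_partial_view() above, assuming 4K pages so that
 * MIN_CHUNK_PAGES == 256: an untiled 16MiB object (4096 pages) faulting at
 * page 1000 yields a partial view with partial.offset == 768 and
 * partial.size == 256, while a 512KiB object (128 pages) is smaller than the
 * chunk and so falls back to a normal view of the whole object.
 */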
static vm_fault_t i915_error_to_vmf_fault(int err)
{
        switch (err) {
        default:
                WARN_ONCE(err, "unhandled error in %s: %i\n", __func__, err);
                /* fallthrough */
        case -EIO: /* shmemfs failure from swap device */
        case -EFAULT: /* purged object */
        case -ENODEV: /* bad object, how did you get here! */
        case -ENXIO: /* unable to access backing store (on device) */
                return VM_FAULT_SIGBUS;

        case -ENOSPC: /* shmemfs allocation failure */
        case -ENOMEM: /* our allocation failure */
                return VM_FAULT_OOM;

        case 0:
        case -EAGAIN:
        case -ERESTARTSYS:
        case -EINTR:
        case -EBUSY:
                /*
                 * EBUSY is ok: this just means that another thread
                 * already did the job.
                 */
                return VM_FAULT_NOPAGE;
        }
}
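
/*
 * Fault handler for CPU mmaps (WB/WC/UC): pin the object's backing store and
 * map it into the faulting VMA, either via its struct pages or, for objects
 * without struct pages, directly from the underlying memory region's iomap.
 */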
static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
{
        struct vm_area_struct *area = vmf->vma;
        struct i915_mmap_offset *mmo = area->vm_private_data;
        struct drm_i915_gem_object *obj = mmo->obj;
        resource_size_t iomap;
        int err;

        /* Sanity check that we allow writing into this object */
        if (unlikely(i915_gem_object_is_readonly(obj) &&
                     area->vm_flags & VM_WRITE))
                return VM_FAULT_SIGBUS;

        err = i915_gem_object_pin_pages(obj);
        if (err)
                goto out;

        iomap = -1;
        if (!i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_STRUCT_PAGE)) {
                iomap = obj->mm.region->iomap.base;
                iomap -= obj->mm.region->region.start;
        }

        /* PTEs are revoked in obj->ops->put_pages() */
        err = remap_io_sg(area,
                          area->vm_start, area->vm_end - area->vm_start,
                          obj->mm.pages->sgl, iomap);

        if (area->vm_flags & VM_WRITE) {
                GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
                obj->mm.dirty = true;
        }

        i915_gem_object_unpin_pages(obj);

out:
        return i915_error_to_vmf_fault(err);
}
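
/*
 * Fault handler for GTT mmaps: bind the object into the mappable part of the
 * global GTT (falling back to a partial view if it does not fit), take a
 * fence for tiled access, and map the matching aperture pages into the
 * faulting VMA.
 */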
static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
{
#define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)
        struct vm_area_struct *area = vmf->vma;
        struct i915_mmap_offset *mmo = area->vm_private_data;
        struct drm_i915_gem_object *obj = mmo->obj;
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *i915 = to_i915(dev);
        struct intel_runtime_pm *rpm = &i915->runtime_pm;
        struct i915_ggtt *ggtt = &i915->ggtt;
        bool write = area->vm_flags & VM_WRITE;
        intel_wakeref_t wakeref;
        struct i915_vma *vma;
        pgoff_t page_offset;
        int srcu;
        int ret;

        /* Sanity check that we allow writing into this object */
        if (i915_gem_object_is_readonly(obj) && write)
                return VM_FAULT_SIGBUS;

        /* We don't use vmf->pgoff since that has the fake offset */
        page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;

        trace_i915_gem_object_fault(obj, page_offset, true, write);

        ret = i915_gem_object_pin_pages(obj);
        if (ret)
                goto err;

        wakeref = intel_runtime_pm_get(rpm);

        ret = intel_gt_reset_trylock(ggtt->vm.gt, &srcu);
        if (ret)
                goto err_rpm;

        /* Now pin it into the GTT as needed */
        vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
                                       PIN_MAPPABLE |
                                       PIN_NONBLOCK /* NOWARN */ |
                                       PIN_NOEVICT);
        if (IS_ERR(vma)) {
                /* Use a partial view if it is bigger than available space */
                struct i915_ggtt_view view =
                        compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
                unsigned int flags;

                flags = PIN_MAPPABLE | PIN_NOSEARCH;
                if (view.type == I915_GGTT_VIEW_NORMAL)
                        flags |= PIN_NONBLOCK; /* avoid warnings for pinned */

                /*
                 * Userspace is now writing through an untracked VMA, abandon
                 * all hope that the hardware is able to track future writes.
                 */

                vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
                if (IS_ERR(vma)) {
                        flags = PIN_MAPPABLE;
                        view.type = I915_GGTT_VIEW_PARTIAL;
                        vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
                }

                /* The entire mappable GGTT is pinned? Unexpected! */
                GEM_BUG_ON(vma == ERR_PTR(-ENOSPC));
        }
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto err_reset;
        }

        /* Access to snoopable pages through the GTT is incoherent. */
        if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(i915)) {
                ret = -EFAULT;
                goto err_unpin;
        }

        ret = i915_vma_pin_fence(vma);
        if (ret)
                goto err_unpin;

        /* Finally, remap it using the new GTT offset */
        ret = remap_io_mapping(area,
                               area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
                               (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
                               min_t(u64, vma->size, area->vm_end - area->vm_start),
                               &ggtt->iomap);
        if (ret)
                goto err_fence;

        assert_rpm_wakelock_held(rpm);

        /* Mark as being mmapped into userspace for later revocation */
        mutex_lock(&i915->ggtt.vm.mutex);
        if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
                list_add(&obj->userfault_link, &i915->ggtt.userfault_list);
        mutex_unlock(&i915->ggtt.vm.mutex);

        /* Track the mmo associated with the fenced vma */
        vma->mmo = mmo;

        if (IS_ACTIVE(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND))
                intel_wakeref_auto(&i915->ggtt.userfault_wakeref,
                                   msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));

        if (write) {
                GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
                i915_vma_set_ggtt_write(vma);
                obj->mm.dirty = true;
        }

err_fence:
        i915_vma_unpin_fence(vma);
err_unpin:
        __i915_vma_unpin(vma);
err_reset:
        intel_gt_reset_unlock(ggtt->vm.gt, srcu);
err_rpm:
        intel_runtime_pm_put(rpm, wakeref);
        i915_gem_object_unpin_pages(obj);
err:
        return i915_error_to_vmf_fault(ret);
}
void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
{
        struct i915_vma *vma;

        GEM_BUG_ON(!obj->userfault_count);

        for_each_ggtt_vma(vma, obj)
                i915_vma_revoke_mmap(vma);

        GEM_BUG_ON(obj->userfault_count);
}
/*
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure. Similarly if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked. Removing the
 * mapping will then trigger a page fault on the next user access, allowing
 * fixup by vm_fault_gtt().
 */
static void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        intel_wakeref_t wakeref;

        /*
         * Serialisation between user GTT access and our code depends upon
         * revoking the CPU's PTE whilst the mutex is held. The next user
         * pagefault then has to wait until we release the mutex.
         *
         * Note that RPM complicates this somewhat by adding an additional
         * requirement that operations to the GGTT be made holding the RPM
         * wakeref.
         */
        wakeref = intel_runtime_pm_get(&i915->runtime_pm);
        mutex_lock(&i915->ggtt.vm.mutex);

        if (!obj->userfault_count)
                goto out;

        __i915_gem_object_release_mmap_gtt(obj);

        /*
         * Ensure that the CPU's PTE are revoked and there are not outstanding
         * memory transactions from userspace before we return. The TLB
         * flushing implied by changing the PTE above *should* be sufficient;
         * an extra barrier here just provides us with a bit of paranoid
         * documentation about our requirement to serialise memory writes
         * before touching registers / GSM.
         */
        wmb();

out:
        mutex_unlock(&i915->ggtt.vm.mutex);
        intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
{
        struct i915_mmap_offset *mmo;

        spin_lock(&obj->mmo.lock);
        list_for_each_entry(mmo, &obj->mmo.offsets, offset) {
                /*
                 * vma_node_unmap for GTT mmaps handled already in
                 * __i915_gem_object_release_mmap_gtt
                 */
                if (mmo->mmap_type == I915_MMAP_TYPE_GTT)
                        continue;

                spin_unlock(&obj->mmo.lock);
                drm_vma_node_unmap(&mmo->vma_node,
                                   obj->base.dev->anon_inode->i_mapping);
                spin_lock(&obj->mmo.lock);
        }
        spin_unlock(&obj->mmo.lock);
}
/**
 * i915_gem_object_release_mmap - remove physical page mappings
 * @obj: obj in question
 *
 * Preserve the reservation of the mmapping with the DRM core code, but
 * relinquish ownership of the pages back to the system.
 */
void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
{
        i915_gem_object_release_mmap_gtt(obj);
        i915_gem_object_release_mmap_offset(obj);
}
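
/*
 * Allocate an i915_mmap_offset for the requested mmap type, register it with
 * the DRM vma offset manager (retiring requests and draining freed objects to
 * reclaim address space if the first attempt fails), and link it into the
 * object's list of mmap offsets.
 */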
static struct i915_mmap_offset *
mmap_offset_attach(struct drm_i915_gem_object *obj,
                   enum i915_mmap_type mmap_type,
                   struct drm_file *file)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct i915_mmap_offset *mmo;
        int err;

        mmo = kmalloc(sizeof(*mmo), GFP_KERNEL);
        if (!mmo)
                return ERR_PTR(-ENOMEM);

        mmo->obj = obj;
        mmo->dev = obj->base.dev;
        mmo->mmap_type = mmap_type;
        drm_vma_node_reset(&mmo->vma_node);

        err = drm_vma_offset_add(mmo->dev->vma_offset_manager, &mmo->vma_node,
                                 obj->base.size / PAGE_SIZE);
        if (likely(!err))
                goto out;

        /* Attempt to reap some mmap space from dead objects */
        err = intel_gt_retire_requests_timeout(&i915->gt, MAX_SCHEDULE_TIMEOUT);
        if (err)
                goto err;

        i915_gem_drain_freed_objects(i915);
        err = drm_vma_offset_add(mmo->dev->vma_offset_manager, &mmo->vma_node,
                                 obj->base.size / PAGE_SIZE);
        if (err)
                goto err;

out:
        drm_vma_node_allow(&mmo->vma_node, file);

        spin_lock(&obj->mmo.lock);
        list_add(&mmo->offset, &obj->mmo.offsets);
        spin_unlock(&obj->mmo.lock);

        return mmo;

err:
        kfree(mmo);
        return ERR_PTR(err);
}
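
/*
 * Resolve the handle to an object, check that the requested mmap type is
 * compatible with the object's backing store, attach an mmap offset node of
 * that type and report its fake offset back to the caller.
 */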
static int
__assign_mmap_offset(struct drm_file *file,
                     u32 handle,
                     enum i915_mmap_type mmap_type,
                     u64 *offset)
{
        struct drm_i915_gem_object *obj;
        struct i915_mmap_offset *mmo;
        int err;

        obj = i915_gem_object_lookup(file, handle);
        if (!obj)
                return -ENOENT;

        if (mmap_type == I915_MMAP_TYPE_GTT &&
            i915_gem_object_never_bind_ggtt(obj)) {
                err = -ENODEV;
                goto out;
        }

        if (mmap_type != I915_MMAP_TYPE_GTT &&
            !i915_gem_object_type_has(obj,
                                      I915_GEM_OBJECT_HAS_STRUCT_PAGE |
                                      I915_GEM_OBJECT_HAS_IOMEM)) {
                err = -ENODEV;
                goto out;
        }

        mmo = mmap_offset_attach(obj, mmap_type, file);
        if (IS_ERR(mmo)) {
                err = PTR_ERR(mmo);
                goto out;
        }

        *offset = drm_vma_node_offset_addr(&mmo->vma_node);
        err = 0;
out:
        i915_gem_object_put(obj);
        return err;
}
int
i915_gem_dumb_mmap_offset(struct drm_file *file,
                          struct drm_device *dev,
                          u32 handle,
                          u64 *offset)
{
        enum i915_mmap_type mmap_type;

        if (boot_cpu_has(X86_FEATURE_PAT))
                mmap_type = I915_MMAP_TYPE_WC;
        else if (!i915_ggtt_has_aperture(&to_i915(dev)->ggtt))
                return -ENODEV;
        else
                mmap_type = I915_MMAP_TYPE_GTT;

        return __assign_mmap_offset(file, handle, mmap_type, offset);
}
/**
 * i915_gem_mmap_offset_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */
int
i915_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file)
{
        struct drm_i915_private *i915 = to_i915(dev);
        struct drm_i915_gem_mmap_offset *args = data;
        enum i915_mmap_type type;
        int err;

        /*
         * Historically we failed to check args.pad and args.offset
         * and so we cannot use those fields for user input and we cannot
         * add -EINVAL for them as the ABI is fixed, i.e. old userspace
         * may be feeding in garbage in those fields.
         *
         * if (args->pad) return -EINVAL; is verboten!
         */

        err = i915_user_extensions(u64_to_user_ptr(args->extensions),
                                   NULL, 0, NULL);
        if (err)
                return err;

        switch (args->flags) {
        case I915_MMAP_OFFSET_GTT:
                if (!i915_ggtt_has_aperture(&i915->ggtt))
                        return -ENODEV;
                type = I915_MMAP_TYPE_GTT;
                break;

        case I915_MMAP_OFFSET_WC:
                if (!boot_cpu_has(X86_FEATURE_PAT))
                        return -ENODEV;
                type = I915_MMAP_TYPE_WC;
                break;

        case I915_MMAP_OFFSET_WB:
                type = I915_MMAP_TYPE_WB;
                break;

        case I915_MMAP_OFFSET_UC:
                if (!boot_cpu_has(X86_FEATURE_PAT))
                        return -ENODEV;
                type = I915_MMAP_TYPE_UC;
                break;

        default:
                return -EINVAL;
        }

        return __assign_mmap_offset(file, args->handle, type, &args->offset);
}
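
/*
 * Illustrative userspace sketch (untested, not from this file) of the
 * mmap-offset flow handled above; drm_fd and bo_handle are assumed to come
 * from an earlier open() and GEM create ioctl:
 *
 *	#include <sys/ioctl.h>
 *	#include <sys/mman.h>
 *	#include <drm/i915_drm.h>
 *
 *	static void *mmap_bo_wc(int drm_fd, __u32 bo_handle, size_t size)
 *	{
 *		struct drm_i915_gem_mmap_offset arg = {
 *			.handle = bo_handle,
 *			.flags = I915_MMAP_OFFSET_WC,
 *		};
 *
 *		if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &arg))
 *			return MAP_FAILED;
 *
 *		// The returned fake offset is only meaningful when passed to
 *		// mmap() on the DRM fd itself.
 *		return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			    drm_fd, arg.offset);
 *	}
 */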
static void vm_open(struct vm_area_struct *vma)
{
        struct i915_mmap_offset *mmo = vma->vm_private_data;
        struct drm_i915_gem_object *obj = mmo->obj;

        i915_gem_object_get(obj);
}

static void vm_close(struct vm_area_struct *vma)
{
        struct i915_mmap_offset *mmo = vma->vm_private_data;
        struct drm_i915_gem_object *obj = mmo->obj;

        i915_gem_object_put(obj);
}

static const struct vm_operations_struct vm_ops_gtt = {
        .fault = vm_fault_gtt,
        .open = vm_open,
        .close = vm_close,
};

static const struct vm_operations_struct vm_ops_cpu = {
        .fault = vm_fault_cpu,
        .open = vm_open,
        .close = vm_close,
};
static int singleton_release(struct inode *inode, struct file *file)
{
        struct drm_i915_private *i915 = file->private_data;

        cmpxchg(&i915->gem.mmap_singleton, file, NULL);
        drm_dev_put(&i915->drm);

        return 0;
}

static const struct file_operations singleton_fops = {
        .owner = THIS_MODULE,
        .release = singleton_release,
};
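
/*
 * All GEM mmaps on a device share a single anonymous struct file, created
 * lazily here and dropped again in singleton_release(). Its f_mapping is
 * pointed at the device's anon inode so that a later drm_vma_node_unmap()
 * can revoke the PTEs of every user mapping through one address_space.
 */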
static struct file *mmap_singleton(struct drm_i915_private *i915)
{
        struct file *file;

        rcu_read_lock();
        file = i915->gem.mmap_singleton;
        if (file && !get_file_rcu(file))
                file = NULL;
        rcu_read_unlock();
        if (file)
                return file;

        file = anon_inode_getfile("i915.gem", &singleton_fops, i915, O_RDWR);
        if (IS_ERR(file))
                return file;

        /* Everyone shares a single global address space */
        file->f_mapping = i915->drm.anon_inode->i_mapping;

        smp_store_mb(i915->gem.mmap_singleton, file);
        drm_dev_get(&i915->drm);

        return file;
}
/*
 * This overcomes the limitation in drm_gem_mmap's assignment of a
 * drm_gem_object as the vma->vm_private_data, since we need to be able to
 * resolve multiple mmap offsets which could be tied to a single gem object.
 */
int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_vma_offset_node *node;
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct i915_mmap_offset *mmo = NULL;
        struct drm_gem_object *obj = NULL;
        struct file *anon;

        if (drm_dev_is_unplugged(dev))
                return -ENODEV;

        drm_vma_offset_lock_lookup(dev->vma_offset_manager);
        node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
                                                  vma->vm_pgoff,
                                                  vma_pages(vma));
        if (likely(node)) {
                mmo = container_of(node, struct i915_mmap_offset,
                                   vma_node);
                /*
                 * In our dependency chain, the drm_vma_offset_node
                 * depends on the validity of the mmo, which depends on
                 * the gem object. However the only reference we have
                 * at this point is the mmo (as the parent of the node).
                 * Try to check if the gem object was at least cleared.
                 */
                if (!mmo || !mmo->obj) {
                        drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
                        return -EINVAL;
                }
                /*
                 * Skip 0-refcnted objects as it is in the process of being
                 * destroyed and will be invalid when the vma manager lock
                 * is released.
                 */
                obj = &mmo->obj->base;
                if (!kref_get_unless_zero(&obj->refcount))
                        obj = NULL;
        }
        drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
        if (!obj)
                return -EINVAL;

        if (!drm_vma_node_is_allowed(node, priv)) {
                drm_gem_object_put_unlocked(obj);
                return -EACCES;
        }

        if (i915_gem_object_is_readonly(to_intel_bo(obj))) {
                if (vma->vm_flags & VM_WRITE) {
                        drm_gem_object_put_unlocked(obj);
                        return -EINVAL;
                }
                vma->vm_flags &= ~VM_MAYWRITE;
        }

        anon = mmap_singleton(to_i915(obj->dev));
        if (IS_ERR(anon)) {
                drm_gem_object_put_unlocked(obj);
                return PTR_ERR(anon);
        }

        vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_private_data = mmo;

        /*
         * We keep the ref on mmo->obj, not vm_file, but we require
         * vma->vm_file->f_mapping, see vma_link(), for later revocation.
         * Our userspace is accustomed to having per-file resource cleanup
         * (i.e. contexts, objects and requests) on their close(fd), which
         * requires avoiding extraneous references to their filp, hence why
         * we prefer to use an anonymous file for their mmaps.
         */
        fput(vma->vm_file);
        vma->vm_file = anon;

        switch (mmo->mmap_type) {
        case I915_MMAP_TYPE_WC:
                vma->vm_page_prot =
                        pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
                vma->vm_ops = &vm_ops_cpu;
                break;

        case I915_MMAP_TYPE_WB:
                vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
                vma->vm_ops = &vm_ops_cpu;
                break;

        case I915_MMAP_TYPE_UC:
                vma->vm_page_prot =
                        pgprot_noncached(vm_get_page_prot(vma->vm_flags));
                vma->vm_ops = &vm_ops_cpu;
                break;

        case I915_MMAP_TYPE_GTT:
                vma->vm_page_prot =
                        pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
                vma->vm_ops = &vm_ops_gtt;
                break;
        }
        vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

        return 0;
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_mman.c"
#endif