/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <linux/anon_inodes.h>
#include <linux/mman.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>

#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"

#include "i915_drv.h"
#include "i915_gem_gtt.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
#include "i915_gem_mman.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"
#include "i915_vma.h"

static inline bool
__vma_matches(struct vm_area_struct *vma, struct file *filp,
	      unsigned long addr, unsigned long size)
{
	if (vma->vm_file != filp)
		return false;

	return vma->vm_start == addr &&
	       (vma->vm_end - vma->vm_start) == PAGE_ALIGN(size);
}

/**
 * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
 *			 it is mapped to.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 *
 * IMPORTANT:
 *
 * DRM driver writers who look at this function as an example for how to do GEM
 * mmap support, please don't implement mmap support like this. The modern way
 * to implement DRM mmap support is with an mmap offset ioctl (like
 * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
 * That way debug tooling like valgrind will understand what's going on; hiding
 * the mmap call in a driver private ioctl will break that. The i915 driver only
 * does cpu mmaps this way because we didn't know better.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_i915_gem_object *obj;
	unsigned long addr;

	if (args->flags & ~(I915_MMAP_WC))
		return -EINVAL;

	if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
		return -ENODEV;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* prime objects have no backing filp to GEM mmap
	 * pages from.
	 */
	if (!obj->base.filp) {
		addr = -ENXIO;
		goto err;
	}

	if (range_overflows(args->offset, args->size, (u64)obj->base.size)) {
		addr = -EINVAL;
		goto err;
	}

	addr = vm_mmap(obj->base.filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	if (IS_ERR_VALUE(addr))
		goto err;

	if (args->flags & I915_MMAP_WC) {
		struct mm_struct *mm = current->mm;
		struct vm_area_struct *vma;

		if (mmap_write_lock_killable(mm)) {
			addr = -EINTR;
			goto err;
		}
		vma = find_vma(mm, addr);
		if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
			vma->vm_page_prot =
				pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		else
			addr = -ENOMEM;
		mmap_write_unlock(mm);
		if (IS_ERR_VALUE(addr))
			goto err;
	}
	i915_gem_object_put(obj);

	args->addr_ptr = (u64)addr;

	return 0;

err:
	i915_gem_object_put(obj);
	return addr;
}

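/*
 * Illustrative sketch (not part of the driver): a userspace caller of the
 * legacy ioctl serviced above fills struct drm_i915_gem_mmap from
 * <drm/i915_drm.h> and reads back addr_ptr. The local variable names below
 * are assumptions; the uapi struct, I915_MMAP_WC and the ioctl number come
 * from the installed headers, and drmIoctl() is libdrm's ioctl wrapper.
 *
 *	struct drm_i915_gem_mmap arg = {
 *		.handle = handle,	// GEM handle, e.g. from GEM_CREATE
 *		.size = length,		// number of bytes to map
 *		.flags = I915_MMAP_WC,	// optional write-combining mapping
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP, &arg) == 0)
 *		ptr = (void *)(uintptr_t)arg.addr_ptr;
 *
 * As the kernel-doc above notes, new userspace should prefer the mmap-offset
 * path instead; see the sketch after i915_gem_mmap_offset_ioctl() below.
 */
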
static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT;
}

/**
 * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
 *
 * A history of the GTT mmap interface:
 *
 * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to
 *     be aligned and suitable for fencing, and still fit into the available
 *     mappable space left by the pinned display objects. A classic problem
 *     we called the page-fault-of-doom where we would ping-pong between
 *     two objects that could not fit inside the GTT and so the memcpy
 *     would page one object in at the expense of the other between every
 *     access.
 *
 * 1 - Objects can be any size, and have any compatible fencing (X, Y, or none
 *     as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the
 *     object is too large for the available space (or simply too large
 *     for the mappable aperture!), a view is created instead and faulted
 *     into userspace. (This view is aligned and sized appropriately for
 *     fenced access.)
 *
 * 2 - Recognise WC as a separate cache domain so that we can flush the
 *     delayed writes via GTT before performing direct access via WC.
 *
 * 3 - Remove implicit set-domain(GTT) and synchronisation on initial
 *     pagefault; swapin remains transparent.
 *
 * 4 - Support multiple fault handlers per object depending on object's
 *     backing storage (a.k.a. MMAP_OFFSET).
 *
 * Restrictions:
 *
 *  * snoopable objects cannot be accessed via the GTT. It can cause machine
 *    hangs on some architectures, corruption on others. An attempt to service
 *    a GTT page fault from a snoopable object will generate a SIGBUS.
 *
 *  * the object must be able to fit into RAM (physical memory, though not
 *    limited to the mappable aperture).
 *
 * Caveats:
 *
 *  * a new GTT page fault will synchronize rendering from the GPU and flush
 *    all data to system memory. Subsequent access will not be synchronized.
 *
 *  * all mappings are revoked on runtime device suspend.
 *
 *  * there are only 8, 16 or 32 fence registers to share between all users
 *    (older machines require a fence register for display and blitter access
 *    as well). Contention of the fence registers will cause the previous users
 *    to be unmapped, and any new access will generate new page faults.
 *
 *  * running out of memory while servicing a fault may generate a SIGBUS,
 *    rather than the expected SIGSEGV.
 */
int i915_gem_mmap_gtt_version(void)
{
	return 4;
}

static inline struct i915_ggtt_view
compute_partial_view(const struct drm_i915_gem_object *obj,
		     pgoff_t page_offset,
		     unsigned int chunk)
{
	struct i915_ggtt_view view;

	if (i915_gem_object_is_tiled(obj))
		chunk = roundup(chunk, tile_row_pages(obj));

	view.type = I915_GGTT_VIEW_PARTIAL;
	view.partial.offset = rounddown(page_offset, chunk);
	view.partial.size =
		min_t(unsigned int, chunk,
		      (obj->base.size >> PAGE_SHIFT) - view.partial.offset);

	/* If the partial covers the entire object, just create a normal VMA. */
	if (chunk >= obj->base.size >> PAGE_SHIFT)
		view.type = I915_GGTT_VIEW_NORMAL;

	return view;
}

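/*
 * Worked example (illustrative numbers only): for an untiled 64 MiB object
 * (16384 pages with 4 KiB pages), a fault at page_offset 1000 with
 * chunk = MIN_CHUNK_PAGES = SZ_1M >> PAGE_SHIFT = 256 gives
 * partial.offset = rounddown(1000, 256) = 768 and
 * partial.size = min(256, 16384 - 768) = 256, i.e. a 1 MiB window aligned to
 * the chunk size. For a tiled object the chunk is first rounded up to a whole
 * tile row, and only if the chunk covers all 16384 pages does the view fall
 * back to I915_GGTT_VIEW_NORMAL.
 */
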
static vm_fault_t i915_error_to_vmf_fault(int err)
{
	switch (err) {
	default:
		WARN_ONCE(err, "unhandled error in %s: %i\n", __func__, err);
		fallthrough;
	case -EIO: /* shmemfs failure from swap device */
	case -EFAULT: /* purged object */
	case -ENODEV: /* bad object, how did you get here! */
	case -ENXIO: /* unable to access backing store (on device) */
		return VM_FAULT_SIGBUS;

	case -ENOMEM: /* our allocation failure */
		return VM_FAULT_OOM;

	case 0:
	case -EAGAIN:
	case -ENOSPC: /* transient failure to evict? */
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	}
}

static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
{
	struct vm_area_struct *area = vmf->vma;
	struct i915_mmap_offset *mmo = area->vm_private_data;
	struct drm_i915_gem_object *obj = mmo->obj;
	resource_size_t iomap;
	int err;

	/* Sanity check that we allow writing into this object */
	if (unlikely(i915_gem_object_is_readonly(obj) &&
		     area->vm_flags & VM_WRITE))
		return VM_FAULT_SIGBUS;

	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto out;

	iomap = -1;
	if (!i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_STRUCT_PAGE)) {
		iomap = obj->mm.region->iomap.base;
		iomap -= obj->mm.region->region.start;
	}

	/* PTEs are revoked in obj->ops->put_pages() */
	err = remap_io_sg(area,
			  area->vm_start, area->vm_end - area->vm_start,
			  obj->mm.pages->sgl, iomap);

	if (area->vm_flags & VM_WRITE) {
		GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
		obj->mm.dirty = true;
	}

	i915_gem_object_unpin_pages(obj);

out:
	return i915_error_to_vmf_fault(err);
}

static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
{
#define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)
	struct vm_area_struct *area = vmf->vma;
	struct i915_mmap_offset *mmo = area->vm_private_data;
	struct drm_i915_gem_object *obj = mmo->obj;
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *i915 = to_i915(dev);
	struct intel_runtime_pm *rpm = &i915->runtime_pm;
	struct i915_ggtt *ggtt = &i915->ggtt;
	bool write = area->vm_flags & VM_WRITE;
	struct i915_gem_ww_ctx ww;
	intel_wakeref_t wakeref;
	struct i915_vma *vma;
	pgoff_t page_offset;
	int srcu;
	int ret;

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;

	trace_i915_gem_object_fault(obj, page_offset, true, write);

	wakeref = intel_runtime_pm_get(rpm);

	i915_gem_ww_ctx_init(&ww, true);
retry:
	ret = i915_gem_object_lock(obj, &ww);
	if (ret)
		goto err_rpm;

	/* Sanity check that we allow writing into this object */
	if (i915_gem_object_is_readonly(obj) && write) {
		ret = -EFAULT;
		goto err_rpm;
	}

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err_rpm;

	ret = intel_gt_reset_trylock(ggtt->vm.gt, &srcu);
	if (ret)
		goto err_pages;

	/* Now pin it into the GTT as needed */
	vma = i915_gem_object_ggtt_pin_ww(obj, &ww, NULL, 0, 0,
					  PIN_MAPPABLE |
					  PIN_NONBLOCK /* NOWARN */ |
					  PIN_NOEVICT);
	if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK)) {
		/* Use a partial view if it is bigger than available space */
		struct i915_ggtt_view view =
			compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
		unsigned int flags;

		flags = PIN_MAPPABLE | PIN_NOSEARCH;
		if (view.type == I915_GGTT_VIEW_NORMAL)
			flags |= PIN_NONBLOCK; /* avoid warnings for pinned */

		/*
		 * Userspace is now writing through an untracked VMA, abandon
		 * all hope that the hardware is able to track future writes.
		 */

		vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
		if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK)) {
			flags = PIN_MAPPABLE;
			view.type = I915_GGTT_VIEW_PARTIAL;
			vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
		}

		/* The entire mappable GGTT is pinned? Unexpected! */
		GEM_BUG_ON(vma == ERR_PTR(-ENOSPC));
	}
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_reset;
	}

	/* Access to snoopable pages through the GTT is incoherent. */
	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(i915)) {
		ret = -EFAULT;
		goto err_unpin;
	}

	ret = i915_vma_pin_fence(vma);
	if (ret)
		goto err_unpin;

	/* Finally, remap it using the new GTT offset */
	ret = remap_io_mapping(area,
			       area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
			       (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
			       min_t(u64, vma->size, area->vm_end - area->vm_start),
			       &ggtt->iomap);
	if (ret)
		goto err_fence;

	assert_rpm_wakelock_held(rpm);

	/* Mark as being mmapped into userspace for later revocation */
	mutex_lock(&i915->ggtt.vm.mutex);
	if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
		list_add(&obj->userfault_link, &i915->ggtt.userfault_list);
	mutex_unlock(&i915->ggtt.vm.mutex);

	/* Track the mmo associated with the fenced vma */
	vma->mmo = mmo;

	if (IS_ACTIVE(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND))
		intel_wakeref_auto(&i915->ggtt.userfault_wakeref,
				   msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));

	if (write) {
		GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
		i915_vma_set_ggtt_write(vma);
		obj->mm.dirty = true;
	}

err_fence:
	i915_vma_unpin_fence(vma);
err_unpin:
	__i915_vma_unpin(vma);
err_reset:
	intel_gt_reset_unlock(ggtt->vm.gt, srcu);
err_pages:
	i915_gem_object_unpin_pages(obj);
err_rpm:
	if (ret == -EDEADLK) {
		ret = i915_gem_ww_ctx_backoff(&ww);
		if (!ret)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	intel_runtime_pm_put(rpm, wakeref);
	return i915_error_to_vmf_fault(ret);
}

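/*
 * A condensed restatement of the locking flow above, for readers new to the
 * i915 ww-mutex idiom (no additional functionality): the object lock is taken
 * under a ww acquire context, and a contended lock reports -EDEADLK, which is
 * handled by backing off and retrying the whole fault.
 *
 *	i915_gem_ww_ctx_init(&ww, true);
 * retry:
 *	ret = i915_gem_object_lock(obj, &ww);
 *	... pin, fence and remap ...
 *	if (ret == -EDEADLK) {
 *		ret = i915_gem_ww_ctx_backoff(&ww);
 *		if (!ret)
 *			goto retry;
 *	}
 *	i915_gem_ww_ctx_fini(&ww);
 */
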
static int
vm_access(struct vm_area_struct *area, unsigned long addr,
	  void *buf, int len, int write)
{
	struct i915_mmap_offset *mmo = area->vm_private_data;
	struct drm_i915_gem_object *obj = mmo->obj;
	void *vaddr;

	if (i915_gem_object_is_readonly(obj) && write)
		return -EACCES;

	addr -= area->vm_start;
	if (addr >= obj->base.size)
		return -EINVAL;

	/* As this is primarily for debugging, let's focus on simplicity */
	vaddr = i915_gem_object_pin_map(obj, I915_MAP_FORCE_WC);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	if (write) {
		memcpy(vaddr + addr, buf, len);
		__i915_gem_object_flush_map(obj, addr, len);
	} else {
		memcpy(buf, vaddr + addr, len);
	}

	i915_gem_object_unpin_map(obj);

	return len;
}

void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

	GEM_BUG_ON(!obj->userfault_count);

	for_each_ggtt_vma(vma, obj)
		i915_vma_revoke_mmap(vma);

	GEM_BUG_ON(obj->userfault_count);
}

/*
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure. Similarly, if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked. Removing the
 * mapping will then trigger a page fault on the next user access, allowing
 * fixup by vm_fault_gtt().
 */
void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	intel_wakeref_t wakeref;

	/*
	 * Serialisation between user GTT access and our code depends upon
	 * revoking the CPU's PTE whilst the mutex is held. The next user
	 * pagefault then has to wait until we release the mutex.
	 *
	 * Note that RPM complicates somewhat by adding an additional
	 * requirement that operations to the GGTT be made holding the RPM
	 * wakeref.
	 */
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
	mutex_lock(&i915->ggtt.vm.mutex);

	if (!obj->userfault_count)
		goto out;

	__i915_gem_object_release_mmap_gtt(obj);

	/*
	 * Ensure that the CPU's PTEs are revoked and there are no outstanding
	 * memory transactions from userspace before we return. The TLB
	 * flushing implied by changing the PTEs above *should* be
	 * sufficient; an extra barrier here just provides us with a bit
	 * of paranoid documentation about our requirement to serialise
	 * memory writes before touching registers / GSM.
	 */
	wmb();

out:
	mutex_unlock(&i915->ggtt.vm.mutex);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}

void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
{
	struct i915_mmap_offset *mmo, *mn;

	spin_lock(&obj->mmo.lock);
	rbtree_postorder_for_each_entry_safe(mmo, mn,
					     &obj->mmo.offsets, offset) {
		/*
		 * vma_node_unmap for GTT mmaps handled already in
		 * __i915_gem_object_release_mmap_gtt
		 */
		if (mmo->mmap_type == I915_MMAP_TYPE_GTT)
			continue;

		spin_unlock(&obj->mmo.lock);
		drm_vma_node_unmap(&mmo->vma_node,
				   obj->base.dev->anon_inode->i_mapping);
		spin_lock(&obj->mmo.lock);
	}
	spin_unlock(&obj->mmo.lock);
}

static struct i915_mmap_offset *
lookup_mmo(struct drm_i915_gem_object *obj,
	   enum i915_mmap_type mmap_type)
{
	struct rb_node *rb;

	spin_lock(&obj->mmo.lock);
	rb = obj->mmo.offsets.rb_node;
	while (rb) {
		struct i915_mmap_offset *mmo =
			rb_entry(rb, typeof(*mmo), offset);

		if (mmo->mmap_type == mmap_type) {
			spin_unlock(&obj->mmo.lock);
			return mmo;
		}

		if (mmo->mmap_type < mmap_type)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}
	spin_unlock(&obj->mmo.lock);

	return NULL;
}

static struct i915_mmap_offset *
insert_mmo(struct drm_i915_gem_object *obj, struct i915_mmap_offset *mmo)
{
	struct rb_node *rb, **p;

	spin_lock(&obj->mmo.lock);
	rb = NULL;
	p = &obj->mmo.offsets.rb_node;
	while (*p) {
		struct i915_mmap_offset *pos;

		rb = *p;
		pos = rb_entry(rb, typeof(*pos), offset);

		if (pos->mmap_type == mmo->mmap_type) {
			spin_unlock(&obj->mmo.lock);
			drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
					      &mmo->vma_node);
			kfree(mmo);
			return pos;
		}

		if (pos->mmap_type < mmo->mmap_type)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&mmo->offset, rb, p);
	rb_insert_color(&mmo->offset, &obj->mmo.offsets);
	spin_unlock(&obj->mmo.lock);

	return mmo;
}

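/*
 * Each object keeps at most one fake mmap offset per mmap type, stored in a
 * small rbtree ordered by mmap_type. lookup_mmo() walks that tree under
 * obj->mmo.lock, while insert_mmo() resolves a race between two concurrent
 * attaches by keeping the node already in the tree and releasing the
 * newcomer's drm_vma node and allocation.
 */
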
static struct i915_mmap_offset *
mmap_offset_attach(struct drm_i915_gem_object *obj,
		   enum i915_mmap_type mmap_type,
		   struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_mmap_offset *mmo;
	int err;

	mmo = lookup_mmo(obj, mmap_type);
	if (mmo)
		goto out;

	mmo = kmalloc(sizeof(*mmo), GFP_KERNEL);
	if (!mmo)
		return ERR_PTR(-ENOMEM);

	mmo->obj = obj;
	mmo->mmap_type = mmap_type;
	drm_vma_node_reset(&mmo->vma_node);

	err = drm_vma_offset_add(obj->base.dev->vma_offset_manager,
				 &mmo->vma_node, obj->base.size / PAGE_SIZE);
	if (likely(!err))
		goto insert;

	/* Attempt to reap some mmap space from dead objects */
	err = intel_gt_retire_requests_timeout(&i915->gt, MAX_SCHEDULE_TIMEOUT);
	if (err)
		goto err;

	i915_gem_drain_freed_objects(i915);
	err = drm_vma_offset_add(obj->base.dev->vma_offset_manager,
				 &mmo->vma_node, obj->base.size / PAGE_SIZE);
	if (err)
		goto err;

insert:
	mmo = insert_mmo(obj, mmo);
	GEM_BUG_ON(lookup_mmo(obj, mmap_type) != mmo);
out:
	if (file)
		drm_vma_node_allow(&mmo->vma_node, file);

	return mmo;

err:
	kfree(mmo);
	return ERR_PTR(err);
}

static int
__assign_mmap_offset(struct drm_file *file,
		     u32 handle,
		     enum i915_mmap_type mmap_type,
		     u64 *offset)
{
	struct drm_i915_gem_object *obj;
	struct i915_mmap_offset *mmo;
	int err;

	obj = i915_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	if (i915_gem_object_never_mmap(obj)) {
		err = -ENODEV;
		goto out;
	}

	if (mmap_type != I915_MMAP_TYPE_GTT &&
	    !i915_gem_object_type_has(obj,
				      I915_GEM_OBJECT_HAS_STRUCT_PAGE |
				      I915_GEM_OBJECT_HAS_IOMEM)) {
		err = -ENODEV;
		goto out;
	}

	mmo = mmap_offset_attach(obj, mmap_type, file);
	if (IS_ERR(mmo)) {
		err = PTR_ERR(mmo);
		goto out;
	}

	*offset = drm_vma_node_offset_addr(&mmo->vma_node);
	err = 0;
out:
	i915_gem_object_put(obj);
	return err;
}

int
i915_gem_dumb_mmap_offset(struct drm_file *file,
			  struct drm_device *dev,
			  u32 handle,
			  u64 *offset)
{
	enum i915_mmap_type mmap_type;

	if (boot_cpu_has(X86_FEATURE_PAT))
		mmap_type = I915_MMAP_TYPE_WC;
	else if (!i915_ggtt_has_aperture(&to_i915(dev)->ggtt))
		return -ENODEV;
	else
		mmap_type = I915_MMAP_TYPE_GTT;

	return __assign_mmap_offset(file, handle, mmap_type, offset);
}

/**
 * i915_gem_mmap_offset_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */
int
i915_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_mmap_offset *args = data;
	enum i915_mmap_type type;
	int err;

	/*
	 * Historically we failed to check args.pad and args.offset
	 * and so we cannot use those fields for user input and we cannot
	 * add -EINVAL for them as the ABI is fixed, i.e. old userspace
	 * may be feeding in garbage in those fields.
	 *
	 * if (args->pad) return -EINVAL; is verboten!
	 */

	err = i915_user_extensions(u64_to_user_ptr(args->extensions),
				   NULL, 0, NULL);
	if (err)
		return err;

	switch (args->flags) {
	case I915_MMAP_OFFSET_GTT:
		if (!i915_ggtt_has_aperture(&i915->ggtt))
			return -ENODEV;
		type = I915_MMAP_TYPE_GTT;
		break;

	case I915_MMAP_OFFSET_WC:
		if (!boot_cpu_has(X86_FEATURE_PAT))
			return -ENODEV;
		type = I915_MMAP_TYPE_WC;
		break;

	case I915_MMAP_OFFSET_WB:
		type = I915_MMAP_TYPE_WB;
		break;

	case I915_MMAP_OFFSET_UC:
		if (!boot_cpu_has(X86_FEATURE_PAT))
			return -ENODEV;
		type = I915_MMAP_TYPE_UC;
		break;

	default:
		return -EINVAL;
	}

	return __assign_mmap_offset(file, args->handle, type, &args->offset);
}

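/*
 * Illustrative sketch (not part of the driver): the modern userspace path
 * described in the kernel-doc above asks for a fake offset and then mmaps the
 * DRM fd at that offset. Variable names are assumptions; the uapi struct, the
 * I915_MMAP_OFFSET_* flags and the ioctl number come from <drm/i915_drm.h>,
 * and drmIoctl() is libdrm's ioctl wrapper.
 *
 *	struct drm_i915_gem_mmap_offset arg = {
 *		.handle = handle,
 *		.flags = I915_MMAP_OFFSET_WC,
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &arg) == 0)
 *		ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			   MAP_SHARED, fd, arg.offset);
 *
 * The resulting vma is then set up by i915_gem_mmap() below and serviced by
 * vm_fault_cpu() or vm_fault_gtt() above.
 */
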
static void vm_open(struct vm_area_struct *vma)
{
	struct i915_mmap_offset *mmo = vma->vm_private_data;
	struct drm_i915_gem_object *obj = mmo->obj;

	GEM_BUG_ON(!obj);
	i915_gem_object_get(obj);
}

static void vm_close(struct vm_area_struct *vma)
{
	struct i915_mmap_offset *mmo = vma->vm_private_data;
	struct drm_i915_gem_object *obj = mmo->obj;

	GEM_BUG_ON(!obj);
	i915_gem_object_put(obj);
}

static const struct vm_operations_struct vm_ops_gtt = {
	.fault = vm_fault_gtt,
	.access = vm_access,
	.open = vm_open,
	.close = vm_close,
};

static const struct vm_operations_struct vm_ops_cpu = {
	.fault = vm_fault_cpu,
	.access = vm_access,
	.open = vm_open,
	.close = vm_close,
};

static int singleton_release(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = file->private_data;

	cmpxchg(&i915->gem.mmap_singleton, file, NULL);
	drm_dev_put(&i915->drm);

	return 0;
}

static const struct file_operations singleton_fops = {
	.owner = THIS_MODULE,
	.release = singleton_release,
};

static struct file *mmap_singleton(struct drm_i915_private *i915)
{
	struct file *file;

	rcu_read_lock();
	file = READ_ONCE(i915->gem.mmap_singleton);
	if (file && !get_file_rcu(file))
		file = NULL;
	rcu_read_unlock();
	if (file)
		return file;

	file = anon_inode_getfile("i915.gem", &singleton_fops, i915, O_RDWR);
	if (IS_ERR(file))
		return file;

	/* Everyone shares a single global address space */
	file->f_mapping = i915->drm.anon_inode->i_mapping;

	smp_store_mb(i915->gem.mmap_singleton, file);
	drm_dev_get(&i915->drm);

	return file;
}

/*
 * This overcomes the limitation in drm_gem_mmap's assignment of a
 * drm_gem_object as the vma->vm_private_data, since we need to
 * be able to resolve multiple mmap offsets which could be tied
 * to a single gem object.
 */
int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_vma_offset_node *node;
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_i915_gem_object *obj = NULL;
	struct i915_mmap_offset *mmo = NULL;
	struct file *anon;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	rcu_read_lock();
	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  vma->vm_pgoff,
						  vma_pages(vma));
	if (node && drm_vma_node_is_allowed(node, priv)) {
		/*
		 * Skip 0-refcnted objects as they are in the process of being
		 * destroyed and will be invalid when the vma manager lock
		 * is released.
		 */
		mmo = container_of(node, struct i915_mmap_offset, vma_node);
		obj = i915_gem_object_get_rcu(mmo->obj);
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
	rcu_read_unlock();
	if (!obj)
		return node ? -EACCES : -EINVAL;

	if (i915_gem_object_is_readonly(obj)) {
		if (vma->vm_flags & VM_WRITE) {
			i915_gem_object_put(obj);
			return -EINVAL;
		}
		vma->vm_flags &= ~VM_MAYWRITE;
	}

	anon = mmap_singleton(to_i915(dev));
	if (IS_ERR(anon)) {
		i915_gem_object_put(obj);
		return PTR_ERR(anon);
	}

	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_private_data = mmo;

	/*
	 * We keep the ref on mmo->obj, not vm_file, but we require
	 * vma->vm_file->f_mapping, see vma_link(), for later revocation.
	 * Our userspace is accustomed to having per-file resource cleanup
	 * (i.e. contexts, objects and requests) on their close(fd), which
	 * requires avoiding extraneous references to their filp, hence why
	 * we prefer to use an anonymous file for their mmaps.
	 */
	vma_set_file(vma, anon);
	/* Drop the initial creation reference, the vma is now holding one. */
	fput(anon);

	switch (mmo->mmap_type) {
	case I915_MMAP_TYPE_WC:
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		vma->vm_ops = &vm_ops_cpu;
		break;

	case I915_MMAP_TYPE_WB:
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
		vma->vm_ops = &vm_ops_cpu;
		break;

	case I915_MMAP_TYPE_UC:
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));
		vma->vm_ops = &vm_ops_cpu;
		break;

	case I915_MMAP_TYPE_GTT:
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		vma->vm_ops = &vm_ops_gtt;
		break;
	}
	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

	return 0;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_mman.c"
#endif