/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */
#include <linux/sched/mm.h>
#include <drm/drm_gem.h>

#include "display/intel_frontbuffer.h"

#include "gt/intel_engine.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"

#include "i915_drv.h"
#include "i915_globals.h"
#include "i915_sw_fence_work.h"
#include "i915_trace.h"
#include "i915_vma.h"
static struct i915_global_vma {
	struct i915_global base;
	struct kmem_cache *slab_vmas;
} global;
struct i915_vma *i915_vma_alloc(void)
{
	return kmem_cache_zalloc(global.slab_vmas, GFP_KERNEL);
}
void i915_vma_free(struct i915_vma *vma)
{
	return kmem_cache_free(global.slab_vmas, vma);
}
#if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)

#include <linux/stackdepot.h>

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
	unsigned long *entries;
	unsigned int nr_entries;
	char buf[512];

	if (!vma->node.stack) {
		DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: unknown owner\n",
				 vma->node.start, vma->node.size, reason);
		return;
	}

	nr_entries = stack_depot_fetch(vma->node.stack, &entries);
	stack_trace_snprint(buf, sizeof(buf), entries, nr_entries, 0);
	DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n",
			 vma->node.start, vma->node.size, reason, buf);
}

#else

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
}

#endif
static inline struct i915_vma *active_to_vma(struct i915_active *ref)
{
	return container_of(ref, typeof(struct i915_vma), active);
}
static int __i915_vma_active(struct i915_active *ref)
{
	return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;
}
static void __i915_vma_retire(struct i915_active *ref)
{
	i915_vma_put(active_to_vma(ref));
}
static struct i915_vma *
vma_create(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct i915_vma *pos = ERR_PTR(-E2BIG);
	struct i915_vma *vma;
	struct rb_node *rb, **p;

	/* The aliasing_ppgtt should never be used directly! */
	GEM_BUG_ON(vm == &vm->gt->ggtt->alias->vm);

	vma = i915_vma_alloc();
	if (vma == NULL)
		return ERR_PTR(-ENOMEM);

	kref_init(&vma->ref);
	mutex_init(&vma->pages_mutex);
	vma->vm = i915_vm_get(vm);
	vma->ops = &vm->vma_ops;
	vma->obj = obj;
	vma->resv = obj->base.resv;
	vma->size = obj->base.size;
	vma->display_alignment = I915_GTT_MIN_ALIGNMENT;

	i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire);

	/* Declare ourselves safe for use inside shrinkers */
	if (IS_ENABLED(CONFIG_LOCKDEP)) {
		fs_reclaim_acquire(GFP_KERNEL);
		might_lock(&vma->active.mutex);
		fs_reclaim_release(GFP_KERNEL);
	}

	INIT_LIST_HEAD(&vma->closed_link);

	if (view && view->type != I915_GGTT_VIEW_NORMAL) {
		vma->ggtt_view = *view;
		if (view->type == I915_GGTT_VIEW_PARTIAL) {
			GEM_BUG_ON(range_overflows_t(u64,
						     view->partial.offset,
						     view->partial.size,
						     obj->base.size >> PAGE_SHIFT));
			vma->size = view->partial.size;
			vma->size <<= PAGE_SHIFT;
			GEM_BUG_ON(vma->size > obj->base.size);
		} else if (view->type == I915_GGTT_VIEW_ROTATED) {
			vma->size = intel_rotation_info_size(&view->rotated);
			vma->size <<= PAGE_SHIFT;
		} else if (view->type == I915_GGTT_VIEW_REMAPPED) {
			vma->size = intel_remapped_info_size(&view->remapped);
			vma->size <<= PAGE_SHIFT;
		}
	}

	if (unlikely(vma->size > vm->total))
		goto err_vma;

	GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));

	spin_lock(&obj->vma.lock);

	if (i915_is_ggtt(vm)) {
		if (unlikely(overflows_type(vma->size, u32)))
			goto err_unlock;

		vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
						      i915_gem_object_get_tiling(obj),
						      i915_gem_object_get_stride(obj));
		if (unlikely(vma->fence_size < vma->size || /* overflow */
			     vma->fence_size > vm->total))
			goto err_unlock;

		GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));

		vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
								i915_gem_object_get_tiling(obj),
								i915_gem_object_get_stride(obj));
		GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));

		__set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
	}

	rb = NULL;
	p = &obj->vma.tree.rb_node;
	while (*p) {
		long cmp;

		rb = *p;
		pos = rb_entry(rb, struct i915_vma, obj_node);

		/*
		 * If the view already exists in the tree, another thread
		 * already created a matching vma, so return the older instance
		 * and dispose of ours.
		 */
		cmp = i915_vma_compare(pos, vm, view);
		if (cmp < 0)
			p = &rb->rb_right;
		else if (cmp > 0)
			p = &rb->rb_left;
		else
			goto err_unlock;
	}
	rb_link_node(&vma->obj_node, rb, p);
	rb_insert_color(&vma->obj_node, &obj->vma.tree);

	if (i915_vma_is_ggtt(vma))
		/*
		 * We put the GGTT vma at the start of the vma-list, followed
		 * by the ppGGTT vma. This allows us to break early when
		 * iterating over only the GGTT vma for an object, see
		 * for_each_ggtt_vma()
		 */
		list_add(&vma->obj_link, &obj->vma.list);
	else
		list_add_tail(&vma->obj_link, &obj->vma.list);

	spin_unlock(&obj->vma.lock);

	return vma;

err_unlock:
	spin_unlock(&obj->vma.lock);
err_vma:
	i915_vma_free(vma);
	return pos;
}
static struct i915_vma *
vma_lookup(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct rb_node *rb;

	rb = obj->vma.tree.rb_node;
	while (rb) {
		struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
		long cmp;

		cmp = i915_vma_compare(vma, vm, view);
		if (cmp == 0)
			return vma;

		if (cmp < 0)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}

	return NULL;
}
/**
 * i915_vma_instance - return the singleton instance of the VMA
 * @obj: parent &struct drm_i915_gem_object to be mapped
 * @vm: address space in which the mapping is located
 * @view: additional mapping requirements
 *
 * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
 * the same @view characteristics. If a match is not found, one is created.
 * Once created, the VMA is kept until either the object is freed, or the
 * address space is closed.
 *
 * Returns the vma, or an error pointer.
 */
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
		  struct i915_address_space *vm,
		  const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;

	GEM_BUG_ON(view && !i915_is_ggtt(vm));
	GEM_BUG_ON(!atomic_read(&vm->open));

	spin_lock(&obj->vma.lock);
	vma = vma_lookup(obj, vm, view);
	spin_unlock(&obj->vma.lock);

	/* vma_create() will resolve the race if another creates the vma */
	if (unlikely(!vma))
		vma = vma_create(obj, vm, view);

	GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
	return vma;
}
struct i915_vma_work {
	struct dma_fence_work base;
	struct i915_address_space *vm;
	struct i915_vm_pt_stash stash;
	struct i915_vma *vma;
	struct drm_i915_gem_object *pinned;
	struct i915_sw_dma_fence_cb cb;
	enum i915_cache_level cache_level;
	unsigned int flags;
};
static int __vma_bind(struct dma_fence_work *work)
{
	struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
	struct i915_vma *vma = vw->vma;

	vma->ops->bind_vma(vw->vm, &vw->stash,
			   vma, vw->cache_level, vw->flags);

	return 0;
}
static void __vma_release(struct dma_fence_work *work)
{
	struct i915_vma_work *vw = container_of(work, typeof(*vw), base);

	if (vw->pinned) {
		__i915_gem_object_unpin_pages(vw->pinned);
		i915_gem_object_put(vw->pinned);
	}

	i915_vm_free_pt_stash(vw->vm, &vw->stash);
	i915_vm_put(vw->vm);
}
static const struct dma_fence_work_ops bind_ops = {
	.name = "bind",
	.work = __vma_bind,
	.release = __vma_release,
};
struct i915_vma_work *i915_vma_work(void)
{
	struct i915_vma_work *vw;

	vw = kzalloc(sizeof(*vw), GFP_KERNEL);
	if (!vw)
		return NULL;

	dma_fence_work_init(&vw->base, &bind_ops);
	vw->base.dma.error = -EAGAIN; /* disable the worker by default */

	return vw;
}
int i915_vma_wait_for_bind(struct i915_vma *vma)
{
	int err = 0;

	if (rcu_access_pointer(vma->active.excl.fence)) {
		struct dma_fence *fence;

		rcu_read_lock();
		fence = dma_fence_get_rcu_safe(&vma->active.excl.fence);
		rcu_read_unlock();
		if (fence) {
			err = dma_fence_wait(fence, MAX_SCHEDULE_TIMEOUT);
			dma_fence_put(fence);
		}
	}

	return err;
}
/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
 * @vma: VMA to map
 * @cache_level: mapping cache level
 * @flags: flags like global or local mapping
 * @work: preallocated worker for allocating and binding the PTE
 *
 * DMA addresses are taken from the scatter-gather table of this object (or of
 * this VMA in case of non-default GGTT views) and PTE entries set up.
 * Note that DMA addresses are also the only part of the SG table we care about.
 */
int i915_vma_bind(struct i915_vma *vma,
		  enum i915_cache_level cache_level,
		  u32 flags,
		  struct i915_vma_work *work)
{
	u32 bind_flags;
	u32 vma_flags;

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(vma->size > vma->node.size);

	if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
					      vma->node.size,
					      vma->vm->total)))
		return -ENODEV;

	if (GEM_DEBUG_WARN_ON(!flags))
		return -EINVAL;

	bind_flags = flags;
	bind_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;

	vma_flags = atomic_read(&vma->flags);
	vma_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;

	bind_flags &= ~vma_flags;
	if (bind_flags == 0)
		return 0;

	GEM_BUG_ON(!vma->pages);

	trace_i915_vma_bind(vma, bind_flags);
	if (work && bind_flags & vma->vm->bind_async_flags) {
		struct dma_fence *prev;

		work->vma = vma;
		work->cache_level = cache_level;
		work->flags = bind_flags;

		/*
		 * Note we only want to chain up to the migration fence on
		 * the pages (not the object itself). As we don't track that,
		 * yet, we have to use the exclusive fence instead.
		 *
		 * Also note that we do not want to track the async vma as
		 * part of the obj->resv->excl_fence as it only affects
		 * execution and not content or object's backing store lifetime.
		 */
		prev = i915_active_set_exclusive(&vma->active, &work->base.dma);
		if (prev) {
			__i915_sw_fence_await_dma_fence(&work->base.chain,
							prev,
							&work->cb);
			dma_fence_put(prev);
		}

		work->base.dma.error = 0; /* enable the queue_work() */

		if (vma->obj) {
			__i915_gem_object_pin_pages(vma->obj);
			work->pinned = i915_gem_object_get(vma->obj);
		}
	} else {
		vma->ops->bind_vma(vma->vm, NULL, vma, cache_level, bind_flags);
	}

	atomic_or(bind_flags, &vma->flags);
	return 0;
}
void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{
	void __iomem *ptr;
	int err;

	if (GEM_WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
		err = -ENODEV;
		goto err;
	}

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND));

	ptr = READ_ONCE(vma->iomap);
	if (ptr == NULL) {
		ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
					vma->node.start,
					vma->node.size);
		if (ptr == NULL) {
			err = -ENOMEM;
			goto err;
		}

		if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) {
			io_mapping_unmap(ptr);
			ptr = READ_ONCE(vma->iomap);
		}
	}

	__i915_vma_pin(vma);

	err = i915_vma_pin_fence(vma);
	if (err)
		goto err_unpin;

	i915_vma_set_ggtt_write(vma);

	/* NB Access through the GTT requires the device to be awake. */
	return ptr;

err_unpin:
	__i915_vma_unpin(vma);
err:
	return IO_ERR_PTR(err);
}
void i915_vma_flush_writes(struct i915_vma *vma)
{
	if (i915_vma_unset_ggtt_write(vma))
		intel_gt_flush_ggtt_writes(vma->vm->gt);
}
void i915_vma_unpin_iomap(struct i915_vma *vma)
{
	GEM_BUG_ON(vma->iomap == NULL);

	i915_vma_flush_writes(vma);

	i915_vma_unpin_fence(vma);
	i915_vma_unpin(vma);
}
void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags)
{
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;

	vma = fetch_and_zero(p_vma);
	if (!vma)
		return;

	obj = vma->obj;
	GEM_BUG_ON(!obj);

	i915_vma_unpin(vma);

	if (flags & I915_VMA_RELEASE_MAP)
		i915_gem_object_unpin_map(obj);

	i915_gem_object_put(obj);
}
bool i915_vma_misplaced(const struct i915_vma *vma,
			u64 size, u64 alignment, u64 flags)
{
	if (!drm_mm_node_allocated(&vma->node))
		return false;

	if (test_bit(I915_VMA_ERROR_BIT, __i915_vma_flags(vma)))
		return true;

	if (vma->node.size < size)
		return true;

	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
	if (alignment && !IS_ALIGNED(vma->node.start, alignment))
		return true;

	if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
		return true;

	if (flags & PIN_OFFSET_BIAS &&
	    vma->node.start < (flags & PIN_OFFSET_MASK))
		return true;

	if (flags & PIN_OFFSET_FIXED &&
	    vma->node.start != (flags & PIN_OFFSET_MASK))
		return true;

	return false;
}
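/*
 * Editor's note with a small sketch: PIN_OFFSET_BIAS and PIN_OFFSET_FIXED
 * carry their address in the same u64 flags word, masked by PIN_OFFSET_MASK,
 * which is why the checks above compare vma->node.start against
 * (flags & PIN_OFFSET_MASK). Hypothetical example value below.
 */
#if 0
static u64 example_fixed_offset_flags(void)
{
	/* Ask for a fixed placement of the vma at GTT offset 0x10000 */
	return PIN_USER | PIN_OFFSET_FIXED | (0x10000 & PIN_OFFSET_MASK);
}
#endif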
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
	bool mappable, fenceable;

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!vma->fence_size);

	fenceable = (vma->node.size >= vma->fence_size &&
		     IS_ALIGNED(vma->node.start, vma->fence_alignment));

	mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;

	if (mappable && fenceable)
		set_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
	else
		clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
}
bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
{
	struct drm_mm_node *node = &vma->node;
	struct drm_mm_node *other;

	/*
	 * On some machines we have to be careful when putting differing types
	 * of snoopable memory together to avoid the prefetcher crossing memory
	 * domains and dying. During vm initialisation, we decide whether or not
	 * these constraints apply and set the drm_mm.color_adjust
	 * appropriately.
	 */
	if (!i915_vm_has_cache_coloring(vma->vm))
		return true;

	/* Only valid to be called on an already inserted vma */
	GEM_BUG_ON(!drm_mm_node_allocated(node));
	GEM_BUG_ON(list_empty(&node->node_list));

	other = list_prev_entry(node, node_list);
	if (i915_node_color_differs(other, color) &&
	    !drm_mm_hole_follows(other))
		return false;

	other = list_next_entry(node, node_list);
	if (i915_node_color_differs(other, color) &&
	    !drm_mm_hole_follows(node))
		return false;

	return true;
}
/**
 * i915_vma_insert - finds a slot for the vma in its address space
 * @vma: the vma
 * @size: requested size in bytes (can be larger than the VMA)
 * @alignment: required alignment
 * @flags: mask of PIN_* flags to use
 *
 * First we try to allocate some free space that meets the requirements for
 * the VMA. Failing that, if the flags permit, it will evict an old VMA,
 * preferably the oldest idle entry, to make room for the new VMA.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	unsigned long color;
	u64 start, end;
	int ret;

	GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

	size = max(size, vma->size);
	alignment = max(alignment, vma->display_alignment);
	if (flags & PIN_MAPPABLE) {
		size = max_t(typeof(size), size, vma->fence_size);
		alignment = max_t(typeof(alignment),
				  alignment, vma->fence_alignment);
	}

	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(!is_power_of_2(alignment));

	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
	GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));

	end = vma->vm->total;
	if (flags & PIN_MAPPABLE)
		end = min_t(u64, end, i915_vm_to_ggtt(vma->vm)->mappable_end);
	if (flags & PIN_ZONE_4G)
		end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
	GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

	/* If binding the object/GGTT view requires more space than the entire
	 * aperture has, reject it early before evicting everything in a vain
	 * attempt to find space.
	 */
	if (size > end) {
		DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
			  size, flags & PIN_MAPPABLE ? "mappable" : "total",
			  end);
		return -ENOSPC;
	}

	color = 0;
	if (vma->obj && i915_vm_has_cache_coloring(vma->vm))
		color = vma->obj->cache_level;

	if (flags & PIN_OFFSET_FIXED) {
		u64 offset = flags & PIN_OFFSET_MASK;
		if (!IS_ALIGNED(offset, alignment) ||
		    range_overflows(offset, size, end))
			return -EINVAL;

		ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
					   size, offset, color,
					   flags);
		if (ret)
			return ret;
	} else {
		/*
		 * We only support huge gtt pages through the 48b PPGTT,
		 * however we also don't want to force any alignment for
		 * objects which need to be tightly packed into the low 32bits.
		 *
		 * Note that we assume that GGTT are limited to 4GiB for the
		 * foreseeable future. See also i915_ggtt_offset().
		 */
		if (upper_32_bits(end - 1) &&
		    vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
			/*
			 * We can't mix 64K and 4K PTEs in the same page-table
			 * (2M block), and so to avoid the ugliness and
			 * complexity of coloring we opt for just aligning 64K
			 * objects to 2M.
			 */
			u64 page_alignment =
				rounddown_pow_of_two(vma->page_sizes.sg |
						     I915_GTT_PAGE_SIZE_2M);

			/*
			 * Check we don't expand for the limited Global GTT
			 * (mappable aperture is even more precious!). This
			 * also checks that we exclude the aliasing-ppgtt.
			 */
			GEM_BUG_ON(i915_vma_is_ggtt(vma));

			alignment = max(alignment, page_alignment);

			if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
				size = round_up(size, I915_GTT_PAGE_SIZE_2M);
		}

		ret = i915_gem_gtt_insert(vma->vm, &vma->node,
					  size, alignment, color,
					  start, end, flags);
		if (ret)
			return ret;

		GEM_BUG_ON(vma->node.start < start);
		GEM_BUG_ON(vma->node.start + vma->node.size > end);
	}
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));

	list_add_tail(&vma->vm_link, &vma->vm->bound_list);

	return 0;
}
static void
i915_vma_detach(struct i915_vma *vma)
{
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));

	/*
	 * And finally now the object is completely decoupled from this
	 * vma, we can drop its hold on the backing storage and allow
	 * it to be reaped by the shrinker.
	 */
	list_del(&vma->vm_link);
}
static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
{
	unsigned int bound;
	bool pinned = true;

	bound = atomic_read(&vma->flags);
	do {
		if (unlikely(flags & ~bound))
			return false;

		if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR)))
			return false;

		if (!(bound & I915_VMA_PIN_MASK))
			goto unpinned;

		GEM_BUG_ON(((bound + 1) & I915_VMA_PIN_MASK) == 0);
	} while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));

	return true;

unpinned:
	/*
	 * If pin_count==0, but we are bound, check under the lock to avoid
	 * racing with a concurrent i915_vma_unbind().
	 */
	mutex_lock(&vma->vm->mutex);
	do {
		if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR))) {
			pinned = false;
			break;
		}

		if (unlikely(flags & ~bound)) {
			pinned = false;
			break;
		}
	} while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
	mutex_unlock(&vma->vm->mutex);

	return pinned;
}
static int vma_get_pages(struct i915_vma *vma)
{
	int err = 0;

	if (atomic_add_unless(&vma->pages_count, 1, 0))
		return 0;

	/* Allocations ahoy! */
	if (mutex_lock_interruptible(&vma->pages_mutex))
		return -EINTR;

	if (!atomic_read(&vma->pages_count)) {
		if (vma->obj) {
			err = i915_gem_object_pin_pages(vma->obj);
			if (err)
				goto unlock;
		}

		err = vma->ops->set_pages(vma);
		if (err) {
			if (vma->obj)
				i915_gem_object_unpin_pages(vma->obj);
			goto unlock;
		}
	}
	atomic_inc(&vma->pages_count);

unlock:
	mutex_unlock(&vma->pages_mutex);

	return err;
}
static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
{
	/* We allocate under vma_get_pages, so beware the shrinker */
	mutex_lock_nested(&vma->pages_mutex, SINGLE_DEPTH_NESTING);
	GEM_BUG_ON(atomic_read(&vma->pages_count) < count);
	if (atomic_sub_return(count, &vma->pages_count) == 0) {
		vma->ops->clear_pages(vma);
		GEM_BUG_ON(vma->pages);
		if (vma->obj)
			i915_gem_object_unpin_pages(vma->obj);
	}
	mutex_unlock(&vma->pages_mutex);
}
static void vma_put_pages(struct i915_vma *vma)
{
	if (atomic_add_unless(&vma->pages_count, -1, 1))
		return;

	__vma_put_pages(vma, 1);
}
static void vma_unbind_pages(struct i915_vma *vma)
{
	unsigned int count;

	lockdep_assert_held(&vma->vm->mutex);

	/* The upper portion of pages_count is the number of bindings */
	count = atomic_read(&vma->pages_count);
	count >>= I915_VMA_PAGES_BIAS;
	GEM_BUG_ON(!count);

	__vma_put_pages(vma, count | count << I915_VMA_PAGES_BIAS);
}
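/*
 * Editor's sketch of the pages_count encoding used above (exact bit split is
 * assumed from i915_vma.h): the low bits count get_pages holders, the bits at
 * and above I915_VMA_PAGES_BIAS count bindings. A successful pin adds
 * I915_VMA_PAGES_ACTIVE, i.e. one to each half, and vma_unbind_pages() drops
 * the binding half together with its matching page references.
 */
#if 0
static unsigned int example_count_bindings(const struct i915_vma *vma)
{
	/* upper half of pages_count = number of bindings */
	return atomic_read(&vma->pages_count) >> I915_VMA_PAGES_BIAS;
}
#endif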
int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
		    u64 size, u64 alignment, u64 flags)
{
	struct i915_vma_work *work = NULL;
	intel_wakeref_t wakeref = 0;
	unsigned int bound;
	int err;

#ifdef CONFIG_PROVE_LOCKING
	if (debug_locks && lockdep_is_held(&vma->vm->i915->drm.struct_mutex))
		WARN_ON(!ww);
#endif

	BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
	BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);

	GEM_BUG_ON(!(flags & (PIN_USER | PIN_GLOBAL)));

	/* First try and grab the pin without rebinding the vma */
	if (try_qad_pin(vma, flags & I915_VMA_BIND_MASK))
		return 0;

	err = vma_get_pages(vma);
	if (err)
		return err;

	if (flags & PIN_GLOBAL)
		wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);

	if (flags & vma->vm->bind_async_flags) {
		work = i915_vma_work();
		if (!work) {
			err = -ENOMEM;
			goto err_rpm;
		}

		work->vm = i915_vm_get(vma->vm);

		/* Allocate enough page directories to used PTE */
		if (vma->vm->allocate_va_range) {
			err = i915_vm_alloc_pt_stash(vma->vm,
						     &work->stash,
						     vma->size);
			if (err)
				goto err_fence;

			err = i915_vm_pin_pt_stash(vma->vm,
						   &work->stash);
			if (err)
				goto err_fence;
		}
	}

	/*
	 * Differentiate between user/kernel vma inside the aliasing-ppgtt.
	 *
	 * We conflate the Global GTT with the user's vma when using the
	 * aliasing-ppgtt, but it is still vitally important to try and
	 * keep the use cases distinct. For example, userptr objects are
	 * not allowed inside the Global GTT as that will cause lock
	 * inversions when we have to evict them the mmu_notifier callbacks -
	 * but they are allowed to be part of the user ppGTT which can never
	 * be mapped. As such we try to give the distinct users of the same
	 * mutex, distinct lockclasses [equivalent to how we keep i915_ggtt
	 * and i915_ppgtt separate].
	 *
	 * NB this may cause us to mask real lock inversions -- while the
	 * code is safe today, lockdep may not be able to spot future
	 * transgressions.
	 */
	err = mutex_lock_interruptible_nested(&vma->vm->mutex,
					      !(flags & PIN_GLOBAL));
	if (err)
		goto err_fence;

	/* No more allocations allowed now we hold vm->mutex */

	if (unlikely(i915_vma_is_closed(vma))) {
		err = -ENOENT;
		goto err_unlock;
	}

	bound = atomic_read(&vma->flags);
	if (unlikely(bound & I915_VMA_ERROR)) {
		err = -ENOMEM;
		goto err_unlock;
	}

	if (unlikely(!((bound + 1) & I915_VMA_PIN_MASK))) {
		err = -EAGAIN; /* pins are meant to be fairly temporary */
		goto err_unlock;
	}

	if (unlikely(!(flags & ~bound & I915_VMA_BIND_MASK))) {
		__i915_vma_pin(vma);
		goto err_unlock;
	}

	err = i915_active_acquire(&vma->active);
	if (err)
		goto err_unlock;

	if (!(bound & I915_VMA_BIND_MASK)) {
		err = i915_vma_insert(vma, size, alignment, flags);
		if (err)
			goto err_active;

		if (i915_is_ggtt(vma->vm))
			__i915_vma_set_map_and_fenceable(vma);
	}

	GEM_BUG_ON(!vma->pages);
	err = i915_vma_bind(vma,
			    vma->obj ? vma->obj->cache_level : 0,
			    flags, work);
	if (err)
		goto err_remove;

	/* There should only be at most 2 active bindings (user, global) */
	GEM_BUG_ON(bound + I915_VMA_PAGES_ACTIVE < bound);
	atomic_add(I915_VMA_PAGES_ACTIVE, &vma->pages_count);
	list_move_tail(&vma->vm_link, &vma->vm->bound_list);

	__i915_vma_pin(vma);
	GEM_BUG_ON(!i915_vma_is_pinned(vma));
	GEM_BUG_ON(!i915_vma_is_bound(vma, flags));
	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));

err_remove:
	if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK)) {
		i915_vma_detach(vma);
		drm_mm_remove_node(&vma->node);
	}
err_active:
	i915_active_release(&vma->active);
err_unlock:
	mutex_unlock(&vma->vm->mutex);
err_fence:
	if (work)
		dma_fence_work_commit_imm(&work->base);
err_rpm:
	if (wakeref)
		intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
	vma_put_pages(vma);
	return err;
}
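/*
 * Illustrative sketch (editor's example, not part of the driver): how a
 * caller typically drives i915_vma_pin_ww() under a ww acquire context so
 * that -EDEADLK is handled by backing off and retrying. The i915_gem_ww_ctx
 * helpers are assumed to match their declarations in i915_gem_ww.h.
 */
#if 0
static int example_pin_under_ww(struct i915_vma *vma)
{
	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = i915_gem_object_lock(vma->obj, &ww);
	if (!err)
		err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);

	return err;
}
#endif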
static void flush_idle_contexts(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id)
		intel_engine_flush_barriers(engine);

	intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
}
int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
		  u32 align, unsigned int flags)
{
	struct i915_address_space *vm = vma->vm;
	int err;

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));

	do {
		err = i915_vma_pin_ww(vma, ww, 0, align, flags | PIN_GLOBAL);
		if (err != -ENOSPC) {
			if (!err) {
				err = i915_vma_wait_for_bind(vma);
				if (err)
					i915_vma_unpin(vma);
			}
			return err;
		}

		/* Unlike i915_vma_pin, we don't take no for an answer! */
		flush_idle_contexts(vm->gt);
		if (mutex_lock_interruptible(&vm->mutex) == 0) {
			i915_gem_evict_vm(vm);
			mutex_unlock(&vm->mutex);
		}
	} while (1);
}
static void __vma_close(struct i915_vma *vma, struct intel_gt *gt)
{
	/*
	 * We defer actually closing, unbinding and destroying the VMA until
	 * the next idle point, or if the object is freed in the meantime. By
	 * postponing the unbind, we allow for it to be resurrected by the
	 * client, avoiding the work required to rebind the VMA. This is
	 * advantageous for DRI, where the client/server pass objects
	 * between themselves, temporarily opening a local VMA to the
	 * object, and then closing it again. The same object is then reused
	 * on the next frame (or two, depending on the depth of the swap queue)
	 * causing us to rebind the VMA once more. This ends up being a lot
	 * of wasted work for the steady state.
	 */
	GEM_BUG_ON(i915_vma_is_closed(vma));
	list_add(&vma->closed_link, &gt->closed_vma);
}
void i915_vma_close(struct i915_vma *vma)
{
	struct intel_gt *gt = vma->vm->gt;
	unsigned long flags;

	if (i915_vma_is_ggtt(vma))
		return;

	GEM_BUG_ON(!atomic_read(&vma->open_count));
	if (atomic_dec_and_lock_irqsave(&vma->open_count,
					&gt->closed_lock,
					flags)) {
		__vma_close(vma, gt);
		spin_unlock_irqrestore(&gt->closed_lock, flags);
	}
}
static void __i915_vma_remove_closed(struct i915_vma *vma)
{
	struct intel_gt *gt = vma->vm->gt;

	spin_lock_irq(&gt->closed_lock);
	list_del_init(&vma->closed_link);
	spin_unlock_irq(&gt->closed_lock);
}
void i915_vma_reopen(struct i915_vma *vma)
{
	if (i915_vma_is_closed(vma))
		__i915_vma_remove_closed(vma);
}
void i915_vma_release(struct kref *ref)
{
	struct i915_vma *vma = container_of(ref, typeof(*vma), ref);

	if (drm_mm_node_allocated(&vma->node)) {
		mutex_lock(&vma->vm->mutex);
		atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
		WARN_ON(__i915_vma_unbind(vma));
		mutex_unlock(&vma->vm->mutex);
		GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
	}
	GEM_BUG_ON(i915_vma_is_active(vma));

	if (vma->obj) {
		struct drm_i915_gem_object *obj = vma->obj;

		spin_lock(&obj->vma.lock);
		list_del(&vma->obj_link);
		if (!RB_EMPTY_NODE(&vma->obj_node))
			rb_erase(&vma->obj_node, &obj->vma.tree);
		spin_unlock(&obj->vma.lock);
	}

	__i915_vma_remove_closed(vma);
	i915_vm_put(vma->vm);

	i915_active_fini(&vma->active);
	i915_vma_free(vma);
}
void i915_vma_parked(struct intel_gt *gt)
{
	struct i915_vma *vma, *next;
	LIST_HEAD(closed);

	spin_lock_irq(&gt->closed_lock);
	list_for_each_entry_safe(vma, next, &gt->closed_vma, closed_link) {
		struct drm_i915_gem_object *obj = vma->obj;
		struct i915_address_space *vm = vma->vm;

		/* XXX All to avoid keeping a reference on i915_vma itself */

		if (!kref_get_unless_zero(&obj->base.refcount))
			continue;

		if (!i915_vm_tryopen(vm)) {
			i915_gem_object_put(obj);
			continue;
		}

		list_move(&vma->closed_link, &closed);
	}
	spin_unlock_irq(&gt->closed_lock);

	/* As the GT is held idle, no vma can be reopened as we destroy them */
	list_for_each_entry_safe(vma, next, &closed, closed_link) {
		struct drm_i915_gem_object *obj = vma->obj;
		struct i915_address_space *vm = vma->vm;

		INIT_LIST_HEAD(&vma->closed_link);
		__i915_vma_put(vma);

		i915_gem_object_put(obj);
		i915_vm_close(vm);
	}
}
static void __i915_vma_iounmap(struct i915_vma *vma)
{
	GEM_BUG_ON(i915_vma_is_pinned(vma));

	if (vma->iomap == NULL)
		return;

	io_mapping_unmap(vma->iomap);
	vma->iomap = NULL;
}
void i915_vma_revoke_mmap(struct i915_vma *vma)
{
	struct drm_vma_offset_node *node;
	u64 vma_offset;

	if (!i915_vma_has_userfault(vma))
		return;

	GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
	GEM_BUG_ON(!vma->obj->userfault_count);

	node = &vma->mmo->vma_node;
	vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
	unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
			    drm_vma_node_offset_addr(node) + vma_offset,
			    vma->size,
			    1);

	i915_vma_unset_userfault(vma);
	if (!--vma->obj->userfault_count)
		list_del(&vma->obj->userfault_link);
}
static int
__i915_request_await_bind(struct i915_request *rq, struct i915_vma *vma)
{
	return __i915_request_await_exclusive(rq, &vma->active);
}
int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
{
	int err;

	GEM_BUG_ON(!i915_vma_is_pinned(vma));

	/* Wait for the vma to be bound before we start! */
	err = __i915_request_await_bind(rq, vma);
	if (err)
		return err;

	return i915_active_add_request(&vma->active, rq);
}
int i915_vma_move_to_active(struct i915_vma *vma,
			    struct i915_request *rq,
			    unsigned int flags)
{
	struct drm_i915_gem_object *obj = vma->obj;
	int err;

	assert_object_held(obj);

	err = __i915_vma_move_to_active(vma, rq);
	if (unlikely(err))
		return err;

	if (flags & EXEC_OBJECT_WRITE) {
		struct intel_frontbuffer *front;

		front = __intel_frontbuffer_get(obj);
		if (unlikely(front)) {
			if (intel_frontbuffer_invalidate(front, ORIGIN_CS))
				i915_active_add_request(&front->write, rq);
			intel_frontbuffer_put(front);
		}

		dma_resv_add_excl_fence(vma->resv, &rq->fence);
		obj->write_domain = I915_GEM_DOMAIN_RENDER;
		obj->read_domains = 0;
	} else {
		err = dma_resv_reserve_shared(vma->resv, 1);
		if (unlikely(err))
			return err;

		dma_resv_add_shared_fence(vma->resv, &rq->fence);
		obj->write_domain = 0;
	}

	if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence)
		i915_active_add_request(&vma->fence->active, rq);

	obj->read_domains |= I915_GEM_GPU_DOMAINS;
	obj->mm.dirty = true;

	GEM_BUG_ON(!i915_vma_is_active(vma));
	return 0;
}
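/*
 * Illustrative sketch (editor's example, not part of the driver): the order
 * of operations a request builder follows around i915_vma_move_to_active().
 * The object lock must already be held (see assert_object_held() above) and
 * the vma pinned; flags mirror the execbuf EXEC_OBJECT_* bits.
 */
#if 0
static int example_track_vma_in_request(struct i915_vma *vma,
					struct i915_request *rq,
					bool writes)
{
	/* vma was pinned earlier and obj->resv is locked by the caller */
	return i915_vma_move_to_active(vma, rq,
				       writes ? EXEC_OBJECT_WRITE : 0);
}
#endif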
void __i915_vma_evict(struct i915_vma *vma)
{
	GEM_BUG_ON(i915_vma_is_pinned(vma));

	if (i915_vma_is_map_and_fenceable(vma)) {
		/* Force a pagefault for domain tracking on next user access */
		i915_vma_revoke_mmap(vma);

		/*
		 * Check that we have flushed all writes through the GGTT
		 * before the unbind; otherwise, due to the non-strict nature
		 * of those indirect writes, they may end up referencing the
		 * GGTT PTE after the unbind.
		 *
		 * Note that we may be concurrently poking at the GGTT_WRITE
		 * bit from set-domain, as we mark all GGTT vma associated
		 * with an object. We know this is for another vma, as we
		 * are currently unbinding this one -- so if this vma will be
		 * reused, it will be refaulted and have its dirty bit set
		 * before the next write.
		 */
		i915_vma_flush_writes(vma);

		/* release the fence reg _after_ flushing */
		i915_vma_revoke_fence(vma);

		__i915_vma_iounmap(vma);
		clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
	}
	GEM_BUG_ON(vma->fence);
	GEM_BUG_ON(i915_vma_has_userfault(vma));

	if (likely(atomic_read(&vma->vm->open))) {
		trace_i915_vma_unbind(vma);
		vma->ops->unbind_vma(vma->vm, vma);
	}
	atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR | I915_VMA_GGTT_WRITE),
		   &vma->flags);

	i915_vma_detach(vma);
	vma_unbind_pages(vma);
}
int __i915_vma_unbind(struct i915_vma *vma)
{
	int ret;

	lockdep_assert_held(&vma->vm->mutex);

	if (!drm_mm_node_allocated(&vma->node))
		return 0;

	if (i915_vma_is_pinned(vma)) {
		vma_print_allocator(vma, "is pinned");
		return -EAGAIN;
	}

	/*
	 * After confirming that no one else is pinning this vma, wait for
	 * any laggards who may have crept in during the wait (through
	 * a residual pin skipping the vm->mutex) to complete.
	 */
	ret = i915_vma_sync(vma);
	if (ret)
		return ret;

	GEM_BUG_ON(i915_vma_is_active(vma));
	__i915_vma_evict(vma);

	drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
	return 0;
}
int i915_vma_unbind(struct i915_vma *vma)
{
	struct i915_address_space *vm = vma->vm;
	intel_wakeref_t wakeref = 0;
	int err;

	/* Optimistic wait before taking the mutex */
	err = i915_vma_sync(vma);
	if (err)
		return err;

	if (!drm_mm_node_allocated(&vma->node))
		return 0;

	if (i915_vma_is_pinned(vma)) {
		vma_print_allocator(vma, "is pinned");
		return -EAGAIN;
	}

	if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
		/* XXX not always required: nop_clear_range */
		wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);

	err = mutex_lock_interruptible_nested(&vma->vm->mutex, !wakeref);
	if (err)
		goto out_rpm;

	err = __i915_vma_unbind(vma);
	mutex_unlock(&vm->mutex);

out_rpm:
	if (wakeref)
		intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
	return err;
}
struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma)
{
	i915_gem_object_make_unshrinkable(vma->obj);
	return vma;
}
void i915_vma_make_shrinkable(struct i915_vma *vma)
{
	i915_gem_object_make_shrinkable(vma->obj);
}
void i915_vma_make_purgeable(struct i915_vma *vma)
{
	i915_gem_object_make_purgeable(vma->obj);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_vma.c"
#endif
static void i915_global_vma_shrink(void)
{
	kmem_cache_shrink(global.slab_vmas);
}
static void i915_global_vma_exit(void)
{
	kmem_cache_destroy(global.slab_vmas);
}
static struct i915_global_vma global = { {
	.shrink = i915_global_vma_shrink,
	.exit = i915_global_vma_exit,
} };
int __init i915_global_vma_init(void)
{
	global.slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
	if (!global.slab_vmas)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}