/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "i915_vma.h"

#include "i915_drv.h"
#include "intel_ringbuffer.h"
#include "intel_frontbuffer.h"

#include <drm/drm_gem.h>

static void
i915_vma_retire(struct i915_gem_active *active,
                struct drm_i915_gem_request *rq)
{
        const unsigned int idx = rq->engine->id;
        struct i915_vma *vma =
                container_of(active, struct i915_vma, last_read[idx]);
        struct drm_i915_gem_object *obj = vma->obj;

        GEM_BUG_ON(!i915_vma_has_active_engine(vma, idx));

        i915_vma_clear_active(vma, idx);
        if (i915_vma_is_active(vma))
                return;

        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
        list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
        if (unlikely(i915_vma_is_closed(vma) && !i915_vma_is_pinned(vma)))
                WARN_ON(i915_vma_unbind(vma));

        GEM_BUG_ON(!i915_gem_object_is_active(obj));
        if (--obj->active_count)
                return;

        /* Prune the shared fence arrays iff completely idle (inc. external) */
        if (reservation_object_trylock(obj->resv)) {
                if (reservation_object_test_signaled_rcu(obj->resv, true))
                        reservation_object_add_excl_fence(obj->resv, NULL);
                reservation_object_unlock(obj->resv);
        }

        /* Bump our place on the bound list to keep it roughly in LRU order
         * so that we don't steal from recently used but inactive objects
         * (unless we are forced to, of course!)
         */
        spin_lock(&rq->i915->mm.obj_lock);
        if (obj->bind_count)
                list_move_tail(&obj->mm.link, &rq->i915->mm.bound_list);
        spin_unlock(&rq->i915->mm.obj_lock);

        obj->mm.dirty = true; /* be paranoid */

        if (i915_gem_object_has_active_reference(obj)) {
                i915_gem_object_clear_active_reference(obj);
                i915_gem_object_put(obj);
        }
}

static struct i915_vma *
vma_create(struct drm_i915_gem_object *obj,
           struct i915_address_space *vm,
           const struct i915_ggtt_view *view)
{
        struct i915_vma *vma;
        struct rb_node *rb, **p;
        int i;

        /* The aliasing_ppgtt should never be used directly! */
        GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);

        vma = kmem_cache_zalloc(vm->i915->vmas, GFP_KERNEL);
        if (vma == NULL)
                return ERR_PTR(-ENOMEM);

        for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
                init_request_active(&vma->last_read[i], i915_vma_retire);
        init_request_active(&vma->last_fence, NULL);
        vma->vm = vm;
        vma->obj = obj;
        vma->resv = obj->resv;
        vma->size = obj->base.size;
        vma->display_alignment = I915_GTT_MIN_ALIGNMENT;

        if (view && view->type != I915_GGTT_VIEW_NORMAL) {
                vma->ggtt_view = *view;
                if (view->type == I915_GGTT_VIEW_PARTIAL) {
                        GEM_BUG_ON(range_overflows_t(u64,
                                                     view->partial.offset,
                                                     view->partial.size,
                                                     obj->base.size >> PAGE_SHIFT));
                        vma->size = view->partial.size;
                        vma->size <<= PAGE_SHIFT;
                        GEM_BUG_ON(vma->size >= obj->base.size);
                } else if (view->type == I915_GGTT_VIEW_ROTATED) {
                        vma->size = intel_rotation_info_size(&view->rotated);
                        vma->size <<= PAGE_SHIFT;
                }
        }

        if (unlikely(vma->size > vm->total))
                goto err_vma;

        GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));

        if (i915_is_ggtt(vm)) {
                if (unlikely(overflows_type(vma->size, u32)))
                        goto err_vma;

                vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
                                                      i915_gem_object_get_tiling(obj),
                                                      i915_gem_object_get_stride(obj));
                if (unlikely(vma->fence_size < vma->size || /* overflow */
                             vma->fence_size > vm->total))
                        goto err_vma;

                GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));

                vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
                                                                i915_gem_object_get_tiling(obj),
                                                                i915_gem_object_get_stride(obj));
                GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));

                /*
                 * We put the GGTT vma at the start of the vma-list, followed
                 * by the ppGGTT vma. This allows us to break early when
                 * iterating over only the GGTT vma for an object, see
                 * for_each_ggtt_vma().
                 */
                vma->flags |= I915_VMA_GGTT;
                list_add(&vma->obj_link, &obj->vma_list);
        } else {
                i915_ppgtt_get(i915_vm_to_ppgtt(vm));
                list_add_tail(&vma->obj_link, &obj->vma_list);
        }

        rb = NULL;
        p = &obj->vma_tree.rb_node;
        while (*p) {
                struct i915_vma *pos;

                rb = *p;
                pos = rb_entry(rb, struct i915_vma, obj_node);
                if (i915_vma_compare(pos, vm, view) < 0)
                        p = &rb->rb_right;
                else
                        p = &rb->rb_left;
        }
        rb_link_node(&vma->obj_node, rb, p);
        rb_insert_color(&vma->obj_node, &obj->vma_tree);
        list_add(&vma->vm_link, &vm->unbound_list);

        return vma;

err_vma:
        kmem_cache_free(vm->i915->vmas, vma);
        return ERR_PTR(-E2BIG);
}

static struct i915_vma *
vma_lookup(struct drm_i915_gem_object *obj,
           struct i915_address_space *vm,
           const struct i915_ggtt_view *view)
{
        struct rb_node *rb;

        rb = obj->vma_tree.rb_node;
        while (rb) {
                struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
                long cmp;

                cmp = i915_vma_compare(vma, vm, view);
                if (cmp == 0)
                        return vma;

                if (cmp < 0)
                        rb = rb->rb_right;
                else
                        rb = rb->rb_left;
        }

        return NULL;
}

/**
 * i915_vma_instance - return the singleton instance of the VMA
 * @obj: parent &struct drm_i915_gem_object to be mapped
 * @vm: address space in which the mapping is located
 * @view: additional mapping requirements
 *
 * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
 * the same @view characteristics. If a match is not found, one is created.
 * Once created, the VMA is kept until either the object is freed, or the
 * address space is closed.
 *
 * Must be called with struct_mutex held.
 *
 * Returns the vma, or an error pointer.
 */
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
                  struct i915_address_space *vm,
                  const struct i915_ggtt_view *view)
{
        struct i915_vma *vma;

        lockdep_assert_held(&obj->base.dev->struct_mutex);
        GEM_BUG_ON(view && !i915_is_ggtt(vm));
        GEM_BUG_ON(vm->closed);

        vma = vma_lookup(obj, vm, view);
        if (!vma)
                vma = vma_create(obj, vm, view);

        GEM_BUG_ON(!IS_ERR(vma) && i915_vma_is_closed(vma));
        GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
        GEM_BUG_ON(!IS_ERR(vma) && vma_lookup(obj, vm, view) != vma);
        return vma;
}

/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
 * @vma: VMA to map
 * @cache_level: mapping cache level
 * @flags: flags like global or local mapping
 *
 * DMA addresses are taken from the scatter-gather table of this object (or of
 * this VMA in case of non-default GGTT views) and PTE entries set up.
 * Note that DMA addresses are also the only part of the SG table we care about.
 */
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
                  u32 flags)
{
        u32 bind_flags;
        u32 vma_flags;
        int ret;

        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
        GEM_BUG_ON(vma->size > vma->node.size);

        if (GEM_WARN_ON(range_overflows(vma->node.start,
                                        vma->node.size,
                                        vma->vm->total)))
                return -ENODEV;

        if (GEM_WARN_ON(!flags))
                return -EINVAL;

        bind_flags = 0;
        if (flags & PIN_GLOBAL)
                bind_flags |= I915_VMA_GLOBAL_BIND;
        if (flags & PIN_USER)
                bind_flags |= I915_VMA_LOCAL_BIND;

        vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
        if (flags & PIN_UPDATE)
                bind_flags |= vma_flags;
        else
                bind_flags &= ~vma_flags;
        if (bind_flags == 0)
                return 0;

        GEM_BUG_ON(!vma->pages);

        trace_i915_vma_bind(vma, bind_flags);
        ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
        if (ret)
                return ret;

        vma->flags |= bind_flags;
        return 0;
}
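
/*
 * Illustrative sketch (not part of the original file) of the flag
 * translation above: the PIN_* request flags passed by callers map onto
 * the vma's bind flags, for example
 *
 *      i915_vma_bind(vma, obj->cache_level, PIN_USER);
 *              -> bind_flags == I915_VMA_LOCAL_BIND
 *      i915_vma_bind(vma, obj->cache_level, PIN_GLOBAL | PIN_USER);
 *              -> bind_flags == I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND
 *
 * and only the delta against the already-bound vma->flags is passed on to
 * vm->bind_vma(), unless PIN_UPDATE forces a rebind.
 */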

void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{
        void __iomem *ptr;
        int err;

        /* Access through the GTT requires the device to be awake. */
        assert_rpm_wakelock_held(vma->vm->i915);

        lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
        if (WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
                err = -ENODEV;
                goto err;
        }

        GEM_BUG_ON(!i915_vma_is_ggtt(vma));
        GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);

        ptr = vma->iomap;
        if (ptr == NULL) {
                ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
                                        vma->node.start,
                                        vma->node.size);
                if (ptr == NULL) {
                        err = -ENOMEM;
                        goto err;
                }

                vma->iomap = ptr;
        }

        __i915_vma_pin(vma);

        err = i915_vma_pin_fence(vma);
        if (err)
                goto err_unpin;

        i915_vma_set_ggtt_write(vma);
        return ptr;

err_unpin:
        __i915_vma_unpin(vma);
err:
        return IO_ERR_PTR(err);
}

void i915_vma_flush_writes(struct i915_vma *vma)
{
        if (!i915_vma_has_ggtt_write(vma))
                return;

        i915_gem_flush_ggtt_writes(vma->vm->i915);

        i915_vma_unset_ggtt_write(vma);
}

void i915_vma_unpin_iomap(struct i915_vma *vma)
{
        lockdep_assert_held(&vma->obj->base.dev->struct_mutex);

        GEM_BUG_ON(vma->iomap == NULL);

        i915_vma_flush_writes(vma);

        i915_vma_unpin_fence(vma);
        i915_vma_unpin(vma);
}
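
/*
 * Illustrative sketch (not part of the original file): mapping a GGTT-bound
 * vma for CPU access through the aperture. It assumes struct_mutex is held
 * and a runtime-pm wakeref is active, as asserted in i915_vma_pin_iomap();
 * "offset" and "value" are placeholders.
 *
 *      void __iomem *ptr;
 *
 *      ptr = i915_vma_pin_iomap(vma);
 *      if (IS_ERR(ptr))
 *              return PTR_ERR(ptr);
 *
 *      writel(value, ptr + offset);
 *
 *      i915_vma_unpin_iomap(vma);
 */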

void i915_vma_unpin_and_release(struct i915_vma **p_vma)
{
        struct i915_vma *vma;
        struct drm_i915_gem_object *obj;

        vma = fetch_and_zero(p_vma);
        if (!vma)
                return;

        obj = vma->obj;

        i915_vma_unpin(vma);
        i915_vma_close(vma);

        __i915_gem_object_release_unless_active(obj);
}

bool i915_vma_misplaced(const struct i915_vma *vma,
                        u64 size, u64 alignment, u64 flags)
{
        if (!drm_mm_node_allocated(&vma->node))
                return false;

        if (vma->node.size < size)
                return true;

        GEM_BUG_ON(alignment && !is_power_of_2(alignment));
        if (alignment && !IS_ALIGNED(vma->node.start, alignment))
                return true;

        if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
                return true;

        if (flags & PIN_OFFSET_BIAS &&
            vma->node.start < (flags & PIN_OFFSET_MASK))
                return true;

        if (flags & PIN_OFFSET_FIXED &&
            vma->node.start != (flags & PIN_OFFSET_MASK))
                return true;

        return false;
}

void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
        bool mappable, fenceable;

        GEM_BUG_ON(!i915_vma_is_ggtt(vma));
        GEM_BUG_ON(!vma->fence_size);

        /*
         * Explicitly disable for rotated VMA since the display does not
         * need the fence and the VMA is not accessible to other users.
         */
        if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
                return;

        fenceable = (vma->node.size >= vma->fence_size &&
                     IS_ALIGNED(vma->node.start, vma->fence_alignment));

        mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;

        if (mappable && fenceable)
                vma->flags |= I915_VMA_CAN_FENCE;
        else
                vma->flags &= ~I915_VMA_CAN_FENCE;
}

static bool color_differs(struct drm_mm_node *node, unsigned long color)
{
        return node->allocated && node->color != color;
}

bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
{
        struct drm_mm_node *node = &vma->node;
        struct drm_mm_node *other;

        /*
         * On some machines we have to be careful when putting differing types
         * of snoopable memory together to avoid the prefetcher crossing memory
         * domains and dying. During vm initialisation, we decide whether or not
         * these constraints apply and set the drm_mm.color_adjust
         * appropriately.
         */
        if (vma->vm->mm.color_adjust == NULL)
                return true;

        /* Only valid to be called on an already inserted vma */
        GEM_BUG_ON(!drm_mm_node_allocated(node));
        GEM_BUG_ON(list_empty(&node->node_list));

        other = list_prev_entry(node, node_list);
        if (color_differs(other, cache_level) && !drm_mm_hole_follows(other))
                return false;

        other = list_next_entry(node, node_list);
        if (color_differs(other, cache_level) && !drm_mm_hole_follows(node))
                return false;

        return true;
}

/**
 * i915_vma_insert - finds a slot for the vma in its address space
 * @vma: the vma
 * @size: requested size in bytes (can be larger than the VMA)
 * @alignment: required alignment
 * @flags: mask of PIN_* flags to use
 *
 * First we try to allocate some free space that meets the requirements for
 * the VMA. Failing that, if the flags permit, it will evict an old VMA,
 * preferably the oldest idle entry, to make room for the new VMA.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
        struct drm_i915_private *dev_priv = vma->vm->i915;
        struct drm_i915_gem_object *obj = vma->obj;
        u64 start, end;
        int ret;

        GEM_BUG_ON(i915_vma_is_closed(vma));
        GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
        GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

        size = max(size, vma->size);
        alignment = max(alignment, vma->display_alignment);
        if (flags & PIN_MAPPABLE) {
                size = max_t(typeof(size), size, vma->fence_size);
                alignment = max_t(typeof(alignment),
                                  alignment, vma->fence_alignment);
        }

        GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
        GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
        GEM_BUG_ON(!is_power_of_2(alignment));

        start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
        GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));

        end = vma->vm->total;
        if (flags & PIN_MAPPABLE)
                end = min_t(u64, end, dev_priv->ggtt.mappable_end);
        if (flags & PIN_ZONE_4G)
                end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
        GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

        /* If binding the object/GGTT view requires more space than the entire
         * aperture has, reject it early before evicting everything in a vain
         * attempt to find space.
         */
        if (size > end) {
                DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
                          size, obj->base.size,
                          flags & PIN_MAPPABLE ? "mappable" : "total",
                          end);
                return -ENOSPC;
        }

        ret = i915_gem_object_pin_pages(obj);
        if (ret)
                return ret;

        GEM_BUG_ON(vma->pages);

        ret = vma->vm->set_pages(vma);
        if (ret)
                goto err_unpin;

        if (flags & PIN_OFFSET_FIXED) {
                u64 offset = flags & PIN_OFFSET_MASK;
                if (!IS_ALIGNED(offset, alignment) ||
                    range_overflows(offset, size, end)) {
                        ret = -EINVAL;
                        goto err_clear;
                }

                ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
                                           size, offset, obj->cache_level,
                                           flags);
                if (ret)
                        goto err_clear;
        } else {
                /*
                 * We only support huge gtt pages through the 48b PPGTT,
                 * however we also don't want to force any alignment for
                 * objects which need to be tightly packed into the low 32bits.
                 *
                 * Note that we assume that GGTT are limited to 4GiB for the
                 * foreseeable future. See also i915_ggtt_offset().
                 */
                if (upper_32_bits(end - 1) &&
                    vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
                        /*
                         * We can't mix 64K and 4K PTEs in the same page-table
                         * (2M block), and so to avoid the ugliness and
                         * complexity of coloring we opt for just aligning 64K
                         * objects to 2M.
                         */
                        u64 page_alignment =
                                rounddown_pow_of_two(vma->page_sizes.sg |
                                                     I915_GTT_PAGE_SIZE_2M);

                        /*
                         * Check we don't expand for the limited Global GTT
                         * (mappable aperture is even more precious!). This
                         * also checks that we exclude the aliasing-ppgtt.
                         */
                        GEM_BUG_ON(i915_vma_is_ggtt(vma));

                        alignment = max(alignment, page_alignment);

                        if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
                                size = round_up(size, I915_GTT_PAGE_SIZE_2M);
                }

                ret = i915_gem_gtt_insert(vma->vm, &vma->node,
                                          size, alignment, obj->cache_level,
                                          start, end, flags);
                if (ret)
                        goto err_clear;

                GEM_BUG_ON(vma->node.start < start);
                GEM_BUG_ON(vma->node.start + vma->node.size > end);
        }
        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
        GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));

        list_move_tail(&vma->vm_link, &vma->vm->inactive_list);

        spin_lock(&dev_priv->mm.obj_lock);
        list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
        obj->bind_count++;
        spin_unlock(&dev_priv->mm.obj_lock);

        GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);

        return 0;

err_clear:
        vma->vm->clear_pages(vma);
err_unpin:
        i915_gem_object_unpin_pages(obj);
        return ret;
}
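
/*
 * Illustrative sketch (not part of the original file): callers do not use
 * i915_vma_insert() directly; placement requests arrive through the PIN_*
 * flags given to i915_vma_pin(). A fixed placement, for instance, encodes
 * the desired offset in the same flags word under PIN_OFFSET_MASK:
 *
 *      err = i915_vma_pin(vma, 0, 0,
 *                         PIN_GLOBAL | PIN_OFFSET_FIXED |
 *                         (offset & PIN_OFFSET_MASK));
 */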

static void
i915_vma_remove(struct i915_vma *vma)
{
        struct drm_i915_private *i915 = vma->vm->i915;
        struct drm_i915_gem_object *obj = vma->obj;

        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
        GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));

        vma->vm->clear_pages(vma);

        drm_mm_remove_node(&vma->node);
        list_move_tail(&vma->vm_link, &vma->vm->unbound_list);

        /* Since the unbound list is global, only move to that list if
         * no more VMAs exist.
         */
        spin_lock(&i915->mm.obj_lock);
        if (--obj->bind_count == 0)
                list_move_tail(&obj->mm.link, &i915->mm.unbound_list);
        spin_unlock(&i915->mm.obj_lock);

        /* And finally now the object is completely decoupled from this vma,
         * we can drop its hold on the backing storage and allow it to be
         * reaped by the shrinker.
         */
        i915_gem_object_unpin_pages(obj);
        GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
}

int __i915_vma_do_pin(struct i915_vma *vma,
                      u64 size, u64 alignment, u64 flags)
{
        const unsigned int bound = vma->flags;
        int ret;

        lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
        GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
        GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));

        if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
                ret = -EBUSY;
                goto err_unpin;
        }

        if ((bound & I915_VMA_BIND_MASK) == 0) {
                ret = i915_vma_insert(vma, size, alignment, flags);
                if (ret)
                        goto err_unpin;
        }
        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

        ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
        if (ret)
                goto err_remove;

        GEM_BUG_ON((vma->flags & I915_VMA_BIND_MASK) == 0);

        if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
                __i915_vma_set_map_and_fenceable(vma);

        GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
        return 0;

err_remove:
        if ((bound & I915_VMA_BIND_MASK) == 0) {
                i915_vma_remove(vma);
                GEM_BUG_ON(vma->pages);
                GEM_BUG_ON(vma->flags & I915_VMA_BIND_MASK);
        }
err_unpin:
        __i915_vma_unpin(vma);
        return ret;
}
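
/*
 * Illustrative sketch (not part of the original file): users normally reach
 * __i915_vma_do_pin() through the i915_vma_pin() wrapper in i915_vma.h,
 * which takes the pin count first and only falls into this slow path when
 * the vma is not yet bound as requested:
 *
 *      err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_MAPPABLE);
 *      if (err)
 *              return err;
 *
 *      ... use the pinned mapping ...
 *
 *      i915_vma_unpin(vma);
 */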

static void i915_vma_destroy(struct i915_vma *vma)
{
        int i;

        GEM_BUG_ON(vma->node.allocated);
        GEM_BUG_ON(i915_vma_is_active(vma));
        GEM_BUG_ON(!i915_vma_is_closed(vma));
        GEM_BUG_ON(vma->fence);

        for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
                GEM_BUG_ON(i915_gem_active_isset(&vma->last_read[i]));
        GEM_BUG_ON(i915_gem_active_isset(&vma->last_fence));

        list_del(&vma->obj_link);
        list_del(&vma->vm_link);

        if (!i915_vma_is_ggtt(vma))
                i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));

        kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
}

void i915_vma_close(struct i915_vma *vma)
{
        GEM_BUG_ON(i915_vma_is_closed(vma));
        vma->flags |= I915_VMA_CLOSED;

        rb_erase(&vma->obj_node, &vma->obj->vma_tree);

        if (!i915_vma_is_active(vma) && !i915_vma_is_pinned(vma))
                WARN_ON(i915_vma_unbind(vma));
}

static void __i915_vma_iounmap(struct i915_vma *vma)
{
        GEM_BUG_ON(i915_vma_is_pinned(vma));

        if (vma->iomap == NULL)
                return;

        io_mapping_unmap(vma->iomap);
        vma->iomap = NULL;
}

void i915_vma_revoke_mmap(struct i915_vma *vma)
{
        struct drm_vma_offset_node *node = &vma->obj->base.vma_node;
        u64 vma_offset;

        lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

        if (!i915_vma_has_userfault(vma))
                return;

        GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
        GEM_BUG_ON(!vma->obj->userfault_count);

        vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
        unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
                            drm_vma_node_offset_addr(node) + vma_offset,
                            vma->size,
                            1);

        i915_vma_unset_userfault(vma);
        if (!--vma->obj->userfault_count)
                list_del(&vma->obj->userfault_link);
}

int i915_vma_unbind(struct i915_vma *vma)
{
        struct drm_i915_gem_object *obj = vma->obj;
        unsigned long active;
        int ret;

        lockdep_assert_held(&obj->base.dev->struct_mutex);

        /* First wait upon any activity as retiring the request may
         * have side-effects such as unpinning or even unbinding this vma.
         */
        might_sleep();
        active = i915_vma_get_active(vma);
        if (active) {
                int idx;

                /* When a closed VMA is retired, it is unbound - eek.
                 * In order to prevent it from being recursively closed,
                 * take a pin on the vma so that the second unbind is
                 * aborted.
                 *
                 * Even more scary is that the retire callback may free
                 * the object (last active vma). To prevent the explosion
                 * we defer the actual object free to a worker that can
                 * only proceed once it acquires the struct_mutex (which
                 * we currently hold, therefore it cannot free this object
                 * before we are finished).
                 */
                __i915_vma_pin(vma);

                for_each_active(active, idx) {
                        ret = i915_gem_active_retire(&vma->last_read[idx],
                                                     &vma->vm->i915->drm.struct_mutex);
                        if (ret)
                                break;
                }

                if (!ret) {
                        ret = i915_gem_active_retire(&vma->last_fence,
                                                     &vma->vm->i915->drm.struct_mutex);
                }

                __i915_vma_unpin(vma);
                if (ret)
                        return ret;
        }
        GEM_BUG_ON(i915_vma_is_active(vma));

        if (i915_vma_is_pinned(vma))
                return -EBUSY;

        if (!drm_mm_node_allocated(&vma->node))
                return 0;

        GEM_BUG_ON(obj->bind_count == 0);
        GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

        if (i915_vma_is_map_and_fenceable(vma)) {
                /*
                 * Check that we have flushed all writes through the GGTT
                 * before the unbind; otherwise, due to the non-strict nature
                 * of those indirect writes, they may end up referencing the
                 * GGTT PTE after the unbind.
                 */
                i915_vma_flush_writes(vma);
                GEM_BUG_ON(i915_vma_has_ggtt_write(vma));

                /* release the fence reg _after_ flushing */
                ret = i915_vma_put_fence(vma);
                if (ret)
                        return ret;

                /* Force a pagefault for domain tracking on next user access */
                i915_vma_revoke_mmap(vma);

                __i915_vma_iounmap(vma);
                vma->flags &= ~I915_VMA_CAN_FENCE;
        }
        GEM_BUG_ON(vma->fence);
        GEM_BUG_ON(i915_vma_has_userfault(vma));

        if (likely(!vma->vm->closed)) {
                trace_i915_vma_unbind(vma);
                vma->vm->unbind_vma(vma);
        }
        vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);

        i915_vma_remove(vma);

        if (unlikely(i915_vma_is_closed(vma)))
                i915_vma_destroy(vma);

        return 0;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_vma.c"
#endif