// SPDX-License-Identifier: MIT
/*
 * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst)
 *
 * Based on bo.c which bears the following copyright notice,
 * but is dual licensed:
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <linux/dma-resv.h>
#include <linux/dma-fence-array.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/seq_file.h>
/**
 * DOC: Reservation Object Overview
 *
 * The reservation object provides a mechanism to manage a container of
 * dma_fence objects associated with a resource. A reservation object
 * can have any number of fences attached to it. Each fence carries a usage
 * parameter determining how the operation represented by the fence is using
 * the resource. The RCU mechanism is used to protect read access to fences
 * from locked write-side updates.
 *
 * See struct dma_resv for more details.
 */
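
/*
 * Illustrative sketch (not part of this file): with the object locked, the
 * fences can be walked directly with the locked iterator. The helper name
 * my_driver_dump_fences() is hypothetical.
 *
 *	static void my_driver_dump_fences(struct dma_resv *resv)
 *	{
 *		struct dma_resv_iter cursor;
 *		struct dma_fence *fence;
 *
 *		dma_resv_assert_held(resv);
 *		dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_READ,
 *					fence)
 *			pr_info("fence context %llu seqno %llu\n",
 *				fence->context, fence->seqno);
 *	}
 */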
DEFINE_WD_CLASS(reservation_ww_class);
EXPORT_SYMBOL(reservation_ww_class);
/* Mask for the lower fence pointer bits */
#define DMA_RESV_LIST_MASK	0x3

struct dma_resv_list {
	struct rcu_head rcu;
	u32 num_fences, max_fences;
	struct dma_fence __rcu *table[];
};
/* Extract the fence and usage flags from an RCU protected entry in the list. */
static void dma_resv_list_entry(struct dma_resv_list *list, unsigned int index,
				struct dma_resv *resv, struct dma_fence **fence,
				enum dma_resv_usage *usage)
{
	long tmp;

	tmp = (long)rcu_dereference_check(list->table[index],
					  resv ? dma_resv_held(resv) : true);
	*fence = (struct dma_fence *)(tmp & ~DMA_RESV_LIST_MASK);
	if (usage)
		*usage = tmp & DMA_RESV_LIST_MASK;
}
/* Set the fence and usage flags at the specific index in the list. */
static void dma_resv_list_set(struct dma_resv_list *list,
			      unsigned int index,
			      struct dma_fence *fence,
			      enum dma_resv_usage usage)
{
	long tmp = ((long)fence) | usage;

	RCU_INIT_POINTER(list->table[index], (struct dma_fence *)tmp);
}
/*
 * Allocate a new dma_resv_list and make sure to correctly initialize
 * max_fences.
 */
static struct dma_resv_list *dma_resv_list_alloc(unsigned int max_fences)
{
	struct dma_resv_list *list;
	size_t size;

	/* Round up to the next kmalloc bucket size. */
	size = kmalloc_size_roundup(struct_size(list, table, max_fences));

	list = kmalloc(size, GFP_KERNEL);
	if (!list)
		return NULL;

	/* Given the resulting bucket size, recalculate max_fences. */
	list->max_fences = (size - offsetof(typeof(*list), table)) /
		sizeof(*list->table);

	return list;
}
/* Free a dma_resv_list and make sure to drop all references. */
static void dma_resv_list_free(struct dma_resv_list *list)
{
	unsigned int i;

	if (!list)
		return;

	for (i = 0; i < list->num_fences; ++i) {
		struct dma_fence *fence;

		dma_resv_list_entry(list, i, NULL, &fence, NULL);
		dma_fence_put(fence);
	}
	kfree_rcu(list, rcu);
}
/**
 * dma_resv_init - initialize a reservation object
 * @obj: the reservation object
 */
void dma_resv_init(struct dma_resv *obj)
{
	ww_mutex_init(&obj->lock, &reservation_ww_class);
	RCU_INIT_POINTER(obj->fences, NULL);
}
EXPORT_SYMBOL(dma_resv_init);
/**
 * dma_resv_fini - destroys a reservation object
 * @obj: the reservation object
 */
void dma_resv_fini(struct dma_resv *obj)
{
	/*
	 * This object should be dead and all references must have
	 * been released to it, so no need to be protected with rcu.
	 */
	dma_resv_list_free(rcu_dereference_protected(obj->fences, true));
	ww_mutex_destroy(&obj->lock);
}
EXPORT_SYMBOL(dma_resv_fini);
/* Dereference the fences while ensuring RCU rules */
static inline struct dma_resv_list *dma_resv_fences_list(struct dma_resv *obj)
{
	return rcu_dereference_check(obj->fences, dma_resv_held(obj));
}
/**
 * dma_resv_reserve_fences - Reserve space to add fences to a dma_resv object.
 * @obj: reservation object
 * @num_fences: number of fences we want to add
 *
 * Should be called before dma_resv_add_fence(). Must be called with @obj
 * locked through dma_resv_lock().
 *
 * Note that the preallocated slots need to be re-reserved if @obj is unlocked
 * at any time before calling dma_resv_add_fence(). This is validated when
 * CONFIG_DEBUG_MUTEXES is enabled.
 *
 * RETURNS
 * Zero for success, or -errno
 */
int dma_resv_reserve_fences(struct dma_resv *obj, unsigned int num_fences)
{
	struct dma_resv_list *old, *new;
	unsigned int i, j, k, max;

	dma_resv_assert_held(obj);

	/* Driver and component code should never call this function with
	 * num_fences=0. If they do it usually points to bugs when calculating
	 * the number of needed fences dynamically.
	 */
	if (WARN_ON(!num_fences))
		return -EINVAL;

	old = dma_resv_fences_list(obj);
	if (old && old->max_fences) {
		if ((old->num_fences + num_fences) <= old->max_fences)
			return 0;
		max = max(old->num_fences + num_fences, old->max_fences * 2);
	} else {
		max = max(4ul, roundup_pow_of_two(num_fences));
	}

	new = dma_resv_list_alloc(max);
	if (!new)
		return -ENOMEM;

	/*
	 * no need to bump fence refcounts, rcu_read access
	 * requires the use of kref_get_unless_zero, and the
	 * references from the old struct are carried over to
	 * the new.
	 */
	for (i = 0, j = 0, k = max; i < (old ? old->num_fences : 0); ++i) {
		enum dma_resv_usage usage;
		struct dma_fence *fence;

		dma_resv_list_entry(old, i, obj, &fence, &usage);
		if (dma_fence_is_signaled(fence))
			RCU_INIT_POINTER(new->table[--k], fence);
		else
			dma_resv_list_set(new, j++, fence, usage);
	}
	new->num_fences = j;

	/*
	 * We are not changing the effective set of fences here so can
	 * merely update the pointer to the new array; both existing
	 * readers and new readers will see exactly the same set of
	 * active (unsignaled) fences. Individual fences and the
	 * old array are protected by RCU and so will not vanish under
	 * the gaze of the rcu_read_lock() readers.
	 */
	rcu_assign_pointer(obj->fences, new);

	if (!old)
		return 0;

	/* Drop the references to the signaled fences */
	for (i = k; i < max; ++i) {
		struct dma_fence *fence;

		fence = rcu_dereference_protected(new->table[i],
						  dma_resv_held(obj));
		dma_fence_put(fence);
	}
	kfree_rcu(old, rcu);

	return 0;
}
EXPORT_SYMBOL(dma_resv_reserve_fences);
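
/*
 * Illustrative sketch (not part of this file): the canonical write-side
 * pattern reserves a slot first and only then adds the fence, all inside one
 * dma_resv_lock() critical section. The helper name my_driver_attach_fence()
 * is hypothetical.
 *
 *	static int my_driver_attach_fence(struct dma_resv *resv,
 *					  struct dma_fence *fence)
 *	{
 *		int ret;
 *
 *		ret = dma_resv_lock(resv, NULL);
 *		if (ret)
 *			return ret;
 *
 *		ret = dma_resv_reserve_fences(resv, 1);
 *		if (!ret)
 *			dma_resv_add_fence(resv, fence,
 *					   DMA_RESV_USAGE_WRITE);
 *
 *		dma_resv_unlock(resv);
 *		return ret;
 *	}
 */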
#ifdef CONFIG_DEBUG_MUTEXES
/**
 * dma_resv_reset_max_fences - reset fences for debugging
 * @obj: the dma_resv object to reset
 *
 * Reset the number of pre-reserved fence slots to test that drivers do
 * correct slot allocation using dma_resv_reserve_fences(). See also
 * &dma_resv_list.max_fences.
 */
void dma_resv_reset_max_fences(struct dma_resv *obj)
{
	struct dma_resv_list *fences = dma_resv_fences_list(obj);

	dma_resv_assert_held(obj);

	/* Test fence slot reservation */
	if (fences)
		fences->max_fences = fences->num_fences;
}
EXPORT_SYMBOL(dma_resv_reset_max_fences);
#endif
/**
 * dma_resv_add_fence - Add a fence to the dma_resv obj
 * @obj: the reservation object
 * @fence: the fence to add
 * @usage: how the fence is used, see enum dma_resv_usage
 *
 * Add a fence to a slot, @obj must be locked with dma_resv_lock(), and
 * dma_resv_reserve_fences() has been called.
 *
 * See also &dma_resv.fence for a discussion of the semantics.
 */
void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
			enum dma_resv_usage usage)
{
	struct dma_resv_list *fobj;
	struct dma_fence *old;
	unsigned int i, count;

	dma_fence_get(fence);

	dma_resv_assert_held(obj);

	/* Drivers should not add containers here, instead add each fence
	 * individually.
	 */
	WARN_ON(dma_fence_is_container(fence));

	fobj = dma_resv_fences_list(obj);
	count = fobj->num_fences;

	for (i = 0; i < count; ++i) {
		enum dma_resv_usage old_usage;

		dma_resv_list_entry(fobj, i, obj, &old, &old_usage);
		if ((old->context == fence->context && old_usage >= usage &&
		     dma_fence_is_later_or_same(fence, old)) ||
		    dma_fence_is_signaled(old)) {
			dma_resv_list_set(fobj, i, fence, usage);
			dma_fence_put(old);
			return;
		}
	}

	BUG_ON(fobj->num_fences >= fobj->max_fences);
	count++;

	dma_resv_list_set(fobj, i, fence, usage);
	/* pointer update must be visible before we extend the num_fences */
	smp_store_mb(fobj->num_fences, count);
}
EXPORT_SYMBOL(dma_resv_add_fence);
/**
 * dma_resv_replace_fences - replace fences in the dma_resv obj
 * @obj: the reservation object
 * @context: the context of the fences to replace
 * @replacement: the new fence to use instead
 * @usage: how the new fence is used, see enum dma_resv_usage
 *
 * Replace fences with a specified context with a new fence. Only valid if the
 * operation represented by the original fence no longer has access to the
 * resources represented by the dma_resv object when the new fence completes.
 *
 * An example for using this is replacing a preemption fence with a page table
 * update fence which makes the resource inaccessible.
 */
void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
			     struct dma_fence *replacement,
			     enum dma_resv_usage usage)
{
	struct dma_resv_list *list;
	unsigned int i;

	dma_resv_assert_held(obj);

	list = dma_resv_fences_list(obj);
	for (i = 0; list && i < list->num_fences; ++i) {
		struct dma_fence *old;

		dma_resv_list_entry(list, i, obj, &old, NULL);
		if (old->context != context)
			continue;

		dma_resv_list_set(list, i, dma_fence_get(replacement), usage);
		dma_fence_put(old);
	}
}
EXPORT_SYMBOL(dma_resv_replace_fences);
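
/*
 * Illustrative sketch (not part of this file): as described above, a driver
 * could swap a preemption fence for a page table update fence once the
 * mapping is gone. All names here are hypothetical and error handling is
 * elided for brevity.
 *
 *	dma_resv_lock(resv, NULL);
 *	dma_resv_replace_fences(resv, preempt_fence->context,
 *				pt_update_fence, DMA_RESV_USAGE_BOOKKEEP);
 *	dma_resv_unlock(resv);
 */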
/* Restart the unlocked iteration by initializing the cursor object. */
static void dma_resv_iter_restart_unlocked(struct dma_resv_iter *cursor)
{
	cursor->index = 0;
	cursor->num_fences = 0;
	cursor->fences = dma_resv_fences_list(cursor->obj);
	if (cursor->fences)
		cursor->num_fences = cursor->fences->num_fences;
	cursor->is_restarted = true;
}
/* Walk to the next not signaled fence and grab a reference to it */
static void dma_resv_iter_walk_unlocked(struct dma_resv_iter *cursor)
{
	do {
		/* Drop the reference from the previous round */
		dma_fence_put(cursor->fence);

		if (cursor->index >= cursor->num_fences) {
			cursor->fence = NULL;
			break;
		}

		dma_resv_list_entry(cursor->fences, cursor->index++,
				    cursor->obj, &cursor->fence,
				    &cursor->fence_usage);
		cursor->fence = dma_fence_get_rcu(cursor->fence);
		if (!cursor->fence) {
			dma_resv_iter_restart_unlocked(cursor);
			continue;
		}

		if (!dma_fence_is_signaled(cursor->fence) &&
		    cursor->usage >= cursor->fence_usage)
			break;
	} while (true);
}
/**
 * dma_resv_iter_first_unlocked - first fence in an unlocked dma_resv obj.
 * @cursor: the cursor with the current position
 *
 * Subsequent fences are iterated with dma_resv_iter_next_unlocked().
 *
 * Beware that the iterator can be restarted. Code which accumulates statistics
 * or similar needs to check for this with dma_resv_iter_is_restarted(). For
 * this reason prefer the locked dma_resv_iter_first() whenever possible.
 *
 * Returns the first fence from an unlocked dma_resv obj.
 */
struct dma_fence *dma_resv_iter_first_unlocked(struct dma_resv_iter *cursor)
{
	rcu_read_lock();
	do {
		dma_resv_iter_restart_unlocked(cursor);
		dma_resv_iter_walk_unlocked(cursor);
	} while (dma_resv_fences_list(cursor->obj) != cursor->fences);
	rcu_read_unlock();

	return cursor->fence;
}
EXPORT_SYMBOL(dma_resv_iter_first_unlocked);
/**
 * dma_resv_iter_next_unlocked - next fence in an unlocked dma_resv obj.
 * @cursor: the cursor with the current position
 *
 * Beware that the iterator can be restarted. Code which accumulates statistics
 * or similar needs to check for this with dma_resv_iter_is_restarted(). For
 * this reason prefer the locked dma_resv_iter_next() whenever possible.
 *
 * Returns the next fence from an unlocked dma_resv obj.
 */
struct dma_fence *dma_resv_iter_next_unlocked(struct dma_resv_iter *cursor)
{
	bool restart;

	rcu_read_lock();
	cursor->is_restarted = false;
	restart = dma_resv_fences_list(cursor->obj) != cursor->fences;
	do {
		if (restart)
			dma_resv_iter_restart_unlocked(cursor);
		dma_resv_iter_walk_unlocked(cursor);
		restart = true;
	} while (dma_resv_fences_list(cursor->obj) != cursor->fences);
	rcu_read_unlock();

	return cursor->fence;
}
EXPORT_SYMBOL(dma_resv_iter_next_unlocked);
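
/*
 * Illustrative sketch (not part of this file): because the unlocked iterator
 * can restart, accumulated state must be thrown away whenever
 * dma_resv_iter_is_restarted() reports a restart. The helper name
 * my_driver_count_fences() is hypothetical.
 *
 *	static unsigned int my_driver_count_fences(struct dma_resv *resv)
 *	{
 *		struct dma_resv_iter cursor;
 *		struct dma_fence *fence;
 *		unsigned int count = 0;
 *
 *		dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP);
 *		dma_resv_for_each_fence_unlocked(&cursor, fence) {
 *			if (dma_resv_iter_is_restarted(&cursor))
 *				count = 0;
 *			++count;
 *		}
 *		dma_resv_iter_end(&cursor);
 *		return count;
 *	}
 */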
/**
 * dma_resv_iter_first - first fence from a locked dma_resv object
 * @cursor: cursor to record the current position
 *
 * Subsequent fences are iterated with dma_resv_iter_next().
 *
 * Return the first fence in the dma_resv object while holding the
 * &dma_resv.lock.
 */
struct dma_fence *dma_resv_iter_first(struct dma_resv_iter *cursor)
{
	struct dma_fence *fence;

	dma_resv_assert_held(cursor->obj);

	cursor->index = 0;
	cursor->fences = dma_resv_fences_list(cursor->obj);

	fence = dma_resv_iter_next(cursor);
	cursor->is_restarted = true;
	return fence;
}
EXPORT_SYMBOL_GPL(dma_resv_iter_first);
/**
 * dma_resv_iter_next - next fence from a locked dma_resv object
 * @cursor: cursor to record the current position
 *
 * Return the next fence from the dma_resv object while holding the
 * &dma_resv.lock.
 */
struct dma_fence *dma_resv_iter_next(struct dma_resv_iter *cursor)
{
	struct dma_fence *fence;

	dma_resv_assert_held(cursor->obj);

	cursor->is_restarted = false;

	do {
		if (!cursor->fences ||
		    cursor->index >= cursor->fences->num_fences)
			return NULL;

		dma_resv_list_entry(cursor->fences, cursor->index++,
				    cursor->obj, &fence, &cursor->fence_usage);
	} while (cursor->fence_usage > cursor->usage);

	return fence;
}
EXPORT_SYMBOL_GPL(dma_resv_iter_next);
/**
 * dma_resv_copy_fences - Copy all fences from src to dst.
 * @dst: the destination reservation object
 * @src: the source reservation object
 *
 * Copy all fences from src to dst. dst-lock must be held.
 */
int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
{
	struct dma_resv_iter cursor;
	struct dma_resv_list *list;
	struct dma_fence *f;

	dma_resv_assert_held(dst);

	list = NULL;

	dma_resv_iter_begin(&cursor, src, DMA_RESV_USAGE_BOOKKEEP);
	dma_resv_for_each_fence_unlocked(&cursor, f) {

		if (dma_resv_iter_is_restarted(&cursor)) {
			dma_resv_list_free(list);

			list = dma_resv_list_alloc(cursor.num_fences);
			if (!list) {
				dma_resv_iter_end(&cursor);
				return -ENOMEM;
			}
			list->num_fences = 0;
		}

		dma_fence_get(f);
		dma_resv_list_set(list, list->num_fences++, f,
				  dma_resv_iter_usage(&cursor));
	}
	dma_resv_iter_end(&cursor);

	list = rcu_replace_pointer(dst->fences, list, dma_resv_held(dst));
	dma_resv_list_free(list);

	return 0;
}
EXPORT_SYMBOL(dma_resv_copy_fences);
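
/*
 * Illustrative sketch (not part of this file): cloning the fences of one
 * buffer object's reservation into a freshly created one. The resv names
 * are hypothetical and error handling is elided for brevity.
 *
 *	dma_resv_lock(new_bo_resv, NULL);
 *	ret = dma_resv_copy_fences(new_bo_resv, old_bo_resv);
 *	dma_resv_unlock(new_bo_resv);
 */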
/**
 * dma_resv_get_fences - Get an object's fences without the update side lock
 * held
 * @obj: the reservation object
 * @usage: controls which fences to include, see enum dma_resv_usage.
 * @num_fences: the number of fences returned
 * @fences: the array of fence ptrs returned (array is krealloc'd to the
 * required size, and must be freed by caller)
 *
 * Retrieve all fences from the reservation object.
 * Returns either zero or -ENOMEM.
 */
int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage,
			unsigned int *num_fences, struct dma_fence ***fences)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	*num_fences = 0;
	*fences = NULL;

	dma_resv_iter_begin(&cursor, obj, usage);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {

		if (dma_resv_iter_is_restarted(&cursor)) {
			struct dma_fence **new_fences;
			unsigned int count;

			while (*num_fences)
				dma_fence_put((*fences)[--(*num_fences)]);

			count = cursor.num_fences + 1;

			/* Eventually re-allocate the array */
			new_fences = krealloc_array(*fences, count,
						    sizeof(void *),
						    GFP_KERNEL);
			if (count && !new_fences) {
				kfree(*fences);
				*fences = NULL;
				*num_fences = 0;
				dma_resv_iter_end(&cursor);
				return -ENOMEM;
			}
			*fences = new_fences;
		}

		(*fences)[(*num_fences)++] = dma_fence_get(fence);
	}
	dma_resv_iter_end(&cursor);

	return 0;
}
EXPORT_SYMBOL_GPL(dma_resv_get_fences);
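
/*
 * Illustrative sketch (not part of this file): the returned array holds one
 * reference per fence, so the caller must drop each reference and free the
 * array. The resv variable is hypothetical.
 *
 *	struct dma_fence **fences;
 *	unsigned int i, count;
 *	int ret;
 *
 *	ret = dma_resv_get_fences(resv, DMA_RESV_USAGE_READ,
 *				  &count, &fences);
 *	if (ret)
 *		return ret;
 *
 *	// ... use the snapshot ...
 *
 *	for (i = 0; i < count; ++i)
 *		dma_fence_put(fences[i]);
 *	kfree(fences);
 */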
/**
 * dma_resv_get_singleton - Get a single fence for all the fences
 * @obj: the reservation object
 * @usage: controls which fences to include, see enum dma_resv_usage.
 * @fence: the resulting fence
 *
 * Get a single fence representing all the fences inside the resv object.
 *
 * Warning: This can't be used like this when adding the fence back to the resv
 * object since that can lead to stack corruption when finalizing the
 * dma_fence_array.
 *
 * Returns 0 on success and negative error values on failure.
 */
int dma_resv_get_singleton(struct dma_resv *obj, enum dma_resv_usage usage,
			   struct dma_fence **fence)
{
	struct dma_fence_array *array;
	struct dma_fence **fences;
	unsigned int count;
	int r;

	r = dma_resv_get_fences(obj, usage, &count, &fences);
	if (r)
		return r;

	if (count == 0) {
		*fence = NULL;
		return 0;
	}

	if (count == 1) {
		*fence = fences[0];
		kfree(fences);
		return 0;
	}

	array = dma_fence_array_create(count, fences,
				       dma_fence_context_alloc(1),
				       1, false);
	if (!array) {
		while (count--)
			dma_fence_put(fences[count]);
		kfree(fences);
		return -ENOMEM;
	}

	*fence = &array->base;
	return 0;
}
EXPORT_SYMBOL_GPL(dma_resv_get_singleton);
/**
 * dma_resv_wait_timeout - Wait on reservation's objects fences
 * @obj: the reservation object
 * @usage: controls which fences to include, see enum dma_resv_usage.
 * @intr: if true, do interruptible wait
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * Callers are not required to hold specific locks, but may hold
 * dma_resv_lock() already.
 *
 * RETURNS
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
 * greater than zero on success.
 */
long dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage usage,
			   bool intr, unsigned long timeout)
{
	long ret = timeout ? timeout : 1;
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_iter_begin(&cursor, obj, usage);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {

		ret = dma_fence_wait_timeout(fence, intr, ret);
		if (ret <= 0) {
			dma_resv_iter_end(&cursor);
			return ret;
		}
	}
	dma_resv_iter_end(&cursor);

	return ret;
}
EXPORT_SYMBOL_GPL(dma_resv_wait_timeout);
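
/*
 * Illustrative sketch (not part of this file): waiting up to one second,
 * interruptibly, for all writers to finish. The resv variable is
 * hypothetical.
 *
 *	long ret;
 *
 *	ret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_WRITE, true, HZ);
 *	if (ret == 0)
 *		return -ETIMEDOUT;
 *	if (ret < 0)
 *		return ret;
 */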
/**
 * dma_resv_set_deadline - Set a deadline on reservation's objects fences
 * @obj: the reservation object
 * @usage: controls which fences to include, see enum dma_resv_usage.
 * @deadline: the requested deadline (MONOTONIC)
 *
 * May be called without holding the dma_resv lock. Sets @deadline on
 * all fences filtered by @usage.
 */
void dma_resv_set_deadline(struct dma_resv *obj, enum dma_resv_usage usage,
			   ktime_t deadline)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_iter_begin(&cursor, obj, usage);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		dma_fence_set_deadline(fence, deadline);
	}
	dma_resv_iter_end(&cursor);
}
EXPORT_SYMBOL_GPL(dma_resv_set_deadline);
/**
 * dma_resv_test_signaled - Test if a reservation object's fences have been
 * signaled.
 * @obj: the reservation object
 * @usage: controls which fences to include, see enum dma_resv_usage.
 *
 * Callers are not required to hold specific locks, but may hold
 * dma_resv_lock() already.
 *
 * RETURNS
 *
 * True if all fences signaled, else false.
 */
bool dma_resv_test_signaled(struct dma_resv *obj, enum dma_resv_usage usage)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_iter_begin(&cursor, obj, usage);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		dma_resv_iter_end(&cursor);
		return false;
	}
	dma_resv_iter_end(&cursor);
	return true;
}
EXPORT_SYMBOL_GPL(dma_resv_test_signaled);
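
/*
 * Illustrative sketch (not part of this file): a non-blocking busy check, as
 * used for something like a BUSY ioctl. The args and resv names are
 * hypothetical.
 *
 *	args->busy = !dma_resv_test_signaled(resv, DMA_RESV_USAGE_READ);
 */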
/**
 * dma_resv_describe - Dump description of the resv object into seq_file
 * @obj: the reservation object
 * @seq: the seq_file to dump the description into
 *
 * Dump a textual description of the fences inside a dma_resv object into the
 * seq_file.
 */
void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq)
{
	static const char *usage[] = { "kernel", "write", "read", "bookkeep" };
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_for_each_fence(&cursor, obj, DMA_RESV_USAGE_READ, fence) {
		seq_printf(seq, "\t%s fence:",
			   usage[dma_resv_iter_usage(&cursor)]);
		dma_fence_describe(fence, seq);
	}
}
EXPORT_SYMBOL_GPL(dma_resv_describe);
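
/*
 * Illustrative sketch (not part of this file): dumping the fences from a
 * debugfs show callback with the reservation lock held. struct my_bo and
 * my_driver_resv_show() are hypothetical.
 *
 *	static int my_driver_resv_show(struct seq_file *m, void *data)
 *	{
 *		struct my_bo *bo = m->private;
 *		int ret;
 *
 *		ret = dma_resv_lock(bo->resv, NULL);
 *		if (ret)
 *			return ret;
 *
 *		dma_resv_describe(bo->resv, m);
 *		dma_resv_unlock(bo->resv);
 *		return 0;
 *	}
 */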
#if IS_ENABLED(CONFIG_LOCKDEP)
static int __init dma_resv_lockdep(void)
{
	struct mm_struct *mm = mm_alloc();
	struct ww_acquire_ctx ctx;
	struct dma_resv obj;
	struct address_space mapping;
	int ret;

	if (!mm)
		return -ENOMEM;

	dma_resv_init(&obj);
	address_space_init_once(&mapping);

	mmap_read_lock(mm);
	ww_acquire_init(&ctx, &reservation_ww_class);
	ret = dma_resv_lock(&obj, &ctx);
	if (ret == -EDEADLK)
		dma_resv_lock_slow(&obj, &ctx);
	fs_reclaim_acquire(GFP_KERNEL);
	/* for unmap_mapping_range on trylocked buffer objects in shrinkers */
	i_mmap_lock_write(&mapping);
	i_mmap_unlock_write(&mapping);
#ifdef CONFIG_MMU_NOTIFIER
	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	__dma_fence_might_wait();
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
#else
	__dma_fence_might_wait();
#endif
	fs_reclaim_release(GFP_KERNEL);
	ww_mutex_unlock(&obj.lock);
	ww_acquire_fini(&ctx);
	mmap_read_unlock(mm);

	mmput(mm);

	return 0;
}
subsys_initcall(dma_resv_lockdep);
#endif