/*
 * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst)
 *
 * Based on bo.c which bears the following copyright notice,
 * but is dual licensed:
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <linux/dma-resv.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/mmu_notifier.h>

/**
 * DOC: Reservation Object Overview
 *
 * The reservation object provides a mechanism to manage shared and
 * exclusive fences associated with a buffer.  A reservation object
 * can have attached one exclusive fence (normally associated with
 * write operations) or N shared fences (read operations).  The RCU
 * mechanism is used to protect read access to fences from locked
 * write-side updates.
 */

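/*
 * Illustrative usage sketch (not part of this file): a driver adding an
 * implicit-sync read fence to a buffer. "bo->resv" and "job_fence" are
 * hypothetical names; locking several objects at once would pass a
 * struct ww_acquire_ctx for deadlock avoidance instead of NULL.
 *
 *	int ret = dma_resv_lock(bo->resv, NULL);
 *
 *	if (!ret) {
 *		ret = dma_resv_reserve_shared(bo->resv, 1);
 *		if (!ret)
 *			dma_resv_add_shared_fence(bo->resv, job_fence);
 *		dma_resv_unlock(bo->resv);
 *	}
 */
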
DEFINE_WD_CLASS(reservation_ww_class);
EXPORT_SYMBOL(reservation_ww_class);

/**
 * dma_resv_list_alloc - allocate fence list
 * @shared_max: number of fences we need space for
 *
 * Allocate a new dma_resv_list and make sure to correctly initialize
 * shared_max.
 */
static struct dma_resv_list *dma_resv_list_alloc(unsigned int shared_max)
{
	struct dma_resv_list *list;

	list = kmalloc(struct_size(list, shared, shared_max), GFP_KERNEL);
	if (!list)
		return NULL;

	/* Use however much space ksize() actually gave us for fence slots */
	list->shared_max = (ksize(list) - offsetof(typeof(*list), shared)) /
		sizeof(*list->shared);

	return list;
}

/**
 * dma_resv_list_free - free fence list
 * @list: list to free
 *
 * Free a dma_resv_list and make sure to drop all references.
 */
static void dma_resv_list_free(struct dma_resv_list *list)
{
	unsigned int i;

	if (!list)
		return;

	for (i = 0; i < list->shared_count; ++i)
		dma_fence_put(rcu_dereference_protected(list->shared[i], true));

	kfree_rcu(list, rcu);
}

#if IS_ENABLED(CONFIG_LOCKDEP)
/* Prime lockdep with the nesting rules between mmap_lock, obj->lock,
 * fs_reclaim and the mmu notifier machinery, so violations are caught
 * even on configurations that never hit the hard cases at runtime.
 */
static int __init dma_resv_lockdep(void)
{
	struct mm_struct *mm = mm_alloc();
	struct ww_acquire_ctx ctx;
	struct dma_resv obj;
	struct address_space mapping;
	int ret;

	if (!mm)
		return -ENOMEM;

	dma_resv_init(&obj);
	address_space_init_once(&mapping);

	mmap_read_lock(mm);
	ww_acquire_init(&ctx, &reservation_ww_class);
	ret = dma_resv_lock(&obj, &ctx);
	if (ret == -EDEADLK)
		dma_resv_lock_slow(&obj, &ctx);
	fs_reclaim_acquire(GFP_KERNEL);
	/* for unmap_mapping_range on trylocked buffer objects in shrinkers */
	i_mmap_lock_write(&mapping);
	i_mmap_unlock_write(&mapping);
#ifdef CONFIG_MMU_NOTIFIER
	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	__dma_fence_might_wait();
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
#else
	__dma_fence_might_wait();
#endif
	fs_reclaim_release(GFP_KERNEL);
	ww_mutex_unlock(&obj.lock);
	ww_acquire_fini(&ctx);
	mmap_read_unlock(mm);

	mmput(mm);

	return 0;
}
subsys_initcall(dma_resv_lockdep);
#endif

/**
 * dma_resv_init - initialize a reservation object
 * @obj: the reservation object
 */
void dma_resv_init(struct dma_resv *obj)
{
	ww_mutex_init(&obj->lock, &reservation_ww_class);
	seqcount_ww_mutex_init(&obj->seq, &obj->lock);

	RCU_INIT_POINTER(obj->fence, NULL);
	RCU_INIT_POINTER(obj->fence_excl, NULL);
}
EXPORT_SYMBOL(dma_resv_init);

/**
 * dma_resv_fini - destroys a reservation object
 * @obj: the reservation object
 */
void dma_resv_fini(struct dma_resv *obj)
{
	struct dma_resv_list *fobj;
	struct dma_fence *excl;

	/*
	 * This object should be dead and all references must have
	 * been released to it, so no need to be protected with rcu.
	 */
	excl = rcu_dereference_protected(obj->fence_excl, 1);
	if (excl)
		dma_fence_put(excl);

	fobj = rcu_dereference_protected(obj->fence, 1);
	dma_resv_list_free(fobj);
	ww_mutex_destroy(&obj->lock);
}
EXPORT_SYMBOL(dma_resv_fini);

/**
 * dma_resv_reserve_shared - Reserve space to add shared fences to
 * a dma_resv.
 * @obj: reservation object
 * @num_fences: number of fences we want to add
 *
 * Should be called before dma_resv_add_shared_fence().  Must
 * be called with obj->lock held.
 *
 * RETURNS
 * Zero for success, or -errno
 */
int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
{
	struct dma_resv_list *old, *new;
	unsigned int i, j, k, max;

	dma_resv_assert_held(obj);

	old = dma_resv_get_list(obj);

	if (old && old->shared_max) {
		if ((old->shared_count + num_fences) <= old->shared_max)
			return 0;
		else
			max = max(old->shared_count + num_fences,
				  old->shared_max * 2);
	} else {
		max = max(4ul, roundup_pow_of_two(num_fences));
	}

	new = dma_resv_list_alloc(max);
	if (!new)
		return -ENOMEM;

	/*
	 * no need to bump fence refcounts, rcu_read access
	 * requires the use of kref_get_unless_zero, and the
	 * references from the old struct are carried over to
	 * the new.
	 */
	for (i = 0, j = 0, k = max; i < (old ? old->shared_count : 0); ++i) {
		struct dma_fence *fence;

		fence = rcu_dereference_protected(old->shared[i],
						  dma_resv_held(obj));
		if (dma_fence_is_signaled(fence))
			RCU_INIT_POINTER(new->shared[--k], fence);
		else
			RCU_INIT_POINTER(new->shared[j++], fence);
	}
	new->shared_count = j;

	/*
	 * We are not changing the effective set of fences here so can
	 * merely update the pointer to the new array; both existing
	 * readers and new readers will see exactly the same set of
	 * active (unsignaled) shared fences. Individual fences and the
	 * old array are protected by RCU and so will not vanish under
	 * the gaze of the rcu_read_lock() readers.
	 */
	rcu_assign_pointer(obj->fence, new);

	if (!old)
		return 0;

	/* Drop the references to the signaled fences */
	for (i = k; i < max; ++i) {
		struct dma_fence *fence;

		fence = rcu_dereference_protected(new->shared[i],
						  dma_resv_held(obj));
		dma_fence_put(fence);
	}
	kfree_rcu(old, rcu);

	return 0;
}
EXPORT_SYMBOL(dma_resv_reserve_shared);

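/*
 * Illustrative sketch (not part of this file): reserve one shared slot
 * per engine fence up front, then publish the fences while still holding
 * the lock. "bo", "num_engines" and "engine_fence" are hypothetical
 * driver names.
 *
 *	ret = dma_resv_lock(bo->resv, NULL);
 *	if (ret)
 *		return ret;
 *
 *	ret = dma_resv_reserve_shared(bo->resv, num_engines);
 *	if (!ret) {
 *		for (i = 0; i < num_engines; ++i)
 *			dma_resv_add_shared_fence(bo->resv, engine_fence[i]);
 *	}
 *	dma_resv_unlock(bo->resv);
 */
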
/**
 * dma_resv_add_shared_fence - Add a fence to a shared slot
 * @obj: the reservation object
 * @fence: the shared fence to add
 *
 * Add a fence to a shared slot, obj->lock must be held, and
 * dma_resv_reserve_shared() has been called.
 */
void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
{
	struct dma_resv_list *fobj;
	struct dma_fence *old;
	unsigned int i, count;

	dma_fence_get(fence);

	dma_resv_assert_held(obj);

	fobj = dma_resv_get_list(obj);
	count = fobj->shared_count;

	write_seqcount_begin(&obj->seq);

	for (i = 0; i < count; ++i) {

		old = rcu_dereference_protected(fobj->shared[i],
						dma_resv_held(obj));
		if (old->context == fence->context ||
		    dma_fence_is_signaled(old))
			goto replace;
	}

	BUG_ON(fobj->shared_count >= fobj->shared_max);
	old = NULL;
	count++;

replace:
	RCU_INIT_POINTER(fobj->shared[i], fence);
	/* pointer update must be visible before we extend the shared_count */
	smp_store_mb(fobj->shared_count, count);

	write_seqcount_end(&obj->seq);
	dma_fence_put(old);
}
EXPORT_SYMBOL(dma_resv_add_shared_fence);

/**
 * dma_resv_add_excl_fence - Add an exclusive fence.
 * @obj: the reservation object
 * @fence: the exclusive fence to add
 *
 * Add a fence to the exclusive slot.  The obj->lock must be held.
 */
void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
{
	struct dma_fence *old_fence = dma_resv_get_excl(obj);
	struct dma_resv_list *old;
	u32 i = 0;

	dma_resv_assert_held(obj);

	old = dma_resv_get_list(obj);
	if (old)
		i = old->shared_count;

	if (fence)
		dma_fence_get(fence);

	write_seqcount_begin(&obj->seq);
	/* write_seqcount_begin provides the necessary memory barrier */
	RCU_INIT_POINTER(obj->fence_excl, fence);
	if (old)
		old->shared_count = 0;
	write_seqcount_end(&obj->seq);

	/* inplace update, no shared fences */
	while (i--)
		dma_fence_put(rcu_dereference_protected(old->shared[i],
							dma_resv_held(obj)));

	dma_fence_put(old_fence);
}
EXPORT_SYMBOL(dma_resv_add_excl_fence);

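/*
 * Illustrative sketch (hypothetical names): a write job installs an
 * exclusive fence, which implicitly drops all shared fences, so later
 * readers only need to wait for the write to finish.
 *
 *	ret = dma_resv_lock(bo->resv, NULL);
 *	if (!ret) {
 *		dma_resv_add_excl_fence(bo->resv, write_fence);
 *		dma_resv_unlock(bo->resv);
 *	}
 */
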
/**
 * dma_resv_copy_fences - Copy all fences from src to dst.
 * @dst: the destination reservation object
 * @src: the source reservation object
 *
 * Copy all fences from src to dst. dst->lock must be held.
 */
int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
{
	struct dma_resv_list *src_list, *dst_list;
	struct dma_fence *old, *new;
	unsigned int i;

	dma_resv_assert_held(dst);

	rcu_read_lock();
	src_list = rcu_dereference(src->fence);

retry:
	if (src_list) {
		unsigned shared_count = src_list->shared_count;

		rcu_read_unlock();

		dst_list = dma_resv_list_alloc(shared_count);
		if (!dst_list)
			return -ENOMEM;

		rcu_read_lock();
		src_list = rcu_dereference(src->fence);
		if (!src_list || src_list->shared_count > shared_count) {
			kfree(dst_list);
			goto retry;
		}

		dst_list->shared_count = 0;
		for (i = 0; i < src_list->shared_count; ++i) {
			struct dma_fence *fence;

			fence = rcu_dereference(src_list->shared[i]);
			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
				     &fence->flags))
				continue;

			if (!dma_fence_get_rcu(fence)) {
				dma_resv_list_free(dst_list);
				src_list = rcu_dereference(src->fence);
				goto retry;
			}

			if (dma_fence_is_signaled(fence)) {
				dma_fence_put(fence);
				continue;
			}

			rcu_assign_pointer(dst_list->shared[dst_list->shared_count++], fence);
		}
	} else {
		dst_list = NULL;
	}

	new = dma_fence_get_rcu_safe(&src->fence_excl);
	rcu_read_unlock();

	src_list = dma_resv_get_list(dst);
	old = dma_resv_get_excl(dst);

	write_seqcount_begin(&dst->seq);
	/* write_seqcount_begin provides the necessary memory barrier */
	RCU_INIT_POINTER(dst->fence_excl, new);
	RCU_INIT_POINTER(dst->fence, dst_list);
	write_seqcount_end(&dst->seq);

	dma_resv_list_free(src_list);
	dma_fence_put(old);

	return 0;
}
EXPORT_SYMBOL(dma_resv_copy_fences);

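/*
 * Illustrative sketch (hypothetical names): carry the pending fences of
 * a buffer over to a freshly created clone, for example a ghost object
 * used for a pipelined move, before the clone is published.
 *
 *	ret = dma_resv_lock(ghost->resv, NULL);
 *	if (!ret) {
 *		ret = dma_resv_copy_fences(ghost->resv, bo->resv);
 *		dma_resv_unlock(ghost->resv);
 *	}
 */
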
/**
 * dma_resv_get_fences_rcu - Get an object's shared and exclusive
 * fences without update side lock held
 * @obj: the reservation object
 * @pfence_excl: the returned exclusive fence (or NULL)
 * @pshared_count: the number of shared fences returned
 * @pshared: the array of shared fence ptrs returned (array is krealloc'd to
 * the required size, and must be freed by caller)
 *
 * Retrieve all fences from the reservation object. If the pointer for the
 * exclusive fence is not specified the fence is put into the array of the
 * shared fences as well. Returns either zero or -ENOMEM.
 */
int dma_resv_get_fences_rcu(struct dma_resv *obj,
			    struct dma_fence **pfence_excl,
			    unsigned *pshared_count,
			    struct dma_fence ***pshared)
{
	struct dma_fence **shared = NULL;
	struct dma_fence *fence_excl;
	unsigned int shared_count;
	int ret = 1;

	do {
		struct dma_resv_list *fobj;
		unsigned int i, seq;
		size_t sz = 0;

		shared_count = i = 0;

		rcu_read_lock();
		seq = read_seqcount_begin(&obj->seq);

		fence_excl = rcu_dereference(obj->fence_excl);
		if (fence_excl && !dma_fence_get_rcu(fence_excl))
			goto unlock;

		fobj = rcu_dereference(obj->fence);
		if (fobj)
			sz += sizeof(*shared) * fobj->shared_max;

		if (!pfence_excl && fence_excl)
			sz += sizeof(*shared);

		if (sz) {
			struct dma_fence **nshared;

			nshared = krealloc(shared, sz,
					   GFP_NOWAIT | __GFP_NOWARN);
			if (!nshared) {
				rcu_read_unlock();

				dma_fence_put(fence_excl);
				fence_excl = NULL;

				nshared = krealloc(shared, sz, GFP_KERNEL);
				if (nshared) {
					shared = nshared;
					continue;
				}

				ret = -ENOMEM;
				break;
			}
			shared = nshared;
			shared_count = fobj ? fobj->shared_count : 0;
			for (i = 0; i < shared_count; ++i) {
				shared[i] = rcu_dereference(fobj->shared[i]);
				if (!dma_fence_get_rcu(shared[i]))
					break;
			}
		}

		if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) {
			while (i--)
				dma_fence_put(shared[i]);
			dma_fence_put(fence_excl);
			goto unlock;
		}

		ret = 0;
unlock:
		rcu_read_unlock();
	} while (ret);

	if (pfence_excl)
		*pfence_excl = fence_excl;
	else if (fence_excl)
		shared[shared_count++] = fence_excl;

	*pshared_count = shared_count;
	*pshared = shared;
	return ret;
}
EXPORT_SYMBOL_GPL(dma_resv_get_fences_rcu);

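/*
 * Illustrative sketch: snapshot all fences without holding obj->lock,
 * wait on them, then drop the references and free the krealloc'd array.
 *
 *	struct dma_fence *excl, **shared;
 *	unsigned int count, i;
 *	int ret;
 *
 *	ret = dma_resv_get_fences_rcu(obj, &excl, &count, &shared);
 *	if (ret)
 *		return ret;
 *
 *	for (i = 0; i < count; ++i) {
 *		dma_fence_wait(shared[i], false);
 *		dma_fence_put(shared[i]);
 *	}
 *	kfree(shared);
 *
 *	if (excl) {
 *		dma_fence_wait(excl, false);
 *		dma_fence_put(excl);
 *	}
 */
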
/**
 * dma_resv_wait_timeout_rcu - Wait on reservation's objects
 * shared and/or exclusive fences.
 * @obj: the reservation object
 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
 * @intr: if true, do interruptible wait
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * RETURNS
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
 * greater than zero on success.
 */
long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
			       bool wait_all, bool intr,
			       unsigned long timeout)
{
	struct dma_fence *fence;
	unsigned seq, shared_count;
	long ret = timeout ? timeout : 1;
	int i;

retry:
	shared_count = 0;
	seq = read_seqcount_begin(&obj->seq);
	rcu_read_lock();
	i = -1;

	fence = rcu_dereference(obj->fence_excl);
	if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		if (!dma_fence_get_rcu(fence))
			goto unlock_retry;

		if (dma_fence_is_signaled(fence)) {
			dma_fence_put(fence);
			fence = NULL;
		}

	} else {
		fence = NULL;
	}

	if (wait_all) {
		struct dma_resv_list *fobj = rcu_dereference(obj->fence);

		if (fobj)
			shared_count = fobj->shared_count;

		for (i = 0; !fence && i < shared_count; ++i) {
			struct dma_fence *lfence = rcu_dereference(fobj->shared[i]);

			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
				     &lfence->flags))
				continue;

			if (!dma_fence_get_rcu(lfence))
				goto unlock_retry;

			if (dma_fence_is_signaled(lfence)) {
				dma_fence_put(lfence);
				continue;
			}

			fence = lfence;
			break;
		}
	}

	rcu_read_unlock();
	if (fence) {
		if (read_seqcount_retry(&obj->seq, seq)) {
			dma_fence_put(fence);
			goto retry;
		}

		ret = dma_fence_wait_timeout(fence, intr, ret);
		dma_fence_put(fence);
		if (ret > 0 && wait_all && (i + 1 < shared_count))
			goto retry;
	}
	return ret;

unlock_retry:
	rcu_read_unlock();
	goto retry;
}
EXPORT_SYMBOL_GPL(dma_resv_wait_timeout_rcu);

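/*
 * Illustrative sketch: wait interruptibly for all fences with a 100ms
 * timeout, mapping the dma_fence return convention (<0 error, 0 timed
 * out, >0 jiffies remaining) onto an errno.
 *
 *	long ret = dma_resv_wait_timeout_rcu(obj, true, true,
 *					     msecs_to_jiffies(100));
 *	if (ret < 0)
 *		return ret;
 *	if (ret == 0)
 *		return -ETIMEDOUT;
 */
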
static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
{
	struct dma_fence *fence, *lfence = passed_fence;
	int ret = 1;

	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
		fence = dma_fence_get_rcu(lfence);
		if (!fence)
			return -1;

		ret = !!dma_fence_is_signaled(fence);
		dma_fence_put(fence);
	}
	return ret;
}

/**
 * dma_resv_test_signaled_rcu - Test if a reservation object's
 * fences have been signaled.
 * @obj: the reservation object
 * @test_all: if true, test all fences, otherwise only test the exclusive
 * fence
 *
 * RETURNS
 * true if all fences signaled, else false
 */
bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
{
	unsigned seq, shared_count;
	int ret;

	rcu_read_lock();
retry:
	ret = true;
	shared_count = 0;
	seq = read_seqcount_begin(&obj->seq);

	if (test_all) {
		unsigned i;

		struct dma_resv_list *fobj = rcu_dereference(obj->fence);

		if (fobj)
			shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; ++i) {
			struct dma_fence *fence = rcu_dereference(fobj->shared[i]);

			ret = dma_resv_test_signaled_single(fence);
			if (ret < 0)
				goto retry;
			else if (!ret)
				break;
		}

		if (read_seqcount_retry(&obj->seq, seq))
			goto retry;
	}

	if (!shared_count) {
		struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl);

		if (fence_excl) {
			ret = dma_resv_test_signaled_single(fence_excl);
			if (ret < 0)
				goto retry;

			if (read_seqcount_retry(&obj->seq, seq))
				goto retry;
		}
	}

	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(dma_resv_test_signaled_rcu);

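/*
 * Illustrative sketch: a non-blocking busy check, e.g. backing a BUSY
 * ioctl; "args" is a hypothetical ioctl argument struct. The RCU read
 * lock is taken internally, so the caller may simply do:
 *
 *	args->busy = !dma_resv_test_signaled_rcu(obj, true);
 */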