/*
 * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst)
 *
 * Based on bo.c which bears the following copyright notice,
 * but is dual licensed:
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <linux/dma-resv.h>
#include <linux/export.h>
#include <linux/sched/mm.h>

/**
 * DOC: Reservation Object Overview
 *
 * The reservation object provides a mechanism to manage shared and
 * exclusive fences associated with a buffer. A reservation object
 * can have attached one exclusive fence (normally associated with
 * write operations) or N shared fences (read operations). The RCU
 * mechanism is used to protect read access to fences from locked
 * write-side updates.
 */

DEFINE_WD_CLASS(reservation_ww_class);
EXPORT_SYMBOL(reservation_ww_class);

struct lock_class_key reservation_seqcount_class;
EXPORT_SYMBOL(reservation_seqcount_class);

const char reservation_seqcount_string[] = "reservation_seqcount";
EXPORT_SYMBOL(reservation_seqcount_string);

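/*
 * Illustrative sketch (not part of the exported API): how a caller can use
 * reservation_ww_class to lock two reservation objects without ABBA
 * deadlocks. The function name is hypothetical and the error handling is
 * simplified; drivers normally use higher level helpers that loop until
 * all locks are acquired.
 */
static int __maybe_unused dma_resv_example_lock_pair(struct dma_resv *a,
						     struct dma_resv *b)
{
	struct ww_acquire_ctx ctx;
	int ret;

	ww_acquire_init(&ctx, &reservation_ww_class);

	ret = dma_resv_lock(a, &ctx);
	if (ret)
		goto out;

	ret = dma_resv_lock(b, &ctx);
	if (ret == -EDEADLK) {
		/*
		 * b is held by an older context: back off, take b on the
		 * slow path and retry a under the same acquire context.
		 * A full implementation would loop instead of giving up.
		 */
		dma_resv_unlock(a);
		dma_resv_lock_slow(b, &ctx);
		ret = dma_resv_lock(a, &ctx);
		if (ret) {
			dma_resv_unlock(b);
			goto out;
		}
	} else if (ret) {
		dma_resv_unlock(a);
		goto out;
	}

	ww_acquire_done(&ctx);

	/* ... both objects are locked, fences may now be added ... */

	dma_resv_unlock(b);
	dma_resv_unlock(a);
out:
	ww_acquire_fini(&ctx);
	return ret;
}
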
/**
 * dma_resv_list_alloc - allocate fence list
 * @shared_max: number of fences we need space for
 *
 * Allocate a new dma_resv_list and make sure to correctly initialize
 * shared_max.
 */
static struct dma_resv_list *dma_resv_list_alloc(unsigned int shared_max)
{
	struct dma_resv_list *list;

	list = kmalloc(offsetof(typeof(*list), shared[shared_max]), GFP_KERNEL);
	if (!list)
		return NULL;

	list->shared_max = (ksize(list) - offsetof(typeof(*list), shared)) /
		sizeof(*list->shared);

	return list;
}

/**
 * dma_resv_list_free - free fence list
 * @list: list to free
 *
 * Free a dma_resv_list and make sure to drop all references.
 */
static void dma_resv_list_free(struct dma_resv_list *list)
{
	unsigned int i;

	if (!list)
		return;

	for (i = 0; i < list->shared_count; ++i)
		dma_fence_put(rcu_dereference_protected(list->shared[i], true));

	kfree_rcu(list, rcu);
}

#if IS_ENABLED(CONFIG_LOCKDEP)
static int __init dma_resv_lockdep(void)
{
	struct mm_struct *mm = mm_alloc();
	struct ww_acquire_ctx ctx;
	struct dma_resv obj;
	int ret;

	if (!mm)
		return -ENOMEM;

	dma_resv_init(&obj);

	down_read(&mm->mmap_sem);
	ww_acquire_init(&ctx, &reservation_ww_class);
	ret = dma_resv_lock(&obj, &ctx);
	if (ret == -EDEADLK)
		dma_resv_lock_slow(&obj, &ctx);
	fs_reclaim_acquire(GFP_KERNEL);
	fs_reclaim_release(GFP_KERNEL);
	ww_mutex_unlock(&obj.lock);
	ww_acquire_fini(&ctx);
	up_read(&mm->mmap_sem);

	mmput(mm);

	return 0;
}
subsys_initcall(dma_resv_lockdep);
#endif

/**
 * dma_resv_init - initialize a reservation object
 * @obj: the reservation object
 */
void dma_resv_init(struct dma_resv *obj)
{
	ww_mutex_init(&obj->lock, &reservation_ww_class);

	__seqcount_init(&obj->seq, reservation_seqcount_string,
			&reservation_seqcount_class);
	RCU_INIT_POINTER(obj->fence, NULL);
	RCU_INIT_POINTER(obj->fence_excl, NULL);
}
EXPORT_SYMBOL(dma_resv_init);

/**
 * dma_resv_fini - destroys a reservation object
 * @obj: the reservation object
 */
void dma_resv_fini(struct dma_resv *obj)
{
	struct dma_resv_list *fobj;
	struct dma_fence *excl;

	/*
	 * This object should be dead and all references must have
	 * been released to it, so no need to be protected with rcu.
	 */
	excl = rcu_dereference_protected(obj->fence_excl, 1);
	if (excl)
		dma_fence_put(excl);

	fobj = rcu_dereference_protected(obj->fence, 1);
	dma_resv_list_free(fobj);
	ww_mutex_destroy(&obj->lock);
}
EXPORT_SYMBOL(dma_resv_fini);

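/*
 * Illustrative sketch: a buffer object embedding a reservation object,
 * initialized when the buffer is created and destroyed with it. The
 * example_buffer structure and helpers are hypothetical; &drm_gem_object
 * embeds a struct dma_resv for exactly this purpose.
 */
struct example_buffer {
	struct dma_resv resv;
	void *cpu_addr;
};

static void __maybe_unused example_buffer_init(struct example_buffer *buf)
{
	buf->cpu_addr = NULL;
	dma_resv_init(&buf->resv);	/* lock and fence pointers start out empty */
}

static void __maybe_unused example_buffer_release(struct example_buffer *buf)
{
	/* drops any remaining fence references and destroys the ww_mutex */
	dma_resv_fini(&buf->resv);
}
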
/**
 * dma_resv_reserve_shared - Reserve space to add shared fences to
 * a dma_resv.
 * @obj: reservation object
 * @num_fences: number of fences we want to add
 *
 * Should be called before dma_resv_add_shared_fence(). Must
 * be called with obj->lock held.
 *
 * RETURNS
 * Zero for success, or -errno
 */
int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
{
	struct dma_resv_list *old, *new;
	unsigned int i, j, k, max;

	dma_resv_assert_held(obj);

	old = dma_resv_get_list(obj);

	if (old && old->shared_max) {
		if ((old->shared_count + num_fences) <= old->shared_max)
			return 0;
		else
			max = max(old->shared_count + num_fences,
				  old->shared_max * 2);
	} else {
		max = 4;
	}

	new = dma_resv_list_alloc(max);
	if (!new)
		return -ENOMEM;

	/*
	 * no need to bump fence refcounts, rcu_read access
	 * requires the use of kref_get_unless_zero, and the
	 * references from the old struct are carried over to
	 * the new.
	 */
	for (i = 0, j = 0, k = max; i < (old ? old->shared_count : 0); ++i) {
		struct dma_fence *fence;

		fence = rcu_dereference_protected(old->shared[i],
						  dma_resv_held(obj));
		if (dma_fence_is_signaled(fence))
			RCU_INIT_POINTER(new->shared[--k], fence);
		else
			RCU_INIT_POINTER(new->shared[j++], fence);
	}
	new->shared_count = j;

	/*
	 * We are not changing the effective set of fences here so can
	 * merely update the pointer to the new array; both existing
	 * readers and new readers will see exactly the same set of
	 * active (unsignaled) shared fences. Individual fences and the
	 * old array are protected by RCU and so will not vanish under
	 * the gaze of the rcu_read_lock() readers.
	 */
	rcu_assign_pointer(obj->fence, new);

	if (!old)
		return 0;

	/* Drop the references to the signaled fences */
	for (i = k; i < max; ++i) {
		struct dma_fence *fence;

		fence = rcu_dereference_protected(new->shared[i],
						  dma_resv_held(obj));
		dma_fence_put(fence);
	}
	kfree_rcu(old, rcu);

	return 0;
}
EXPORT_SYMBOL(dma_resv_reserve_shared);

/**
 * dma_resv_add_shared_fence - Add a fence to a shared slot
 * @obj: the reservation object
 * @fence: the shared fence to add
 *
 * Add a fence to a shared slot, obj->lock must be held, and
 * dma_resv_reserve_shared() has been called.
 */
void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
{
	struct dma_resv_list *fobj;
	struct dma_fence *old;
	unsigned int i, count;

	dma_fence_get(fence);

	dma_resv_assert_held(obj);

	fobj = dma_resv_get_list(obj);
	count = fobj->shared_count;

	preempt_disable();
	write_seqcount_begin(&obj->seq);

	for (i = 0; i < count; ++i) {

		old = rcu_dereference_protected(fobj->shared[i],
						dma_resv_held(obj));
		if (old->context == fence->context ||
		    dma_fence_is_signaled(old))
			goto replace;
	}

	BUG_ON(fobj->shared_count >= fobj->shared_max);
	old = NULL;
	count++;

replace:
	RCU_INIT_POINTER(fobj->shared[i], fence);
	/* pointer update must be visible before we extend the shared_count */
	smp_store_mb(fobj->shared_count, count);

	write_seqcount_end(&obj->seq);
	preempt_enable();
	dma_fence_put(old);
}
EXPORT_SYMBOL(dma_resv_add_shared_fence);

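/*
 * Illustrative sketch: publishing the fence of a read-only job. The helper
 * name is hypothetical; the point is the contract documented above, i.e. a
 * slot must be reserved under the lock before the fence is added.
 */
static int __maybe_unused example_publish_read_fence(struct dma_resv *obj,
						     struct dma_fence *fence)
{
	int ret;

	ret = dma_resv_lock(obj, NULL);
	if (ret)
		return ret;

	/* make sure a free slot exists before publishing the fence */
	ret = dma_resv_reserve_shared(obj, 1);
	if (!ret)
		dma_resv_add_shared_fence(obj, fence);

	dma_resv_unlock(obj);
	return ret;
}
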
/**
 * dma_resv_add_excl_fence - Add an exclusive fence.
 * @obj: the reservation object
 * @fence: the exclusive fence to add
 *
 * Add a fence to the exclusive slot. The obj->lock must be held.
 */
void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
{
	struct dma_fence *old_fence = dma_resv_get_excl(obj);
	struct dma_resv_list *old;
	u32 i = 0;

	dma_resv_assert_held(obj);

	old = dma_resv_get_list(obj);
	if (old)
		i = old->shared_count;

	if (fence)
		dma_fence_get(fence);

	preempt_disable();
	write_seqcount_begin(&obj->seq);
	/* write_seqcount_begin provides the necessary memory barrier */
	RCU_INIT_POINTER(obj->fence_excl, fence);
	if (old)
		old->shared_count = 0;
	write_seqcount_end(&obj->seq);
	preempt_enable();

	/* inplace update, no shared fences */
	while (i--)
		dma_fence_put(rcu_dereference_protected(old->shared[i],
							dma_resv_held(obj)));

	dma_fence_put(old_fence);
}
EXPORT_SYMBOL(dma_resv_add_excl_fence);

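/*
 * Illustrative sketch: a submission path that already holds obj->lock (for
 * instance via the ww_acquire_ctx pattern further above) and publishes the
 * job's completion fence as the new exclusive fence of a written buffer.
 * The example_job structure is hypothetical.
 */
struct example_job {
	struct dma_resv *written_buffer;
	struct dma_fence *done_fence;
};

static void __maybe_unused example_job_publish(struct example_job *job)
{
	dma_resv_assert_held(job->written_buffer);

	/* replaces and drops the previous exclusive and shared fences */
	dma_resv_add_excl_fence(job->written_buffer, job->done_fence);
}
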
/**
 * dma_resv_copy_fences - Copy all fences from src to dst.
 * @dst: the destination reservation object
 * @src: the source reservation object
 *
 * Copy all fences from src to dst. dst->lock must be held.
 */
int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
{
	struct dma_resv_list *src_list, *dst_list;
	struct dma_fence *old, *new;
	unsigned int i;

	dma_resv_assert_held(dst);

	rcu_read_lock();
	src_list = rcu_dereference(src->fence);

retry:
	if (src_list) {
		unsigned int shared_count = src_list->shared_count;

		rcu_read_unlock();

		dst_list = dma_resv_list_alloc(shared_count);
		if (!dst_list)
			return -ENOMEM;

		rcu_read_lock();
		src_list = rcu_dereference(src->fence);
		if (!src_list || src_list->shared_count > shared_count) {
			kfree(dst_list);
			goto retry;
		}

		dst_list->shared_count = 0;
		for (i = 0; i < src_list->shared_count; ++i) {
			struct dma_fence *fence;

			fence = rcu_dereference(src_list->shared[i]);
			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
				     &fence->flags))
				continue;

			if (!dma_fence_get_rcu(fence)) {
				dma_resv_list_free(dst_list);
				src_list = rcu_dereference(src->fence);
				goto retry;
			}

			if (dma_fence_is_signaled(fence)) {
				dma_fence_put(fence);
				continue;
			}

			rcu_assign_pointer(dst_list->shared[dst_list->shared_count++], fence);
		}
	} else {
		dst_list = NULL;
	}

	new = dma_fence_get_rcu_safe(&src->fence_excl);
	rcu_read_unlock();

	src_list = dma_resv_get_list(dst);
	old = dma_resv_get_excl(dst);

	preempt_disable();
	write_seqcount_begin(&dst->seq);
	/* write_seqcount_begin provides the necessary memory barrier */
	RCU_INIT_POINTER(dst->fence_excl, new);
	RCU_INIT_POINTER(dst->fence, dst_list);
	write_seqcount_end(&dst->seq);
	preempt_enable();

	dma_resv_list_free(src_list);
	dma_fence_put(old);

	return 0;
}
EXPORT_SYMBOL(dma_resv_copy_fences);

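/*
 * Illustrative sketch: carrying fences over when the backing storage of one
 * buffer is handed to another, so the new owner keeps waiting for the old
 * owner's outstanding work. The helper name is hypothetical.
 */
static int __maybe_unused example_transfer_fences(struct dma_resv *dst,
						  struct dma_resv *src)
{
	int ret;

	ret = dma_resv_lock(dst, NULL);
	if (ret)
		return ret;

	/* src is only read under RCU, so no src lock is required */
	ret = dma_resv_copy_fences(dst, src);

	dma_resv_unlock(dst);
	return ret;
}
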
/**
 * dma_resv_get_fences_rcu - Get an object's shared and exclusive
 * fences without the update side lock held
 * @obj: the reservation object
 * @pfence_excl: the returned exclusive fence (or NULL)
 * @pshared_count: the number of shared fences returned
 * @pshared: the array of shared fence ptrs returned (array is krealloc'd to
 * the required size, and must be freed by caller)
 *
 * Retrieve all fences from the reservation object. If the pointer for the
 * exclusive fence is not specified the fence is put into the array of the
 * shared fences as well. Returns either zero or -ENOMEM.
 */
int dma_resv_get_fences_rcu(struct dma_resv *obj,
			    struct dma_fence **pfence_excl,
			    unsigned *pshared_count,
			    struct dma_fence ***pshared)
{
	struct dma_fence **shared = NULL;
	struct dma_fence *fence_excl;
	unsigned int shared_count;
	int ret = 1;

	do {
		struct dma_resv_list *fobj;
		unsigned int i, seq;
		size_t sz = 0;

		shared_count = i = 0;

		rcu_read_lock();
		seq = read_seqcount_begin(&obj->seq);

		fence_excl = rcu_dereference(obj->fence_excl);
		if (fence_excl && !dma_fence_get_rcu(fence_excl))
			goto unlock;

		fobj = rcu_dereference(obj->fence);
		if (fobj)
			sz += sizeof(*shared) * fobj->shared_max;

		if (!pfence_excl && fence_excl)
			sz += sizeof(*shared);

		if (sz) {
			struct dma_fence **nshared;

			nshared = krealloc(shared, sz,
					   GFP_NOWAIT | __GFP_NOWARN);
			if (!nshared) {
				rcu_read_unlock();

				dma_fence_put(fence_excl);
				fence_excl = NULL;

				nshared = krealloc(shared, sz, GFP_KERNEL);
				if (nshared) {
					shared = nshared;
					continue;
				}

				ret = -ENOMEM;
				break;
			}
			shared = nshared;
			shared_count = fobj ? fobj->shared_count : 0;
			for (i = 0; i < shared_count; ++i) {
				shared[i] = rcu_dereference(fobj->shared[i]);
				if (!dma_fence_get_rcu(shared[i]))
					break;
			}
		}

		if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) {
			while (i--)
				dma_fence_put(shared[i]);
			dma_fence_put(fence_excl);
			goto unlock;
		}

		ret = 0;
unlock:
		rcu_read_unlock();
	} while (ret);

	if (pfence_excl)
		*pfence_excl = fence_excl;
	else if (fence_excl)
		shared[shared_count++] = fence_excl;

	if (!shared_count) {
		kfree(shared);
		shared = NULL;
	}

	*pshared_count = shared_count;
	*pshared = shared;
	return ret;
}
EXPORT_SYMBOL_GPL(dma_resv_get_fences_rcu);

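/*
 * Illustrative sketch: snapshotting all fences of an object without taking
 * its lock. The caller owns one reference on every returned fence and owns
 * the krealloc'd array; the helper name and the way the fences are consumed
 * here are hypothetical.
 */
static void __maybe_unused example_snapshot_fences(struct dma_resv *obj)
{
	struct dma_fence *excl, **shared = NULL;
	unsigned int count, i;

	if (dma_resv_get_fences_rcu(obj, &excl, &count, &shared))
		return;	/* -ENOMEM */

	/* a real user would e.g. stuff these into a sync_file */
	for (i = 0; i < count; ++i)
		dma_fence_put(shared[i]);
	kfree(shared);

	dma_fence_put(excl);	/* may be NULL, dma_fence_put() copes */
}
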
/**
 * dma_resv_wait_timeout_rcu - Wait on a reservation object's
 * shared and/or exclusive fences.
 * @obj: the reservation object
 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
 * @intr: if true, do interruptible wait
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * RETURNS
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
 * greater than zero on success.
 */
long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
			       bool wait_all, bool intr,
			       unsigned long timeout)
{
	struct dma_fence *fence;
	unsigned seq, shared_count;
	long ret = timeout ? timeout : 1;
	int i;

retry:
	shared_count = 0;
	seq = read_seqcount_begin(&obj->seq);
	rcu_read_lock();
	i = -1;

	fence = rcu_dereference(obj->fence_excl);
	if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		if (!dma_fence_get_rcu(fence))
			goto unlock_retry;

		if (dma_fence_is_signaled(fence)) {
			dma_fence_put(fence);
			fence = NULL;
		}
	} else {
		fence = NULL;
	}

	if (wait_all) {
		struct dma_resv_list *fobj = rcu_dereference(obj->fence);

		if (fobj)
			shared_count = fobj->shared_count;

		for (i = 0; !fence && i < shared_count; ++i) {
			struct dma_fence *lfence = rcu_dereference(fobj->shared[i]);

			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
				     &lfence->flags))
				continue;

			if (!dma_fence_get_rcu(lfence))
				goto unlock_retry;

			if (dma_fence_is_signaled(lfence)) {
				dma_fence_put(lfence);
				continue;
			}

			fence = lfence;
			break;
		}
	}

	rcu_read_unlock();
	if (fence) {
		if (read_seqcount_retry(&obj->seq, seq)) {
			dma_fence_put(fence);
			goto retry;
		}

		ret = dma_fence_wait_timeout(fence, intr, ret);
		dma_fence_put(fence);
		if (ret > 0 && wait_all && (i + 1 < shared_count))
			goto retry;
	}
	return ret;

unlock_retry:
	rcu_read_unlock();
	goto retry;
}
EXPORT_SYMBOL_GPL(dma_resv_wait_timeout_rcu);

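/*
 * Illustrative sketch: blocking until a buffer is idle before the CPU
 * touches it. The two second bound and the error mapping are hypothetical;
 * -ERESTARTSYS is passed straight back so a signal restarts the syscall.
 */
static int __maybe_unused example_wait_idle(struct dma_resv *obj)
{
	long ret;

	ret = dma_resv_wait_timeout_rcu(obj, true /* wait_all */,
					true /* intr */, 2 * HZ);
	if (ret < 0)
		return ret;

	return ret ? 0 : -ETIMEDOUT;
}
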
static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
{
	struct dma_fence *fence, *lfence = passed_fence;
	int ret = 1;

	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
		fence = dma_fence_get_rcu(lfence);
		if (!fence)
			return -1;

		ret = !!dma_fence_is_signaled(fence);
		dma_fence_put(fence);
	}
	return ret;
}

/**
 * dma_resv_test_signaled_rcu - Test if a reservation object's
 * fences have been signaled.
 * @obj: the reservation object
 * @test_all: if true, test all fences, otherwise only test the exclusive
 * fence
 *
 * RETURNS
 * true if all fences signaled, else false
 */
bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
{
	unsigned seq, shared_count;
	int ret;

	rcu_read_lock();
retry:
	ret = true;
	shared_count = 0;
	seq = read_seqcount_begin(&obj->seq);

	if (test_all) {
		unsigned int i;

		struct dma_resv_list *fobj = rcu_dereference(obj->fence);

		if (fobj)
			shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; ++i) {
			struct dma_fence *fence = rcu_dereference(fobj->shared[i]);

			ret = dma_resv_test_signaled_single(fence);
			if (ret < 0)
				goto retry;
			else if (!ret)
				break;
		}

		if (read_seqcount_retry(&obj->seq, seq))
			goto retry;
	}

	if (!shared_count) {
		struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl);

		if (fence_excl) {
			ret = dma_resv_test_signaled_single(fence_excl);
			if (ret < 0)
				goto retry;

			if (read_seqcount_retry(&obj->seq, seq))
				goto retry;
		}
	}

	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(dma_resv_test_signaled_rcu);

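/*
 * Illustrative sketch: a non-blocking busy check, e.g. for a driver's busy
 * ioctl. A prospective writer has to wait for readers as well, so it tests
 * all fences; the helper name is hypothetical.
 */
static bool __maybe_unused example_buffer_is_idle(struct dma_resv *obj,
						  bool for_write)
{
	return dma_resv_test_signaled_rcu(obj, for_write);
}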