/*
 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 * Copyright (C) 2007 The Regents of the University of California.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *
 * This file is part of the SPL, Solaris Porting Layer.
 * For details, see <http://zfsonlinux.org/>.
 *
 * The SPL is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * The SPL is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Solaris Porting Layer (SPL) Condition Variables Implementation.
 */
#include <sys/condvar.h>
#include <sys/time.h>
#include <linux/hrtimer.h>
#include <linux/compiler_compat.h>
void
__cv_init(kcondvar_t *cvp, char *name, kcv_type_t type, void *arg)
{
	ASSERT(type == CV_DEFAULT);

	cvp->cv_magic = CV_MAGIC;
	init_waitqueue_head(&cvp->cv_event);
	init_waitqueue_head(&cvp->cv_destroy);
	atomic_set(&cvp->cv_waiters, 0);
	/* Creation reference, dropped again in __cv_destroy(). */
	atomic_set(&cvp->cv_refs, 1);
	cvp->cv_mutex = NULL;
}
EXPORT_SYMBOL(__cv_init);
static int
cv_destroy_wakeup(kcondvar_t *cvp)
{
	if (!atomic_read(&cvp->cv_waiters) && !atomic_read(&cvp->cv_refs)) {
		ASSERT(cvp->cv_mutex == NULL);
		ASSERT(!waitqueue_active(&cvp->cv_event));
		return (1);
	}

	return (0);
}
void
__cv_destroy(kcondvar_t *cvp)
{
	ASSERT(cvp->cv_magic == CV_MAGIC);

	cvp->cv_magic = CV_DESTROY;
	atomic_dec(&cvp->cv_refs);

	/* Block until all waiters are woken and all references are dropped. */
	while (cv_destroy_wakeup(cvp) == 0)
		wait_event_timeout(cvp->cv_destroy, cv_destroy_wakeup(cvp), 1);

	ASSERT3P(cvp->cv_mutex, ==, NULL);
	ASSERT3S(atomic_read(&cvp->cv_refs), ==, 0);
	ASSERT3S(atomic_read(&cvp->cv_waiters), ==, 0);
	ASSERT3S(waitqueue_active(&cvp->cv_event), ==, 0);
}
EXPORT_SYMBOL(__cv_destroy);
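
/*
 * Illustrative sketch, not part of the SPL and compiled out: the typical
 * consumer-side lifecycle for these primitives through the cv_init() and
 * cv_destroy() wrappers.  The structure and field names (my_obj_t, mo_lock,
 * mo_cv, mo_ready) are hypothetical.
 */
#if 0
typedef struct my_obj {
	kmutex_t	mo_lock;
	kcondvar_t	mo_cv;
	boolean_t	mo_ready;
} my_obj_t;

static void
my_obj_setup(my_obj_t *mo)
{
	mutex_init(&mo->mo_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&mo->mo_cv, NULL, CV_DEFAULT, NULL);
	mo->mo_ready = B_FALSE;
}

static void
my_obj_teardown(my_obj_t *mo)
{
	/* cv_destroy() blocks until all waiters and references are gone. */
	cv_destroy(&mo->mo_cv);
	mutex_destroy(&mo->mo_lock);
}
#endif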
static void
cv_wait_common(kcondvar_t *cvp, kmutex_t *mp, int state, int io)
{
	DEFINE_WAIT(wait);
	kmutex_t *m;

	ASSERT(cvp->cv_magic == CV_MAGIC);
	ASSERT(mutex_owned(mp));
	atomic_inc(&cvp->cv_refs);

	m = READ_ONCE(cvp->cv_mutex);
	if (!m)
		m = xchg(&cvp->cv_mutex, mp);
	/* Ensure the same mutex is used by all callers. */
	ASSERT(m == NULL || m == mp);

	prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
	atomic_inc(&cvp->cv_waiters);

	/*
	 * The mutex is dropped only after prepare_to_wait(); this ensures
	 * we are linked into the waiters list and avoids the race where
	 * 'cvp->cv_waiters > 0' but the list is empty.
	 */
	mutex_exit(mp);
	if (io)
		io_schedule();
	else
		schedule();

	/* No more waiters, so a different mutex could be used. */
	if (atomic_dec_and_test(&cvp->cv_waiters)) {
		/*
		 * This is set without any lock, so it's racy.  But this is
		 * just for debug anyway, so make it best-effort.
		 */
		cvp->cv_mutex = NULL;
		wake_up(&cvp->cv_destroy);
	}

	finish_wait(&cvp->cv_event, &wait);
	atomic_dec(&cvp->cv_refs);

	/*
	 * Reacquire the mutex only after we drop our reference on the cvp;
	 * otherwise we could deadlock with a thread that holds the mutex
	 * and calls cv_destroy().
	 */
	mutex_enter(mp);
}
void
__cv_wait(kcondvar_t *cvp, kmutex_t *mp)
{
	cv_wait_common(cvp, mp, TASK_UNINTERRUPTIBLE, 0);
}
EXPORT_SYMBOL(__cv_wait);
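
/*
 * Illustrative sketch, not part of the SPL and compiled out: as with the
 * Illumos interface, cv_wait() must be called with the mutex held and the
 * predicate rechecked in a loop, because the condition may already have
 * changed by the time the waiter reacquires the mutex.  This reuses the
 * hypothetical my_obj_t sketched after __cv_destroy() above.
 */
#if 0
static void
my_obj_wait_ready(my_obj_t *mo)
{
	mutex_enter(&mo->mo_lock);
	while (!mo->mo_ready)
		cv_wait(&mo->mo_cv, &mo->mo_lock);
	mutex_exit(&mo->mo_lock);
}

static void
my_obj_mark_ready(my_obj_t *mo)
{
	mutex_enter(&mo->mo_lock);
	mo->mo_ready = B_TRUE;
	cv_broadcast(&mo->mo_cv);	/* wake every waiter on the state change */
	mutex_exit(&mo->mo_lock);
}
#endif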
void
__cv_wait_io(kcondvar_t *cvp, kmutex_t *mp)
{
	cv_wait_common(cvp, mp, TASK_UNINTERRUPTIBLE, 1);
}
EXPORT_SYMBOL(__cv_wait_io);

void
__cv_wait_sig(kcondvar_t *cvp, kmutex_t *mp)
{
	cv_wait_common(cvp, mp, TASK_INTERRUPTIBLE, 0);
}
EXPORT_SYMBOL(__cv_wait_sig);
#if defined(HAVE_IO_SCHEDULE_TIMEOUT)
#define	spl_io_schedule_timeout(t)	io_schedule_timeout(t)
#else
static void
__cv_wakeup(unsigned long data)
{
	wake_up_process((struct task_struct *)data);
}

static long
spl_io_schedule_timeout(long time_left)
{
	long expire_time = jiffies + time_left;
	struct timer_list timer;

	/*
	 * Arm a timer to wake this task after 'time_left' jiffies, perform
	 * an I/O-accounted sleep, then tear the timer down again.
	 */
	setup_timer(&timer, __cv_wakeup, (unsigned long)current);
	timer.expires = expire_time;
	add_timer(&timer);

	io_schedule();

	del_timer_sync(&timer);
	time_left = expire_time - jiffies;

	return (time_left < 0 ? 0 : time_left);
}
#endif
/*
 * The 'expire_time' argument is an absolute wall clock time in jiffies.
 * The return value is the time left (expire_time - now) or -1 if the
 * timeout occurred.
 */
static clock_t
__cv_timedwait_common(kcondvar_t *cvp, kmutex_t *mp, clock_t expire_time,
    int state, int io)
{
	DEFINE_WAIT(wait);
	kmutex_t *m;
	clock_t time_left;

	ASSERT(cvp->cv_magic == CV_MAGIC);
	ASSERT(mutex_owned(mp));

	/* XXX - Does not handle jiffie wrap properly */
	time_left = expire_time - jiffies;
	if (time_left <= 0)
		return (-1);

	atomic_inc(&cvp->cv_refs);
	m = READ_ONCE(cvp->cv_mutex);
	if (!m)
		m = xchg(&cvp->cv_mutex, mp);
	/* Ensure the same mutex is used by all callers. */
	ASSERT(m == NULL || m == mp);

	prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
	atomic_inc(&cvp->cv_waiters);

	/*
	 * The mutex is dropped only after prepare_to_wait(); this ensures
	 * we are linked into the waiters list and avoids the race where
	 * 'cvp->cv_waiters > 0' but the list is empty.
	 */
	mutex_exit(mp);
	if (io)
		time_left = spl_io_schedule_timeout(time_left);
	else
		time_left = schedule_timeout(time_left);

	/* No more waiters, so a different mutex could be used. */
	if (atomic_dec_and_test(&cvp->cv_waiters)) {
		/*
		 * This is set without any lock, so it's racy.  But this is
		 * just for debug anyway, so make it best-effort.
		 */
		cvp->cv_mutex = NULL;
		wake_up(&cvp->cv_destroy);
	}

	finish_wait(&cvp->cv_event, &wait);
	atomic_dec(&cvp->cv_refs);

	/*
	 * Reacquire the mutex only after we drop our reference on the cvp;
	 * otherwise we could deadlock with a thread that holds the mutex
	 * and calls cv_destroy().
	 */
	mutex_enter(mp);
	return (time_left > 0 ? time_left : -1);
}
clock_t
__cv_timedwait(kcondvar_t *cvp, kmutex_t *mp, clock_t exp_time)
{
	return (__cv_timedwait_common(cvp, mp, exp_time,
	    TASK_UNINTERRUPTIBLE, 0));
}
EXPORT_SYMBOL(__cv_timedwait);

clock_t
__cv_timedwait_io(kcondvar_t *cvp, kmutex_t *mp, clock_t exp_time)
{
	return (__cv_timedwait_common(cvp, mp, exp_time,
	    TASK_UNINTERRUPTIBLE, 1));
}
EXPORT_SYMBOL(__cv_timedwait_io);

clock_t
__cv_timedwait_sig(kcondvar_t *cvp, kmutex_t *mp, clock_t exp_time)
{
	return (__cv_timedwait_common(cvp, mp, exp_time,
	    TASK_INTERRUPTIBLE, 0));
}
EXPORT_SYMBOL(__cv_timedwait_sig);
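
/*
 * Illustrative sketch, not part of the SPL and compiled out: cv_timedwait()
 * takes an absolute deadline in jiffies, so callers typically compute it once
 * from ddi_get_lbolt() and reuse it across spurious wakeups; -1 signals that
 * the deadline passed.  This reuses the hypothetical my_obj_t sketched after
 * __cv_destroy() above, and assumes the usual SPL MSEC_TO_TICK() helper.
 */
#if 0
static boolean_t
my_obj_wait_ready_timeout(my_obj_t *mo, int timeout_ms)
{
	clock_t deadline = ddi_get_lbolt() + MSEC_TO_TICK(timeout_ms);
	boolean_t ready;

	mutex_enter(&mo->mo_lock);
	while (!mo->mo_ready) {
		if (cv_timedwait(&mo->mo_cv, &mo->mo_lock, deadline) == -1)
			break;	/* deadline expired */
	}
	ready = mo->mo_ready;
	mutex_exit(&mo->mo_lock);

	return (ready);
}
#endif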
/*
 * The 'expire_time' argument is an absolute clock time in nanoseconds.
 * The return value is the time left (expire_time - now) or -1 if the
 * timeout occurred.
 */
static clock_t
__cv_timedwait_hires(kcondvar_t *cvp, kmutex_t *mp, hrtime_t expire_time,
    int state)
{
	DEFINE_WAIT(wait);
	kmutex_t *m;
	hrtime_t time_left;
	ktime_t ktime_left;

	ASSERT(cvp->cv_magic == CV_MAGIC);
	ASSERT(mutex_owned(mp));

	time_left = expire_time - gethrtime();
	if (time_left <= 0)
		return (-1);

	atomic_inc(&cvp->cv_refs);
	m = READ_ONCE(cvp->cv_mutex);
	if (!m)
		m = xchg(&cvp->cv_mutex, mp);
	/* Ensure the same mutex is used by all callers. */
	ASSERT(m == NULL || m == mp);

	prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
	atomic_inc(&cvp->cv_waiters);

	/*
	 * The mutex is dropped only after prepare_to_wait(); this ensures
	 * we are linked into the waiters list and avoids the race where
	 * 'cvp->cv_waiters > 0' but the list is empty.
	 */
	mutex_exit(mp);

	/*
	 * Allow a 100 us range to give the kernel an opportunity to
	 * coalesce interrupts.
	 */
	ktime_left = ktime_set(0, time_left);
	schedule_hrtimeout_range(&ktime_left, 100 * NSEC_PER_USEC,
	    HRTIMER_MODE_REL);

	/* No more waiters, so a different mutex could be used. */
	if (atomic_dec_and_test(&cvp->cv_waiters)) {
		/*
		 * This is set without any lock, so it's racy.  But this is
		 * just for debug anyway, so make it best-effort.
		 */
		cvp->cv_mutex = NULL;
		wake_up(&cvp->cv_destroy);
	}

	finish_wait(&cvp->cv_event, &wait);
	atomic_dec(&cvp->cv_refs);

	mutex_enter(mp);
	time_left = expire_time - gethrtime();
	return (time_left > 0 ? NSEC_TO_TICK(time_left) : -1);
}
/*
 * Compatibility wrapper for the cv_timedwait_hires() Illumos interface.
 */
static clock_t
cv_timedwait_hires_common(kcondvar_t *cvp, kmutex_t *mp, hrtime_t tim,
    hrtime_t res, int flag, int state)
{
	if (res > 1) {
		/*
		 * Align the expiration to the specified resolution.
		 */
		if (flag & CALLOUT_FLAG_ROUNDUP)
			tim += res - 1;
		tim = (tim / res) * res;
	}

	/* Convert a relative timeout into an absolute expiration time. */
	if (!(flag & CALLOUT_FLAG_ABSOLUTE))
		tim += gethrtime();

	return (__cv_timedwait_hires(cvp, mp, tim, state));
}
clock_t
cv_timedwait_hires(kcondvar_t *cvp, kmutex_t *mp, hrtime_t tim, hrtime_t res,
    int flag)
{
	return (cv_timedwait_hires_common(cvp, mp, tim, res, flag,
	    TASK_UNINTERRUPTIBLE));
}
EXPORT_SYMBOL(cv_timedwait_hires);

clock_t
cv_timedwait_sig_hires(kcondvar_t *cvp, kmutex_t *mp, hrtime_t tim,
    hrtime_t res, int flag)
{
	return (cv_timedwait_hires_common(cvp, mp, tim, res, flag,
	    TASK_INTERRUPTIBLE));
}
EXPORT_SYMBOL(cv_timedwait_sig_hires);
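
/*
 * Illustrative sketch, not part of the SPL and compiled out: with
 * CALLOUT_FLAG_ABSOLUTE unset (flag of 0), cv_timedwait_hires() interprets
 * 'tim' as a relative delay in nanoseconds, so a millisecond-scale timeout
 * can be expressed directly.  Note that in this simple loop the delay
 * restarts after every spurious wakeup.  This reuses the hypothetical
 * my_obj_t sketched after __cv_destroy() above and assumes the usual SPL
 * MSEC2NSEC() helper.
 */
#if 0
static int
my_obj_wait_ready_hires(my_obj_t *mo, hrtime_t delay_ns)
{
	int rc = 1;

	mutex_enter(&mo->mo_lock);
	while (!mo->mo_ready) {
		if (cv_timedwait_hires(&mo->mo_cv, &mo->mo_lock, delay_ns,
		    MSEC2NSEC(1), 0) == -1) {
			rc = -1;	/* timed out */
			break;
		}
	}
	mutex_exit(&mo->mo_lock);

	return (rc);
}
#endif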
void
__cv_signal(kcondvar_t *cvp)
{
	ASSERT(cvp->cv_magic == CV_MAGIC);
	atomic_inc(&cvp->cv_refs);

	/*
	 * All waiters are added with WQ_FLAG_EXCLUSIVE, so only one
	 * waiter will be made runnable by each call to wake_up().
	 * Additionally, wake_up() holds a spinlock associated with
	 * the wait queue to ensure we don't race waking up processes.
	 */
	if (atomic_read(&cvp->cv_waiters) > 0)
		wake_up(&cvp->cv_event);

	atomic_dec(&cvp->cv_refs);
}
EXPORT_SYMBOL(__cv_signal);
void
__cv_broadcast(kcondvar_t *cvp)
{
	ASSERT(cvp->cv_magic == CV_MAGIC);
	atomic_inc(&cvp->cv_refs);

	/*
	 * wake_up_all() will wake up all waiters, even those which
	 * have the WQ_FLAG_EXCLUSIVE flag set.
	 */
	if (atomic_read(&cvp->cv_waiters) > 0)
		wake_up_all(&cvp->cv_event);

	atomic_dec(&cvp->cv_refs);
}
EXPORT_SYMBOL(__cv_broadcast);
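
/*
 * Illustrative sketch, not part of the SPL and compiled out: cv_signal()
 * wakes a single exclusive waiter and suits one-item hand-offs, while
 * cv_broadcast() wakes every waiter and suits state changes all waiters
 * must observe, such as shutdown.  The my_queue_t type and its fields are
 * hypothetical.
 */
#if 0
typedef struct my_queue {
	kmutex_t	q_lock;
	kcondvar_t	q_cv;
	uint64_t	q_count;
	boolean_t	q_shutdown;
} my_queue_t;

static void
my_queue_put(my_queue_t *q)
{
	mutex_enter(&q->q_lock);
	q->q_count++;
	cv_signal(&q->q_cv);		/* one new item, wake one consumer */
	mutex_exit(&q->q_lock);
}

static void
my_queue_shutdown(my_queue_t *q)
{
	mutex_enter(&q->q_lock);
	q->q_shutdown = B_TRUE;
	cv_broadcast(&q->q_cv);		/* every waiter must notice shutdown */
	mutex_exit(&q->q_lock);
}
#endif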