module/spl/spl-condvar.c
/*
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *  UCRL-CODE-235197
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *  For details, see <http://zfsonlinux.org/>.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *
 *  Solaris Porting Layer (SPL) Condition Variables Implementation.
 */
#include <sys/condvar.h>
#include <sys/time.h>
#include <linux/hrtimer.h>
#include <linux/compiler_compat.h>
void
__cv_init(kcondvar_t *cvp, char *name, kcv_type_t type, void *arg)
{
        ASSERT(cvp);
        ASSERT(name == NULL);
        ASSERT(type == CV_DEFAULT);
        ASSERT(arg == NULL);

        cvp->cv_magic = CV_MAGIC;
        init_waitqueue_head(&cvp->cv_event);
        init_waitqueue_head(&cvp->cv_destroy);
        atomic_set(&cvp->cv_waiters, 0);
        atomic_set(&cvp->cv_refs, 1);
        cvp->cv_mutex = NULL;
}
EXPORT_SYMBOL(__cv_init);
static int
cv_destroy_wakeup(kcondvar_t *cvp)
{
        if (!atomic_read(&cvp->cv_waiters) && !atomic_read(&cvp->cv_refs)) {
                ASSERT(cvp->cv_mutex == NULL);
                ASSERT(!waitqueue_active(&cvp->cv_event));
                return (1);
        }

        return (0);
}
void
__cv_destroy(kcondvar_t *cvp)
{
        ASSERT(cvp);
        ASSERT(cvp->cv_magic == CV_MAGIC);
        cvp->cv_magic = CV_DESTROY;
        atomic_dec(&cvp->cv_refs);

        /* Block until all waiters are woken and references dropped. */
        while (cv_destroy_wakeup(cvp) == 0)
                wait_event_timeout(cvp->cv_destroy, cv_destroy_wakeup(cvp), 1);

        ASSERT3P(cvp->cv_mutex, ==, NULL);
        ASSERT3S(atomic_read(&cvp->cv_refs), ==, 0);
        ASSERT3S(atomic_read(&cvp->cv_waiters), ==, 0);
        ASSERT3S(waitqueue_active(&cvp->cv_event), ==, 0);
}
EXPORT_SYMBOL(__cv_destroy);
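
/*
 * Editorial usage note (an illustrative sketch, not part of the original
 * file): because cv_destroy() blocks until every waiter has dropped its
 * reference, callers must first ensure no new waiters can arrive,
 * typically by signalling shutdown under the shared mutex:
 *
 *      mutex_enter(&lock);
 *      shutting_down = B_TRUE;
 *      cv_broadcast(&cv);
 *      mutex_exit(&lock);
 *      cv_destroy(&cv);
 *
 * 'lock', 'cv', and 'shutting_down' are hypothetical names used only for
 * this example.
 */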
static void
cv_wait_common(kcondvar_t *cvp, kmutex_t *mp, int state, int io)
{
        DEFINE_WAIT(wait);
        kmutex_t *m;

        ASSERT(cvp);
        ASSERT(mp);
        ASSERT(cvp->cv_magic == CV_MAGIC);
        ASSERT(mutex_owned(mp));
        atomic_inc(&cvp->cv_refs);

        m = READ_ONCE(cvp->cv_mutex);
        if (!m)
                m = xchg(&cvp->cv_mutex, mp);
        /* Ensure the same mutex is used by all callers */
        ASSERT(m == NULL || m == mp);

        prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
        atomic_inc(&cvp->cv_waiters);

        /*
         * The mutex must be dropped after prepare_to_wait(); this ensures
         * we're linked into the waiters list and avoids the race where
         * 'cvp->cv_waiters > 0' but the list is empty.
         */
        mutex_exit(mp);
        if (io)
                io_schedule();
        else
                schedule();

        /* No more waiters; a different mutex could now be used */
        if (atomic_dec_and_test(&cvp->cv_waiters)) {
                /*
                 * This is set without any lock, so it's racy.  But this is
                 * just for debugging anyway, so make it best-effort.
                 */
                cvp->cv_mutex = NULL;
                wake_up(&cvp->cv_destroy);
        }

        finish_wait(&cvp->cv_event, &wait);
        atomic_dec(&cvp->cv_refs);

        /*
         * Reacquire the mutex only after releasing the cvp; otherwise we
         * could deadlock with a thread that holds the mutex and calls
         * cv_destroy().
         */
        mutex_enter(mp);
}
void
__cv_wait(kcondvar_t *cvp, kmutex_t *mp)
{
        cv_wait_common(cvp, mp, TASK_UNINTERRUPTIBLE, 0);
}
EXPORT_SYMBOL(__cv_wait);

void
__cv_wait_io(kcondvar_t *cvp, kmutex_t *mp)
{
        cv_wait_common(cvp, mp, TASK_UNINTERRUPTIBLE, 1);
}
EXPORT_SYMBOL(__cv_wait_io);

void
__cv_wait_sig(kcondvar_t *cvp, kmutex_t *mp)
{
        cv_wait_common(cvp, mp, TASK_INTERRUPTIBLE, 0);
}
EXPORT_SYMBOL(__cv_wait_sig);
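
/*
 * Usage sketch (editorial, illustrative only): consumers reach these
 * functions through the cv_wait()/cv_wait_sig() macros in sys/condvar.h
 * and should always recheck their predicate in a loop, since a waiter
 * may be woken spuriously or by cv_broadcast():
 *
 *      mutex_enter(&lock);
 *      while (!condition)
 *              cv_wait(&cv, &lock);
 *      mutex_exit(&lock);
 *
 * cv_wait() returns with 'lock' held because cv_wait_common() reacquires
 * the mutex before returning.  'lock', 'cv', and 'condition' are
 * hypothetical names for this example.
 */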
#if defined(HAVE_IO_SCHEDULE_TIMEOUT)
#define	spl_io_schedule_timeout(t)	io_schedule_timeout(t)
#else
static void
__cv_wakeup(unsigned long data)
{
        wake_up_process((struct task_struct *)data);
}

static long
spl_io_schedule_timeout(long time_left)
{
        long expire_time = jiffies + time_left;
        struct timer_list timer;

        init_timer(&timer);
        setup_timer(&timer, __cv_wakeup, (unsigned long)current);
        timer.expires = expire_time;
        add_timer(&timer);

        io_schedule();

        del_timer_sync(&timer);
        time_left = expire_time - jiffies;

        return (time_left < 0 ? 0 : time_left);
}
#endif
/*
 * The 'expire_time' argument is an absolute wall clock time in jiffies.
 * The return value is the time left (expire_time - now) or -1 if the
 * timeout occurred.
 */
static clock_t
__cv_timedwait_common(kcondvar_t *cvp, kmutex_t *mp, clock_t expire_time,
    int state, int io)
{
        DEFINE_WAIT(wait);
        kmutex_t *m;
        clock_t time_left;

        ASSERT(cvp);
        ASSERT(mp);
        ASSERT(cvp->cv_magic == CV_MAGIC);
        ASSERT(mutex_owned(mp));

        /* XXX - Does not handle jiffy wrap properly */
        time_left = expire_time - jiffies;
        if (time_left <= 0)
                return (-1);

        atomic_inc(&cvp->cv_refs);
        m = READ_ONCE(cvp->cv_mutex);
        if (!m)
                m = xchg(&cvp->cv_mutex, mp);
        /* Ensure the same mutex is used by all callers */
        ASSERT(m == NULL || m == mp);

        prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
        atomic_inc(&cvp->cv_waiters);

        /*
         * The mutex must be dropped after prepare_to_wait(); this ensures
         * we're linked into the waiters list and avoids the race where
         * 'cvp->cv_waiters > 0' but the list is empty.
         */
        mutex_exit(mp);
        if (io)
                time_left = spl_io_schedule_timeout(time_left);
        else
                time_left = schedule_timeout(time_left);

        /* No more waiters; a different mutex could now be used */
        if (atomic_dec_and_test(&cvp->cv_waiters)) {
                /*
                 * This is set without any lock, so it's racy.  But this is
                 * just for debugging anyway, so make it best-effort.
                 */
                cvp->cv_mutex = NULL;
                wake_up(&cvp->cv_destroy);
        }

        finish_wait(&cvp->cv_event, &wait);
        atomic_dec(&cvp->cv_refs);

        /*
         * Reacquire the mutex only after releasing the cvp; otherwise we
         * could deadlock with a thread that holds the mutex and calls
         * cv_destroy().
         */
        mutex_enter(mp);
        return (time_left > 0 ? time_left : -1);
}
clock_t
__cv_timedwait(kcondvar_t *cvp, kmutex_t *mp, clock_t exp_time)
{
        return (__cv_timedwait_common(cvp, mp, exp_time,
            TASK_UNINTERRUPTIBLE, 0));
}
EXPORT_SYMBOL(__cv_timedwait);

clock_t
__cv_timedwait_io(kcondvar_t *cvp, kmutex_t *mp, clock_t exp_time)
{
        return (__cv_timedwait_common(cvp, mp, exp_time,
            TASK_UNINTERRUPTIBLE, 1));
}
EXPORT_SYMBOL(__cv_timedwait_io);

clock_t
__cv_timedwait_sig(kcondvar_t *cvp, kmutex_t *mp, clock_t exp_time)
{
        return (__cv_timedwait_common(cvp, mp, exp_time,
            TASK_INTERRUPTIBLE, 0));
}
EXPORT_SYMBOL(__cv_timedwait_sig);
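
/*
 * Usage sketch (editorial, illustrative only): the expiration is an
 * absolute lbolt value, so a relative timeout is built from the current
 * time, assuming the cv_timedwait() macro, ddi_get_lbolt(), and
 * MSEC_TO_TICK() helpers from the SPL headers:
 *
 *      mutex_enter(&lock);
 *      while (!condition) {
 *              if (cv_timedwait(&cv, &lock,
 *                  ddi_get_lbolt() + MSEC_TO_TICK(500)) == -1)
 *                      break;
 *      }
 *      mutex_exit(&lock);
 *
 * A -1 return indicates the 500 ms deadline passed before the condition
 * was signaled; the mutex is held again in either case.
 */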
/*
 * The 'expire_time' argument is an absolute clock time in nanoseconds.
 * The return value is the time left (expire_time - now) or -1 if the
 * timeout occurred.
 */
static clock_t
__cv_timedwait_hires(kcondvar_t *cvp, kmutex_t *mp, hrtime_t expire_time,
    int state)
{
        DEFINE_WAIT(wait);
        kmutex_t *m;
        hrtime_t time_left;
        ktime_t ktime_left;

        ASSERT(cvp);
        ASSERT(mp);
        ASSERT(cvp->cv_magic == CV_MAGIC);
        ASSERT(mutex_owned(mp));

        time_left = expire_time - gethrtime();
        if (time_left <= 0)
                return (-1);

        atomic_inc(&cvp->cv_refs);
        m = READ_ONCE(cvp->cv_mutex);
        if (!m)
                m = xchg(&cvp->cv_mutex, mp);
        /* Ensure the same mutex is used by all callers */
        ASSERT(m == NULL || m == mp);

        prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
        atomic_inc(&cvp->cv_waiters);

        /*
         * The mutex must be dropped after prepare_to_wait(); this ensures
         * we're linked into the waiters list and avoids the race where
         * 'cvp->cv_waiters > 0' but the list is empty.
         */
        mutex_exit(mp);

        /*
         * Allow a 100 us range to give the kernel an opportunity to
         * coalesce interrupts.
         */
        ktime_left = ktime_set(0, time_left);
        schedule_hrtimeout_range(&ktime_left, 100 * NSEC_PER_USEC,
            HRTIMER_MODE_REL);

        /* No more waiters; a different mutex could now be used */
        if (atomic_dec_and_test(&cvp->cv_waiters)) {
                /*
                 * This is set without any lock, so it's racy.  But this is
                 * just for debugging anyway, so make it best-effort.
                 */
                cvp->cv_mutex = NULL;
                wake_up(&cvp->cv_destroy);
        }

        finish_wait(&cvp->cv_event, &wait);
        atomic_dec(&cvp->cv_refs);

        mutex_enter(mp);
        time_left = expire_time - gethrtime();
        return (time_left > 0 ? NSEC_TO_TICK(time_left) : -1);
}
/*
 * Compatibility wrapper for the cv_timedwait_hires() Illumos interface.
 */
static clock_t
cv_timedwait_hires_common(kcondvar_t *cvp, kmutex_t *mp, hrtime_t tim,
    hrtime_t res, int flag, int state)
{
        if (res > 1) {
                /*
                 * Align expiration to the specified resolution.
                 */
                if (flag & CALLOUT_FLAG_ROUNDUP)
                        tim += res - 1;
                tim = (tim / res) * res;
        }
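
        /*
         * Worked example (editorial, hypothetical values): with a 1 ms
         * resolution (res = 1000000) and tim = 2500000 ns,
         * CALLOUT_FLAG_ROUNDUP gives (2500000 + 999999) / 1000000 *
         * 1000000 = 3000000, whereas the default truncation gives 2000000.
         */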
        if (!(flag & CALLOUT_FLAG_ABSOLUTE))
                tim += gethrtime();

        return (__cv_timedwait_hires(cvp, mp, tim, state));
}
clock_t
cv_timedwait_hires(kcondvar_t *cvp, kmutex_t *mp, hrtime_t tim, hrtime_t res,
    int flag)
{
        return (cv_timedwait_hires_common(cvp, mp, tim, res, flag,
            TASK_UNINTERRUPTIBLE));
}
EXPORT_SYMBOL(cv_timedwait_hires);

clock_t
cv_timedwait_sig_hires(kcondvar_t *cvp, kmutex_t *mp, hrtime_t tim,
    hrtime_t res, int flag)
{
        return (cv_timedwait_hires_common(cvp, mp, tim, res, flag,
            TASK_INTERRUPTIBLE));
}
EXPORT_SYMBOL(cv_timedwait_sig_hires);
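
/*
 * Usage sketch (editorial, illustrative only, assuming the MSEC2NSEC()
 * and USEC2NSEC() convenience macros): a relative 10 ms high-resolution
 * wait, rounded up to microsecond resolution:
 *
 *      (void) cv_timedwait_hires(&cv, &lock, MSEC2NSEC(10),
 *          USEC2NSEC(1), CALLOUT_FLAG_ROUNDUP);
 *
 * Omitting CALLOUT_FLAG_ABSOLUTE makes 'tim' relative; the wrapper adds
 * gethrtime() before calling __cv_timedwait_hires().
 */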
void
__cv_signal(kcondvar_t *cvp)
{
        ASSERT(cvp);
        ASSERT(cvp->cv_magic == CV_MAGIC);
        atomic_inc(&cvp->cv_refs);

        /*
         * All waiters are added with WQ_FLAG_EXCLUSIVE, so only one
         * waiter will be made runnable by each call to wake_up().
         * Additionally, wake_up() holds a spin_lock associated with
         * the wait queue to ensure we don't race waking up processes.
         */
        if (atomic_read(&cvp->cv_waiters) > 0)
                wake_up(&cvp->cv_event);

        atomic_dec(&cvp->cv_refs);
}
EXPORT_SYMBOL(__cv_signal);
void
__cv_broadcast(kcondvar_t *cvp)
{
        ASSERT(cvp);
        ASSERT(cvp->cv_magic == CV_MAGIC);
        atomic_inc(&cvp->cv_refs);

        /*
         * wake_up_all() will wake up all waiters, even those which
         * have the WQ_FLAG_EXCLUSIVE flag set.
         */
        if (atomic_read(&cvp->cv_waiters) > 0)
                wake_up_all(&cvp->cv_event);

        atomic_dec(&cvp->cv_refs);
}
EXPORT_SYMBOL(__cv_broadcast);