Print "(repairing)" in zpool status again
[zfs.git] / module / spl / spl-rwlock.c
blob4ffebc8ea646c12ce575c5065ed95539c98a8323
/*
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *  UCRL-CODE-235197
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *  For details, see <http://zfsonlinux.org/>.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *
 *  Solaris Porting Layer (SPL) Reader/Writer Lock Implementation.
 */
#include <sys/rwlock.h>
#include <linux/module.h>

#if defined(CONFIG_PREEMPT_RT_FULL)

#include <linux/rtmutex.h>
#define RT_MUTEX_OWNER_MASKALL  1UL

static int
__rwsem_tryupgrade(struct rw_semaphore *rwsem)
{
#if defined(READER_BIAS) && defined(WRITER_BIAS)
        /*
         * After the 4.9.20-rt16 kernel, the realtime patch series lifted
         * the single-reader restriction.  While this could be accommodated
         * by adding additional compatibility code, assume the rwsem can
         * never be upgraded.  All callers must already cleanly handle this
         * case.
         */
        return (0);
#else
        ASSERT((struct task_struct *)
            ((unsigned long)rwsem->lock.owner & ~RT_MUTEX_OWNER_MASKALL) ==
            current);

        /*
         * Prior to the 4.9.20-rt16 kernel realtime patch series, the rwsem
         * was implemented as a single mutex held by readers and writers
         * alike.  However, that implementation would prevent a thread from
         * taking a read lock twice, as the mutex would already be locked on
         * the second attempt.  Therefore the implementation allows a single
         * thread to take an rwsem as a read lock multiple times, tracking
         * that nesting with the read_depth counter.
         */
        if (rwsem->read_depth <= 1) {
                /*
                 * If the current thread has not taken the lock as a read
                 * lock more than once, we can allow an upgrade to a write
                 * lock.  rwsem_rt.h implements write locks as
                 * read_depth == 0.
                 */
                rwsem->read_depth = 0;
                return (1);
        }
        return (0);
#endif
}
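
/*
 * A rough sketch of the read_depth states assumed above (pre-4.9.20-rt16
 * rwsem_rt.h semantics):
 *
 *   read_depth == 0   rwsem held as a write lock
 *   read_depth == 1   rwsem held once as a read lock (upgradable)
 *   read_depth  > 1   read lock taken recursively (not upgradable here)
 *
 * Resetting read_depth to 0 therefore converts the caller's single read
 * hold into a write hold without dropping the underlying rt_mutex.
 */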
#elif defined(CONFIG_RWSEM_GENERIC_SPINLOCK)
static int
__rwsem_tryupgrade(struct rw_semaphore *rwsem)
{
        int ret = 0;
        unsigned long flags;

        spl_rwsem_lock_irqsave(&rwsem->wait_lock, flags);
        if (RWSEM_COUNT(rwsem) == SPL_RWSEM_SINGLE_READER_VALUE &&
            list_empty(&rwsem->wait_list)) {
                ret = 1;
                RWSEM_COUNT(rwsem) = SPL_RWSEM_SINGLE_WRITER_VALUE;
        }
        spl_rwsem_unlock_irqrestore(&rwsem->wait_lock, flags);
        return (ret);
}
#elif defined(HAVE_RWSEM_ATOMIC_LONG_COUNT)
static int
__rwsem_tryupgrade(struct rw_semaphore *rwsem)
{
        long val;

        val = atomic_long_cmpxchg(&rwsem->count, SPL_RWSEM_SINGLE_READER_VALUE,
            SPL_RWSEM_SINGLE_WRITER_VALUE);
        return (val == SPL_RWSEM_SINGLE_READER_VALUE);
}
#else
static int
__rwsem_tryupgrade(struct rw_semaphore *rwsem)
{
        typeof(rwsem->count) val;

        val = cmpxchg(&rwsem->count, SPL_RWSEM_SINGLE_READER_VALUE,
            SPL_RWSEM_SINGLE_WRITER_VALUE);
        return (val == SPL_RWSEM_SINGLE_READER_VALUE);
}
#endif
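
/*
 * Each __rwsem_tryupgrade() variant above shares the same contract: it
 * returns 1 only when the caller's read hold can be converted into a
 * write hold without ever releasing the semaphore; otherwise the read
 * lock is left untouched and 0 is returned.  In the cmpxchg-based
 * variants, for example, any additional reader, writer, or waiter moves
 * rwsem->count away from SPL_RWSEM_SINGLE_READER_VALUE, so the atomic
 * exchange to SPL_RWSEM_SINGLE_WRITER_VALUE simply fails.
 */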

int
rwsem_tryupgrade(struct rw_semaphore *rwsem)
{
        if (__rwsem_tryupgrade(rwsem)) {
                rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
                rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_);
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
                rwsem->owner = current;
#endif
                return (1);
        }
        return (0);
}
EXPORT_SYMBOL(rwsem_tryupgrade);
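
/*
 * Illustrative usage sketch (hypothetical caller, compiled out): a thread
 * holding the semaphore as a reader may attempt an in-place upgrade and,
 * if that fails, fall back to dropping the read lock and reacquiring the
 * semaphore as a writer.
 */
#if 0
static void
rwsem_upgrade_example(struct rw_semaphore *rwsem)
{
        down_read(rwsem);
        /* ... read-side work ... */
        if (!rwsem_tryupgrade(rwsem)) {
                /* Upgrade failed: drop the read lock, take the write lock. */
                up_read(rwsem);
                down_write(rwsem);
        }
        /* ... write-side work ... */
        up_write(rwsem);
}
#endif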

int spl_rw_init(void) { return 0; }
void spl_rw_fini(void) { }