/*
 * Copyright (C) 2012-2017 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-prison-v2.h"

#include <linux/spinlock.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/rwsem.h>

/*----------------------------------------------------------------*/

#define MIN_CELLS 1024

struct dm_bio_prison_v2 {
	struct workqueue_struct *wq;

	spinlock_t lock;
	mempool_t *cell_pool;
	struct rb_root cells;
};

static struct kmem_cache *_cell_cache;

/*----------------------------------------------------------------*/

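/*
 * A cell represents a region of the data device, identified by a
 * dm_cell_key_v2; bios touching the same region share a cell.  Callers
 * take shared references with dm_cell_get_v2()/dm_cell_put_v2() and
 * exclusive locks with dm_cell_lock_v2()/dm_cell_unlock_v2().  An
 * exclusive lock only becomes usable once any existing shared holders
 * have been quiesced via dm_cell_quiesce_v2().
 *
 * Illustrative caller flow for taking an exclusive lock (key, lock_level
 * and continuation are hypothetical caller-side variables, not defined
 * in this file):
 *
 *	struct dm_bio_prison_cell_v2 *prealloc, *cell;
 *	int r;
 *
 *	prealloc = dm_bio_prison_alloc_cell_v2(prison, GFP_NOIO);
 *	r = dm_cell_lock_v2(prison, &key, lock_level, prealloc, &cell);
 *	if (r < 0) {
 *		dm_bio_prison_free_cell_v2(prison, prealloc); // already locked
 *	} else {
 *		if (cell != prealloc)
 *			dm_bio_prison_free_cell_v2(prison, prealloc);
 *		if (r)
 *			dm_cell_quiesce_v2(prison, cell, &continuation);
 *		// r == 0: no shared holders, the lock is usable immediately
 *	}
 */
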
/*
 * The cell mempool is sized for MIN_CELLS cells in use _concurrently_.
 * Don't confuse that with the number of distinct keys.
 */
struct dm_bio_prison_v2 *dm_bio_prison_create_v2(struct workqueue_struct *wq)
{
	struct dm_bio_prison_v2 *prison = kmalloc(sizeof(*prison), GFP_KERNEL);

	if (!prison)
		return NULL;

	prison->wq = wq;
	spin_lock_init(&prison->lock);

	prison->cell_pool = mempool_create_slab_pool(MIN_CELLS, _cell_cache);
	if (!prison->cell_pool) {
		kfree(prison);
		return NULL;
	}

	prison->cells = RB_ROOT;

	return prison;
}
EXPORT_SYMBOL_GPL(dm_bio_prison_create_v2);

void dm_bio_prison_destroy_v2(struct dm_bio_prison_v2 *prison)
{
	mempool_destroy(prison->cell_pool);
	kfree(prison);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_destroy_v2);

struct dm_bio_prison_cell_v2 *dm_bio_prison_alloc_cell_v2(struct dm_bio_prison_v2 *prison, gfp_t gfp)
{
	return mempool_alloc(prison->cell_pool, gfp);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_alloc_cell_v2);

void dm_bio_prison_free_cell_v2(struct dm_bio_prison_v2 *prison,
				struct dm_bio_prison_cell_v2 *cell)
{
	mempool_free(cell, prison->cell_pool);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_free_cell_v2);

static void __setup_new_cell(struct dm_cell_key_v2 *key,
			     struct dm_bio_prison_cell_v2 *cell)
{
	memset(cell, 0, sizeof(*cell));
	memcpy(&cell->key, key, sizeof(cell->key));
	bio_list_init(&cell->bios);
}

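/*
 * Keys are ordered by their virtual and dev fields, then by block range.
 * Two keys whose block ranges overlap compare equal, so they resolve to
 * the same cell.
 */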
static int cmp_keys(struct dm_cell_key_v2 *lhs,
		    struct dm_cell_key_v2 *rhs)
{
	if (lhs->virtual < rhs->virtual)
		return -1;

	if (lhs->virtual > rhs->virtual)
		return 1;

	if (lhs->dev < rhs->dev)
		return -1;

	if (lhs->dev > rhs->dev)
		return 1;

	if (lhs->block_end <= rhs->block_begin)
		return -1;

	if (lhs->block_begin >= rhs->block_end)
		return 1;

	return 0;
}

/*
 * Returns true if node found, otherwise it inserts a new one.
 */
static bool __find_or_insert(struct dm_bio_prison_v2 *prison,
			     struct dm_cell_key_v2 *key,
			     struct dm_bio_prison_cell_v2 *cell_prealloc,
			     struct dm_bio_prison_cell_v2 **result)
{
	int r;
	struct rb_node **new = &prison->cells.rb_node, *parent = NULL;

	while (*new) {
		struct dm_bio_prison_cell_v2 *cell =
			container_of(*new, struct dm_bio_prison_cell_v2, node);

		r = cmp_keys(key, &cell->key);

		parent = *new;
		if (r < 0)
			new = &((*new)->rb_left);

		else if (r > 0)
			new = &((*new)->rb_right);

		else {
			*result = cell;
			return true;
		}
	}

	__setup_new_cell(key, cell_prealloc);
	*result = cell_prealloc;
	rb_link_node(&cell_prealloc->node, parent, new);
	rb_insert_color(&cell_prealloc->node, &prison->cells);

	return false;
}

static bool __get(struct dm_bio_prison_v2 *prison,
		  struct dm_cell_key_v2 *key,
		  unsigned lock_level,
		  struct bio *inmate,
		  struct dm_bio_prison_cell_v2 *cell_prealloc,
		  struct dm_bio_prison_cell_v2 **cell)
{
	if (__find_or_insert(prison, key, cell_prealloc, cell)) {
		if ((*cell)->exclusive_lock) {
			if (lock_level <= (*cell)->exclusive_level) {
				bio_list_add(&(*cell)->bios, inmate);
				return false;
			}
		}

		(*cell)->shared_count++;

	} else
		(*cell)->shared_count = 1;

	return true;
}

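/*
 * Take a shared reference on the cell for @key.  Returns true if the
 * caller now holds a shared reference and the bio may proceed; returns
 * false if an exclusive lock is held at @lock_level or above, in which
 * case @inmate has been added to the cell's deferred bio list.
 */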
bool dm_cell_get_v2(struct dm_bio_prison_v2 *prison,
		    struct dm_cell_key_v2 *key,
		    unsigned lock_level,
		    struct bio *inmate,
		    struct dm_bio_prison_cell_v2 *cell_prealloc,
		    struct dm_bio_prison_cell_v2 **cell_result)
{
	bool r;
	unsigned long flags;

	spin_lock_irqsave(&prison->lock, flags);
	r = __get(prison, key, lock_level, inmate, cell_prealloc, cell_result);
	spin_unlock_irqrestore(&prison->lock, flags);

	return r;
}
EXPORT_SYMBOL_GPL(dm_cell_get_v2);

static bool __put(struct dm_bio_prison_v2 *prison,
		  struct dm_bio_prison_cell_v2 *cell)
{
	BUG_ON(!cell->shared_count);
	cell->shared_count--;

	// FIXME: shared locks granted above the lock level could starve this
	if (!cell->shared_count) {
		if (cell->exclusive_lock) {
			if (cell->quiesce_continuation) {
				queue_work(prison->wq, cell->quiesce_continuation);
				cell->quiesce_continuation = NULL;
			}
		} else {
			rb_erase(&cell->node, &prison->cells);
			return true;
		}
	}

	return false;
}

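/*
 * Drop a shared reference.  If this was the last shared reference and an
 * exclusive lock is waiting, the pending quiesce continuation is queued
 * on the prison's workqueue.  Returns true if the cell is now completely
 * unused and has been removed from the tree, so the caller should free it.
 */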
bool dm_cell_put_v2(struct dm_bio_prison_v2 *prison,
		    struct dm_bio_prison_cell_v2 *cell)
{
	bool r;
	unsigned long flags;

	spin_lock_irqsave(&prison->lock, flags);
	r = __put(prison, cell);
	spin_unlock_irqrestore(&prison->lock, flags);

	return r;
}
EXPORT_SYMBOL_GPL(dm_cell_put_v2);

static int __lock(struct dm_bio_prison_v2 *prison,
		  struct dm_cell_key_v2 *key,
		  unsigned lock_level,
		  struct dm_bio_prison_cell_v2 *cell_prealloc,
		  struct dm_bio_prison_cell_v2 **cell_result)
{
	struct dm_bio_prison_cell_v2 *cell;

	if (__find_or_insert(prison, key, cell_prealloc, &cell)) {
		if (cell->exclusive_lock)
			return -EBUSY;

		cell->exclusive_lock = true;
		cell->exclusive_level = lock_level;
		*cell_result = cell;

		// FIXME: we don't yet know what level these shared locks
		// were taken at, so have to quiesce them all.
		return cell->shared_count > 0;

	} else {
		cell = cell_prealloc;
		cell->shared_count = 0;
		cell->exclusive_lock = true;
		cell->exclusive_level = lock_level;
		*cell_result = cell;
	}

	return 0;
}

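/*
 * Take an exclusive lock at @lock_level on the cell for @key.
 *
 * Returns:
 *   -EBUSY if the cell is already exclusively locked,
 *   0 if the lock was taken and there are no shared holders,
 *   1 if the lock was taken but shared holders remain; the caller must
 *     quiesce them (see dm_cell_quiesce_v2()) before relying on the lock.
 */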
int dm_cell_lock_v2(struct dm_bio_prison_v2 *prison,
		    struct dm_cell_key_v2 *key,
		    unsigned lock_level,
		    struct dm_bio_prison_cell_v2 *cell_prealloc,
		    struct dm_bio_prison_cell_v2 **cell_result)
{
	int r;
	unsigned long flags;

	spin_lock_irqsave(&prison->lock, flags);
	r = __lock(prison, key, lock_level, cell_prealloc, cell_result);
	spin_unlock_irqrestore(&prison->lock, flags);

	return r;
}
EXPORT_SYMBOL_GPL(dm_cell_lock_v2);

static void __quiesce(struct dm_bio_prison_v2 *prison,
		      struct dm_bio_prison_cell_v2 *cell,
		      struct work_struct *continuation)
{
	if (!cell->shared_count)
		queue_work(prison->wq, continuation);
	else
		cell->quiesce_continuation = continuation;
}

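/*
 * Queue @continuation once all shared holders of @cell have dropped their
 * references; if there are none, it is queued immediately.
 */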
void dm_cell_quiesce_v2(struct dm_bio_prison_v2 *prison,
			struct dm_bio_prison_cell_v2 *cell,
			struct work_struct *continuation)
{
	unsigned long flags;

	spin_lock_irqsave(&prison->lock, flags);
	__quiesce(prison, cell, continuation);
	spin_unlock_irqrestore(&prison->lock, flags);
}
EXPORT_SYMBOL_GPL(dm_cell_quiesce_v2);

static int __promote(struct dm_bio_prison_v2 *prison,
		     struct dm_bio_prison_cell_v2 *cell,
		     unsigned new_lock_level)
{
	if (!cell->exclusive_lock)
		return -EINVAL;

	cell->exclusive_level = new_lock_level;
	return cell->shared_count > 0;
}

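/*
 * Promote an existing exclusive lock to @new_lock_level.
 *
 * Returns -EINVAL if the cell is not exclusively locked, 1 if shared
 * holders remain and must be quiesced again, 0 otherwise.
 */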
int dm_cell_lock_promote_v2(struct dm_bio_prison_v2 *prison,
			    struct dm_bio_prison_cell_v2 *cell,
			    unsigned new_lock_level)
{
	int r;
	unsigned long flags;

	spin_lock_irqsave(&prison->lock, flags);
	r = __promote(prison, cell, new_lock_level);
	spin_unlock_irqrestore(&prison->lock, flags);

	return r;
}
EXPORT_SYMBOL_GPL(dm_cell_lock_promote_v2);

static bool __unlock(struct dm_bio_prison_v2 *prison,
		     struct dm_bio_prison_cell_v2 *cell,
		     struct bio_list *bios)
{
	BUG_ON(!cell->exclusive_lock);

	bio_list_merge(bios, &cell->bios);
	bio_list_init(&cell->bios);

	if (cell->shared_count) {
		cell->exclusive_lock = false;
		return false;
	}

	rb_erase(&cell->node, &prison->cells);
	return true;
}

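/*
 * Drop the exclusive lock on @cell, transferring any bios that were
 * deferred against it to @bios.  Returns true if the cell is now unused
 * and has been removed from the tree, so the caller should free it;
 * returns false if shared holders still reference the cell.
 */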
bool dm_cell_unlock_v2(struct dm_bio_prison_v2 *prison,
		       struct dm_bio_prison_cell_v2 *cell,
		       struct bio_list *bios)
{
	bool r;
	unsigned long flags;

	spin_lock_irqsave(&prison->lock, flags);
	r = __unlock(prison, cell, bios);
	spin_unlock_irqrestore(&prison->lock, flags);

	return r;
}
EXPORT_SYMBOL_GPL(dm_cell_unlock_v2);

/*----------------------------------------------------------------*/

int __init dm_bio_prison_init_v2(void)
{
	_cell_cache = KMEM_CACHE(dm_bio_prison_cell_v2, 0);
	if (!_cell_cache)
		return -ENOMEM;

	return 0;
}

void dm_bio_prison_exit_v2(void)
{
	kmem_cache_destroy(_cell_cache);
	_cell_cache = NULL;
}