drivers/md/dm-bio-prison-v2.c (Linux 4.19.133)
/*
 * Copyright (C) 2012-2017 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-prison-v2.h"

#include <linux/spinlock.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/rwsem.h>

/*----------------------------------------------------------------*/

#define MIN_CELLS 1024
struct dm_bio_prison_v2 {
	struct workqueue_struct *wq;

	spinlock_t lock;
	struct rb_root cells;
	mempool_t cell_pool;
};

static struct kmem_cache *_cell_cache;

/*----------------------------------------------------------------*/
/*
 * MIN_CELLS sizes the cell mempool: it should be the number of cells you
 * want in use _concurrently_.  Don't confuse it with the number of
 * distinct keys.
 */
struct dm_bio_prison_v2 *dm_bio_prison_create_v2(struct workqueue_struct *wq)
{
	struct dm_bio_prison_v2 *prison = kzalloc(sizeof(*prison), GFP_KERNEL);
	int ret;

	if (!prison)
		return NULL;

	prison->wq = wq;
	spin_lock_init(&prison->lock);

	ret = mempool_init_slab_pool(&prison->cell_pool, MIN_CELLS, _cell_cache);
	if (ret) {
		kfree(prison);
		return NULL;
	}

	prison->cells = RB_ROOT;

	return prison;
}
EXPORT_SYMBOL_GPL(dm_bio_prison_create_v2);
void dm_bio_prison_destroy_v2(struct dm_bio_prison_v2 *prison)
{
	mempool_exit(&prison->cell_pool);
	kfree(prison);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_destroy_v2);
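/*
 * Illustrative sketch (not part of the original file): a target typically
 * creates one prison per device instance in its constructor and tears it
 * down in its destructor.  The "cache" structure and workqueue name below
 * are hypothetical; only the prison calls are from this file.
 *
 *	cache->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
 *	if (!cache->wq)
 *		goto bad;
 *
 *	cache->prison = dm_bio_prison_create_v2(cache->wq);
 *	if (!cache->prison)
 *		goto bad;
 *
 *	// ... and in the destructor:
 *	dm_bio_prison_destroy_v2(cache->prison);
 *	destroy_workqueue(cache->wq);
 */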
struct dm_bio_prison_cell_v2 *dm_bio_prison_alloc_cell_v2(struct dm_bio_prison_v2 *prison, gfp_t gfp)
{
	return mempool_alloc(&prison->cell_pool, gfp);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_alloc_cell_v2);
void dm_bio_prison_free_cell_v2(struct dm_bio_prison_v2 *prison,
				struct dm_bio_prison_cell_v2 *cell)
{
	mempool_free(cell, &prison->cell_pool);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_free_cell_v2);
static void __setup_new_cell(struct dm_cell_key_v2 *key,
			     struct dm_bio_prison_cell_v2 *cell)
{
	memset(cell, 0, sizeof(*cell));
	memcpy(&cell->key, key, sizeof(cell->key));
	bio_list_init(&cell->bios);
}
static int cmp_keys(struct dm_cell_key_v2 *lhs,
		    struct dm_cell_key_v2 *rhs)
{
	if (lhs->virtual < rhs->virtual)
		return -1;

	if (lhs->virtual > rhs->virtual)
		return 1;

	if (lhs->dev < rhs->dev)
		return -1;

	if (lhs->dev > rhs->dev)
		return 1;

	if (lhs->block_end <= rhs->block_begin)
		return -1;

	if (lhs->block_begin >= rhs->block_end)
		return 1;

	return 0;
}
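/*
 * Note: keys order by virtual space, then device, then block range.  Two
 * keys whose [block_begin, block_end) ranges overlap on the same device and
 * virtual space compare equal, so all bios touching overlapping ranges are
 * routed to the same cell.
 */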
/*
 * Returns true if node found, otherwise it inserts a new one.
 */
static bool __find_or_insert(struct dm_bio_prison_v2 *prison,
			     struct dm_cell_key_v2 *key,
			     struct dm_bio_prison_cell_v2 *cell_prealloc,
			     struct dm_bio_prison_cell_v2 **result)
{
	int r;
	struct rb_node **new = &prison->cells.rb_node, *parent = NULL;

	while (*new) {
		struct dm_bio_prison_cell_v2 *cell =
			rb_entry(*new, struct dm_bio_prison_cell_v2, node);

		r = cmp_keys(key, &cell->key);

		parent = *new;
		if (r < 0)
			new = &((*new)->rb_left);

		else if (r > 0)
			new = &((*new)->rb_right);

		else {
			*result = cell;
			return true;
		}
	}

	__setup_new_cell(key, cell_prealloc);
	*result = cell_prealloc;
	rb_link_node(&cell_prealloc->node, parent, new);
	rb_insert_color(&cell_prealloc->node, &prison->cells);

	return false;
}
static bool __get(struct dm_bio_prison_v2 *prison,
		  struct dm_cell_key_v2 *key,
		  unsigned lock_level,
		  struct bio *inmate,
		  struct dm_bio_prison_cell_v2 *cell_prealloc,
		  struct dm_bio_prison_cell_v2 **cell)
{
	if (__find_or_insert(prison, key, cell_prealloc, cell)) {
		if ((*cell)->exclusive_lock) {
			if (lock_level <= (*cell)->exclusive_level) {
				bio_list_add(&(*cell)->bios, inmate);
				return false;
			}
		}

		(*cell)->shared_count++;

	} else
		(*cell)->shared_count = 1;

	return true;
}
bool dm_cell_get_v2(struct dm_bio_prison_v2 *prison,
		    struct dm_cell_key_v2 *key,
		    unsigned lock_level,
		    struct bio *inmate,
		    struct dm_bio_prison_cell_v2 *cell_prealloc,
		    struct dm_bio_prison_cell_v2 **cell_result)
{
	int r;
	unsigned long flags;

	spin_lock_irqsave(&prison->lock, flags);
	r = __get(prison, key, lock_level, inmate, cell_prealloc, cell_result);
	spin_unlock_irqrestore(&prison->lock, flags);

	return r;
}
EXPORT_SYMBOL_GPL(dm_cell_get_v2);
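/*
 * Illustrative sketch (not part of the original file) of taking a shared
 * lock before remapping a bio.  "oblock" and the lock level value are
 * placeholders; levels are defined by the calling target.
 *
 *	struct dm_cell_key_v2 key;
 *	struct dm_bio_prison_cell_v2 *prealloc, *cell;
 *
 *	prealloc = dm_bio_prison_alloc_cell_v2(prison, GFP_NOWAIT);
 *	if (!prealloc)
 *		return -ENOMEM;		// or defer the bio
 *
 *	key.virtual = 0;
 *	key.dev = 0;
 *	key.block_begin = oblock;
 *	key.block_end = oblock + 1;
 *
 *	if (!dm_cell_get_v2(prison, &key, 0, bio, prealloc, &cell)) {
 *		// exclusive holder present: bio was queued on the cell
 *		dm_bio_prison_free_cell_v2(prison, prealloc);
 *		return 0;
 *	}
 *
 *	if (cell != prealloc)
 *		dm_bio_prison_free_cell_v2(prison, prealloc);
 *
 *	// shared lock held: remap and issue the bio, then call
 *	// dm_cell_put_v2() when it completes.
 */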
static bool __put(struct dm_bio_prison_v2 *prison,
		  struct dm_bio_prison_cell_v2 *cell)
{
	BUG_ON(!cell->shared_count);
	cell->shared_count--;

	// FIXME: shared locks granted above the lock level could starve this
	if (!cell->shared_count) {
		if (cell->exclusive_lock) {
			if (cell->quiesce_continuation) {
				queue_work(prison->wq, cell->quiesce_continuation);
				cell->quiesce_continuation = NULL;
			}
		} else {
			rb_erase(&cell->node, &prison->cells);
			return true;
		}
	}

	return false;
}
bool dm_cell_put_v2(struct dm_bio_prison_v2 *prison,
		    struct dm_bio_prison_cell_v2 *cell)
{
	bool r;
	unsigned long flags;

	spin_lock_irqsave(&prison->lock, flags);
	r = __put(prison, cell);
	spin_unlock_irqrestore(&prison->lock, flags);

	return r;
}
EXPORT_SYMBOL_GPL(dm_cell_put_v2);
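/*
 * Note: a true return from dm_cell_put_v2() means the last shared holder
 * just went away with no exclusive lock pending, so the cell has already
 * been removed from the rb-tree and the caller typically frees it with
 * dm_bio_prison_free_cell_v2().  If an exclusive holder is waiting, the
 * registered quiesce continuation is queued on the prison workqueue instead.
 */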
static int __lock(struct dm_bio_prison_v2 *prison,
		  struct dm_cell_key_v2 *key,
		  unsigned lock_level,
		  struct dm_bio_prison_cell_v2 *cell_prealloc,
		  struct dm_bio_prison_cell_v2 **cell_result)
{
	struct dm_bio_prison_cell_v2 *cell;

	if (__find_or_insert(prison, key, cell_prealloc, &cell)) {
		if (cell->exclusive_lock)
			return -EBUSY;

		cell->exclusive_lock = true;
		cell->exclusive_level = lock_level;
		*cell_result = cell;

		// FIXME: we don't yet know what level these shared locks
		// were taken at, so have to quiesce them all.
		return cell->shared_count > 0;

	} else {
		cell = cell_prealloc;
		cell->shared_count = 0;
		cell->exclusive_lock = true;
		cell->exclusive_level = lock_level;
		*cell_result = cell;
	}

	return 0;
}
int dm_cell_lock_v2(struct dm_bio_prison_v2 *prison,
		    struct dm_cell_key_v2 *key,
		    unsigned lock_level,
		    struct dm_bio_prison_cell_v2 *cell_prealloc,
		    struct dm_bio_prison_cell_v2 **cell_result)
{
	int r;
	unsigned long flags;

	spin_lock_irqsave(&prison->lock, flags);
	r = __lock(prison, key, lock_level, cell_prealloc, cell_result);
	spin_unlock_irqrestore(&prison->lock, flags);

	return r;
}
EXPORT_SYMBOL_GPL(dm_cell_lock_v2);
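/*
 * Illustrative sketch (not part of the original file): taking an exclusive
 * lock for a background operation.  dm_cell_lock_v2() returns 1 when shared
 * holders still exist, so the caller quiesces them before proceeding.  The
 * lock level (1) and "my_work" continuation are hypothetical; as with
 * dm_cell_get_v2(), free the prealloc cell if it wasn't consumed.
 *
 *	r = dm_cell_lock_v2(prison, &key, 1, prealloc, &cell);
 *	if (r < 0)
 *		return r;	// -EBUSY: another exclusive holder exists
 *
 *	if (r)
 *		dm_cell_quiesce_v2(prison, cell, &my_work->worker);
 *	else
 *		queue_work(wq, &my_work->worker);
 *	// the worker runs once every shared holder has called dm_cell_put_v2()
 */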
static void __quiesce(struct dm_bio_prison_v2 *prison,
		      struct dm_bio_prison_cell_v2 *cell,
		      struct work_struct *continuation)
{
	if (!cell->shared_count)
		queue_work(prison->wq, continuation);
	else
		cell->quiesce_continuation = continuation;
}
void dm_cell_quiesce_v2(struct dm_bio_prison_v2 *prison,
			struct dm_bio_prison_cell_v2 *cell,
			struct work_struct *continuation)
{
	unsigned long flags;

	spin_lock_irqsave(&prison->lock, flags);
	__quiesce(prison, cell, continuation);
	spin_unlock_irqrestore(&prison->lock, flags);
}
EXPORT_SYMBOL_GPL(dm_cell_quiesce_v2);
static int __promote(struct dm_bio_prison_v2 *prison,
		     struct dm_bio_prison_cell_v2 *cell,
		     unsigned new_lock_level)
{
	if (!cell->exclusive_lock)
		return -EINVAL;

	cell->exclusive_level = new_lock_level;
	return cell->shared_count > 0;
}
int dm_cell_lock_promote_v2(struct dm_bio_prison_v2 *prison,
			    struct dm_bio_prison_cell_v2 *cell,
			    unsigned new_lock_level)
{
	int r;
	unsigned long flags;

	spin_lock_irqsave(&prison->lock, flags);
	r = __promote(prison, cell, new_lock_level);
	spin_unlock_irqrestore(&prison->lock, flags);

	return r;
}
EXPORT_SYMBOL_GPL(dm_cell_lock_promote_v2);
static bool __unlock(struct dm_bio_prison_v2 *prison,
		     struct dm_bio_prison_cell_v2 *cell,
		     struct bio_list *bios)
{
	BUG_ON(!cell->exclusive_lock);

	bio_list_merge(bios, &cell->bios);
	bio_list_init(&cell->bios);

	if (cell->shared_count) {
		cell->exclusive_lock = 0;
		return false;
	}

	rb_erase(&cell->node, &prison->cells);
	return true;
}
bool dm_cell_unlock_v2(struct dm_bio_prison_v2 *prison,
		       struct dm_bio_prison_cell_v2 *cell,
		       struct bio_list *bios)
{
	bool r;
	unsigned long flags;

	spin_lock_irqsave(&prison->lock, flags);
	r = __unlock(prison, cell, bios);
	spin_unlock_irqrestore(&prison->lock, flags);

	return r;
}
EXPORT_SYMBOL_GPL(dm_cell_unlock_v2);
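/*
 * Illustrative sketch (not part of the original file): dropping the
 * exclusive lock once the background operation finishes, then reissuing the
 * bios that were deferred on the cell while it was locked.  The
 * resubmission path is target-specific; generic_make_request() is just one
 * option.
 *
 *	struct bio *bio;
 *	struct bio_list bios;
 *
 *	bio_list_init(&bios);
 *	if (dm_cell_unlock_v2(prison, cell, &bios))
 *		dm_bio_prison_free_cell_v2(prison, cell);	// cell left the tree
 *
 *	while ((bio = bio_list_pop(&bios)))
 *		generic_make_request(bio);
 */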
/*----------------------------------------------------------------*/

int __init dm_bio_prison_init_v2(void)
{
	_cell_cache = KMEM_CACHE(dm_bio_prison_cell_v2, 0);
	if (!_cell_cache)
		return -ENOMEM;

	return 0;
}
void dm_bio_prison_exit_v2(void)
{
	kmem_cache_destroy(_cell_cache);
	_cell_cache = NULL;
}