/*
 * Copyright (C) 2012-2017 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-prison-v2.h"

#include <linux/spinlock.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/rwsem.h>

/*----------------------------------------------------------------*/

#define MIN_CELLS 1024
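
/*
 * Each cell represents a contended range of blocks, identified by a
 * struct dm_cell_key_v2.  Cells live in prison->cells, an rb-tree
 * ordered by cmp_keys() below; prison->lock protects both the tree and
 * the per-cell lock state.
 */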

struct dm_bio_prison_v2 {
	struct workqueue_struct *wq;

	spinlock_t lock;
	struct rb_root cells;
	mempool_t cell_pool;
};

static struct kmem_cache *_cell_cache;

/*----------------------------------------------------------------*/

/*
 * The cell mempool is sized by MIN_CELLS, the number of cells expected
 * to be in use _concurrently_.  Don't confuse it with the number of
 * distinct keys.
 */
struct dm_bio_prison_v2 *dm_bio_prison_create_v2(struct workqueue_struct *wq)
{
	struct dm_bio_prison_v2 *prison = kzalloc(sizeof(*prison), GFP_KERNEL);
	int ret;

	if (!prison)
		return NULL;

	prison->wq = wq;
	spin_lock_init(&prison->lock);

	ret = mempool_init_slab_pool(&prison->cell_pool, MIN_CELLS, _cell_cache);
	if (ret) {
		kfree(prison);
		return NULL;
	}

	prison->cells = RB_ROOT;

	return prison;
}
EXPORT_SYMBOL_GPL(dm_bio_prison_create_v2);
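
/*
 * A minimal creation/teardown sketch.  This is illustrative, not taken
 * from an in-tree caller; the workqueue name and error handling are
 * assumptions:
 *
 *	struct workqueue_struct *wq =
 *		alloc_ordered_workqueue("dm-example", WQ_MEM_RECLAIM);
 *	struct dm_bio_prison_v2 *prison;
 *
 *	if (!wq)
 *		return -ENOMEM;
 *
 *	prison = dm_bio_prison_create_v2(wq);
 *	if (!prison) {
 *		destroy_workqueue(wq);
 *		return -ENOMEM;
 *	}
 *	...
 *	dm_bio_prison_destroy_v2(prison);
 *	destroy_workqueue(wq);
 */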

void dm_bio_prison_destroy_v2(struct dm_bio_prison_v2 *prison)
{
	mempool_exit(&prison->cell_pool);
	kfree(prison);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_destroy_v2);

struct dm_bio_prison_cell_v2 *dm_bio_prison_alloc_cell_v2(struct dm_bio_prison_v2 *prison,
							   gfp_t gfp)
{
	return mempool_alloc(&prison->cell_pool, gfp);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_alloc_cell_v2);

void dm_bio_prison_free_cell_v2(struct dm_bio_prison_v2 *prison,
				struct dm_bio_prison_cell_v2 *cell)
{
	mempool_free(cell, &prison->cell_pool);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_free_cell_v2);
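
/*
 * Callers preallocate cells because insertion into the rb-tree happens
 * under prison->lock (a spinlock), where blocking allocation is not
 * permitted.  A hedged sketch of the intended pattern; submit_it() is a
 * hypothetical helper:
 *
 *	struct dm_bio_prison_cell_v2 *prealloc, *cell;
 *
 *	prealloc = dm_bio_prison_alloc_cell_v2(prison, GFP_NOIO);
 *	if (dm_cell_get_v2(prison, &key, lock_level, bio, prealloc, &cell))
 *		submit_it(bio);		// shared lock granted
 *	if (cell != prealloc)		// prison reused an existing cell
 *		dm_bio_prison_free_cell_v2(prison, prealloc);
 */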

static void __setup_new_cell(struct dm_cell_key_v2 *key,
			     struct dm_bio_prison_cell_v2 *cell)
{
	memset(cell, 0, sizeof(*cell));
	memcpy(&cell->key, key, sizeof(cell->key));
	bio_list_init(&cell->bios);
}

static int cmp_keys(struct dm_cell_key_v2 *lhs,
		    struct dm_cell_key_v2 *rhs)
{
	if (lhs->virtual < rhs->virtual)
		return -1;

	if (lhs->virtual > rhs->virtual)
		return 1;

	if (lhs->dev < rhs->dev)
		return -1;

	if (lhs->dev > rhs->dev)
		return 1;

	if (lhs->block_end <= rhs->block_begin)
		return -1;

	if (lhs->block_begin >= rhs->block_end)
		return 1;

	/* The block ranges overlap, so the keys are considered equal. */
	return 0;
}
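
/*
 * Note the final two comparisons above: two keys compare equal whenever
 * their [block_begin, block_end) ranges overlap, so overlapping ranges
 * collide on a single cell.  For example (illustrative values), keys
 * covering blocks [0, 8) and [4, 12) on the same device map to the same
 * cell, and a lock on either range blocks the other.
 */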

/*
 * Returns true if node found, otherwise it inserts a new one.
 */
static bool __find_or_insert(struct dm_bio_prison_v2 *prison,
			     struct dm_cell_key_v2 *key,
			     struct dm_bio_prison_cell_v2 *cell_prealloc,
			     struct dm_bio_prison_cell_v2 **result)
{
	int r;
	struct rb_node **new = &prison->cells.rb_node, *parent = NULL;

	while (*new) {
		struct dm_bio_prison_cell_v2 *cell =
			rb_entry(*new, struct dm_bio_prison_cell_v2, node);

		r = cmp_keys(key, &cell->key);

		parent = *new;
		if (r < 0)
			new = &((*new)->rb_left);
		else if (r > 0)
			new = &((*new)->rb_right);
		else {
			*result = cell;
			return true;
		}
	}

	/* No existing cell for this key; link the preallocated one in. */
	__setup_new_cell(key, cell_prealloc);
	*result = cell_prealloc;
	rb_link_node(&cell_prealloc->node, parent, new);
	rb_insert_color(&cell_prealloc->node, &prison->cells);

	return false;
}

static bool __get(struct dm_bio_prison_v2 *prison,
		  struct dm_cell_key_v2 *key,
		  unsigned lock_level,
		  struct bio *inmate,
		  struct dm_bio_prison_cell_v2 *cell_prealloc,
		  struct dm_bio_prison_cell_v2 **cell)
{
	if (__find_or_insert(prison, key, cell_prealloc, cell)) {
		if ((*cell)->exclusive_lock) {
			if (lock_level <= (*cell)->exclusive_level) {
				/* Blocked by the exclusive holder; park the bio. */
				bio_list_add(&(*cell)->bios, inmate);
				return false;
			}
		}

		(*cell)->shared_count++;

	} else
		(*cell)->shared_count = 1;

	return true;
}

bool dm_cell_get_v2(struct dm_bio_prison_v2 *prison,
		    struct dm_cell_key_v2 *key,
		    unsigned lock_level,
		    struct bio *inmate,
		    struct dm_bio_prison_cell_v2 *cell_prealloc,
		    struct dm_bio_prison_cell_v2 **cell_result)
{
	bool r;
	unsigned long flags;

	spin_lock_irqsave(&prison->lock, flags);
	r = __get(prison, key, lock_level, inmate, cell_prealloc, cell_result);
	spin_unlock_irqrestore(&prison->lock, flags);

	return r;
}
EXPORT_SYMBOL_GPL(dm_cell_get_v2);
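
/*
 * dm_cell_get_v2() returns true if @inmate may proceed under a shared
 * lock; on false the bio has been parked on the cell and will be handed
 * back through dm_cell_unlock_v2()'s bio list.  A hedged sketch of the
 * pairing with dm_cell_put_v2(); do_the_io() is a hypothetical helper:
 *
 *	if (dm_cell_get_v2(prison, &key, level, bio, prealloc, &cell)) {
 *		do_the_io(bio);
 *		...
 *		if (dm_cell_put_v2(prison, cell))
 *			dm_bio_prison_free_cell_v2(prison, cell);
 *	}
 */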

static bool __put(struct dm_bio_prison_v2 *prison,
		  struct dm_bio_prison_cell_v2 *cell)
{
	BUG_ON(!cell->shared_count);
	cell->shared_count--;

	// FIXME: shared locks granted above the lock level could starve this
	if (!cell->shared_count) {
		if (cell->exclusive_lock) {
			if (cell->quiesce_continuation) {
				queue_work(prison->wq, cell->quiesce_continuation);
				cell->quiesce_continuation = NULL;
			}
		} else {
			/* No holders left at all; drop the cell from the tree. */
			rb_erase(&cell->node, &prison->cells);
			return true;
		}
	}

	return false;
}

bool dm_cell_put_v2(struct dm_bio_prison_v2 *prison,
		    struct dm_bio_prison_cell_v2 *cell)
{
	bool r;
	unsigned long flags;

	spin_lock_irqsave(&prison->lock, flags);
	r = __put(prison, cell);
	spin_unlock_irqrestore(&prison->lock, flags);

	return r;
}
EXPORT_SYMBOL_GPL(dm_cell_put_v2);

static int __lock(struct dm_bio_prison_v2 *prison,
		  struct dm_cell_key_v2 *key,
		  unsigned lock_level,
		  struct dm_bio_prison_cell_v2 *cell_prealloc,
		  struct dm_bio_prison_cell_v2 **cell_result)
{
	struct dm_bio_prison_cell_v2 *cell;

	if (__find_or_insert(prison, key, cell_prealloc, &cell)) {
		if (cell->exclusive_lock)
			return -EBUSY;

		cell->exclusive_lock = true;
		cell->exclusive_level = lock_level;
		*cell_result = cell;

		// FIXME: we don't yet know what level these shared locks
		// were taken at, so have to quiesce them all.
		return cell->shared_count > 0;

	} else {
		cell = cell_prealloc;
		cell->shared_count = 0;
		cell->exclusive_lock = true;
		cell->exclusive_level = lock_level;
		*cell_result = cell;
	}

	return 0;
}

int dm_cell_lock_v2(struct dm_bio_prison_v2 *prison,
		    struct dm_cell_key_v2 *key,
		    unsigned lock_level,
		    struct dm_bio_prison_cell_v2 *cell_prealloc,
		    struct dm_bio_prison_cell_v2 **cell_result)
{
	int r;
	unsigned long flags;

	spin_lock_irqsave(&prison->lock, flags);
	r = __lock(prison, key, lock_level, cell_prealloc, cell_result);
	spin_unlock_irqrestore(&prison->lock, flags);

	return r;
}
EXPORT_SYMBOL_GPL(dm_cell_lock_v2);
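
/*
 * dm_cell_lock_v2() returns a negative errno on failure (-EBUSY if the
 * cell is already exclusively locked), 0 if the lock was granted with no
 * shared holders, and > 0 if shared holders remain and must be quiesced
 * first.  A hedged sketch; continuation_work is a hypothetical
 * work_struct owned by the caller:
 *
 *	r = dm_cell_lock_v2(prison, &key, level, prealloc, &cell);
 *	if (r < 0)
 *		return r;
 *	if (r > 0)		// wait for shared holders to drain
 *		dm_cell_quiesce_v2(prison, cell, &continuation_work);
 *	else			// no shared holders, proceed at once
 *		queue_work(wq, &continuation_work);
 */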

static void __quiesce(struct dm_bio_prison_v2 *prison,
		      struct dm_bio_prison_cell_v2 *cell,
		      struct work_struct *continuation)
{
	if (!cell->shared_count)
		queue_work(prison->wq, continuation);
	else
		cell->quiesce_continuation = continuation;
}

void dm_cell_quiesce_v2(struct dm_bio_prison_v2 *prison,
			struct dm_bio_prison_cell_v2 *cell,
			struct work_struct *continuation)
{
	unsigned long flags;

	spin_lock_irqsave(&prison->lock, flags);
	__quiesce(prison, cell, continuation);
	spin_unlock_irqrestore(&prison->lock, flags);
}
EXPORT_SYMBOL_GPL(dm_cell_quiesce_v2);

static int __promote(struct dm_bio_prison_v2 *prison,
		     struct dm_bio_prison_cell_v2 *cell,
		     unsigned new_lock_level)
{
	if (!cell->exclusive_lock)
		return -EINVAL;

	cell->exclusive_level = new_lock_level;
	return cell->shared_count > 0;
}

int dm_cell_lock_promote_v2(struct dm_bio_prison_v2 *prison,
			    struct dm_bio_prison_cell_v2 *cell,
			    unsigned new_lock_level)
{
	int r;
	unsigned long flags;

	spin_lock_irqsave(&prison->lock, flags);
	r = __promote(prison, cell, new_lock_level);
	spin_unlock_irqrestore(&prison->lock, flags);

	return r;
}
EXPORT_SYMBOL_GPL(dm_cell_lock_promote_v2);
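
/*
 * Promotion raises the level of an exclusive lock that is already held,
 * blocking a wider class of bios without unlocking.  As with
 * dm_cell_lock_v2(), a return > 0 means shared holders still exist and
 * should be quiesced before proceeding, e.g. (hedged sketch):
 *
 *	r = dm_cell_lock_promote_v2(prison, cell, new_level);
 *	if (r > 0)
 *		dm_cell_quiesce_v2(prison, cell, &continuation_work);
 */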

static bool __unlock(struct dm_bio_prison_v2 *prison,
		     struct dm_bio_prison_cell_v2 *cell,
		     struct bio_list *bios)
{
	BUG_ON(!cell->exclusive_lock);

	/* Hand the parked bios back to the caller for resubmission. */
	bio_list_merge(bios, &cell->bios);
	bio_list_init(&cell->bios);

	if (cell->shared_count) {
		cell->exclusive_lock = false;
		return false;
	}

	rb_erase(&cell->node, &prison->cells);
	return true;
}

bool dm_cell_unlock_v2(struct dm_bio_prison_v2 *prison,
		       struct dm_bio_prison_cell_v2 *cell,
		       struct bio_list *bios)
{
	bool r;
	unsigned long flags;

	spin_lock_irqsave(&prison->lock, flags);
	r = __unlock(prison, cell, bios);
	spin_unlock_irqrestore(&prison->lock, flags);

	return r;
}
EXPORT_SYMBOL_GPL(dm_cell_unlock_v2);
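
/*
 * Putting it together, the tail of an exclusive lock's lifecycle might
 * look like this hedged sketch; requeue_bios() is a hypothetical helper
 * that resubmits the bios that were parked on the cell:
 *
 *	struct bio_list bios;
 *
 *	bio_list_init(&bios);
 *	if (dm_cell_unlock_v2(prison, cell, &bios))
 *		dm_bio_prison_free_cell_v2(prison, cell);
 *	requeue_bios(&bios);
 */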

/*----------------------------------------------------------------*/

int __init dm_bio_prison_init_v2(void)
{
	_cell_cache = KMEM_CACHE(dm_bio_prison_cell_v2, 0);
	if (!_cell_cache)
		return -ENOMEM;

	return 0;
}

void dm_bio_prison_exit_v2(void)
{
	kmem_cache_destroy(_cell_cache);
	_cell_cache = NULL;
}