/*
 * Copyright (C) 2012-2017 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */
8 #include "dm-bio-prison-v2.h"
10 #include <linux/spinlock.h>
11 #include <linux/mempool.h>
12 #include <linux/module.h>
13 #include <linux/slab.h>
14 #include <linux/rwsem.h>
/*----------------------------------------------------------------*/
18 #define MIN_CELLS 1024
20 struct dm_bio_prison_v2
{
21 struct workqueue_struct
*wq
;
28 static struct kmem_cache
*_cell_cache
;
/*----------------------------------------------------------------*/
33 * @nr_cells should be the number of cells you want in use _concurrently_.
34 * Don't confuse it with the number of distinct keys.
36 struct dm_bio_prison_v2
*dm_bio_prison_create_v2(struct workqueue_struct
*wq
)
38 struct dm_bio_prison_v2
*prison
= kzalloc(sizeof(*prison
), GFP_KERNEL
);
45 spin_lock_init(&prison
->lock
);
47 ret
= mempool_init_slab_pool(&prison
->cell_pool
, MIN_CELLS
, _cell_cache
);
53 prison
->cells
= RB_ROOT
;
57 EXPORT_SYMBOL_GPL(dm_bio_prison_create_v2
);
59 void dm_bio_prison_destroy_v2(struct dm_bio_prison_v2
*prison
)
61 mempool_exit(&prison
->cell_pool
);
64 EXPORT_SYMBOL_GPL(dm_bio_prison_destroy_v2
);
66 struct dm_bio_prison_cell_v2
*dm_bio_prison_alloc_cell_v2(struct dm_bio_prison_v2
*prison
, gfp_t gfp
)
68 return mempool_alloc(&prison
->cell_pool
, gfp
);
70 EXPORT_SYMBOL_GPL(dm_bio_prison_alloc_cell_v2
);
72 void dm_bio_prison_free_cell_v2(struct dm_bio_prison_v2
*prison
,
73 struct dm_bio_prison_cell_v2
*cell
)
75 mempool_free(cell
, &prison
->cell_pool
);
77 EXPORT_SYMBOL_GPL(dm_bio_prison_free_cell_v2
);
79 static void __setup_new_cell(struct dm_cell_key_v2
*key
,
80 struct dm_bio_prison_cell_v2
*cell
)
82 memset(cell
, 0, sizeof(*cell
));
83 memcpy(&cell
->key
, key
, sizeof(cell
->key
));
84 bio_list_init(&cell
->bios
);
87 static int cmp_keys(struct dm_cell_key_v2
*lhs
,
88 struct dm_cell_key_v2
*rhs
)
90 if (lhs
->virtual < rhs
->virtual)
93 if (lhs
->virtual > rhs
->virtual)
96 if (lhs
->dev
< rhs
->dev
)
99 if (lhs
->dev
> rhs
->dev
)
102 if (lhs
->block_end
<= rhs
->block_begin
)
105 if (lhs
->block_begin
>= rhs
->block_end
)
112 * Returns true if node found, otherwise it inserts a new one.
114 static bool __find_or_insert(struct dm_bio_prison_v2
*prison
,
115 struct dm_cell_key_v2
*key
,
116 struct dm_bio_prison_cell_v2
*cell_prealloc
,
117 struct dm_bio_prison_cell_v2
**result
)
120 struct rb_node
**new = &prison
->cells
.rb_node
, *parent
= NULL
;
123 struct dm_bio_prison_cell_v2
*cell
=
124 rb_entry(*new, struct dm_bio_prison_cell_v2
, node
);
126 r
= cmp_keys(key
, &cell
->key
);
130 new = &((*new)->rb_left
);
133 new = &((*new)->rb_right
);
141 __setup_new_cell(key
, cell_prealloc
);
142 *result
= cell_prealloc
;
143 rb_link_node(&cell_prealloc
->node
, parent
, new);
144 rb_insert_color(&cell_prealloc
->node
, &prison
->cells
);
149 static bool __get(struct dm_bio_prison_v2
*prison
,
150 struct dm_cell_key_v2
*key
,
153 struct dm_bio_prison_cell_v2
*cell_prealloc
,
154 struct dm_bio_prison_cell_v2
**cell
)
156 if (__find_or_insert(prison
, key
, cell_prealloc
, cell
)) {
157 if ((*cell
)->exclusive_lock
) {
158 if (lock_level
<= (*cell
)->exclusive_level
) {
159 bio_list_add(&(*cell
)->bios
, inmate
);
164 (*cell
)->shared_count
++;
167 (*cell
)->shared_count
= 1;
172 bool dm_cell_get_v2(struct dm_bio_prison_v2
*prison
,
173 struct dm_cell_key_v2
*key
,
176 struct dm_bio_prison_cell_v2
*cell_prealloc
,
177 struct dm_bio_prison_cell_v2
**cell_result
)
181 spin_lock_irq(&prison
->lock
);
182 r
= __get(prison
, key
, lock_level
, inmate
, cell_prealloc
, cell_result
);
183 spin_unlock_irq(&prison
->lock
);
187 EXPORT_SYMBOL_GPL(dm_cell_get_v2
);
189 static bool __put(struct dm_bio_prison_v2
*prison
,
190 struct dm_bio_prison_cell_v2
*cell
)
192 BUG_ON(!cell
->shared_count
);
193 cell
->shared_count
--;
195 // FIXME: shared locks granted above the lock level could starve this
196 if (!cell
->shared_count
) {
197 if (cell
->exclusive_lock
){
198 if (cell
->quiesce_continuation
) {
199 queue_work(prison
->wq
, cell
->quiesce_continuation
);
200 cell
->quiesce_continuation
= NULL
;
203 rb_erase(&cell
->node
, &prison
->cells
);
211 bool dm_cell_put_v2(struct dm_bio_prison_v2
*prison
,
212 struct dm_bio_prison_cell_v2
*cell
)
217 spin_lock_irqsave(&prison
->lock
, flags
);
218 r
= __put(prison
, cell
);
219 spin_unlock_irqrestore(&prison
->lock
, flags
);
223 EXPORT_SYMBOL_GPL(dm_cell_put_v2
);
225 static int __lock(struct dm_bio_prison_v2
*prison
,
226 struct dm_cell_key_v2
*key
,
228 struct dm_bio_prison_cell_v2
*cell_prealloc
,
229 struct dm_bio_prison_cell_v2
**cell_result
)
231 struct dm_bio_prison_cell_v2
*cell
;
233 if (__find_or_insert(prison
, key
, cell_prealloc
, &cell
)) {
234 if (cell
->exclusive_lock
)
237 cell
->exclusive_lock
= true;
238 cell
->exclusive_level
= lock_level
;
241 // FIXME: we don't yet know what level these shared locks
242 // were taken at, so have to quiesce them all.
243 return cell
->shared_count
> 0;
246 cell
= cell_prealloc
;
247 cell
->shared_count
= 0;
248 cell
->exclusive_lock
= true;
249 cell
->exclusive_level
= lock_level
;
256 int dm_cell_lock_v2(struct dm_bio_prison_v2
*prison
,
257 struct dm_cell_key_v2
*key
,
259 struct dm_bio_prison_cell_v2
*cell_prealloc
,
260 struct dm_bio_prison_cell_v2
**cell_result
)
264 spin_lock_irq(&prison
->lock
);
265 r
= __lock(prison
, key
, lock_level
, cell_prealloc
, cell_result
);
266 spin_unlock_irq(&prison
->lock
);
270 EXPORT_SYMBOL_GPL(dm_cell_lock_v2
);
272 static void __quiesce(struct dm_bio_prison_v2
*prison
,
273 struct dm_bio_prison_cell_v2
*cell
,
274 struct work_struct
*continuation
)
276 if (!cell
->shared_count
)
277 queue_work(prison
->wq
, continuation
);
279 cell
->quiesce_continuation
= continuation
;
282 void dm_cell_quiesce_v2(struct dm_bio_prison_v2
*prison
,
283 struct dm_bio_prison_cell_v2
*cell
,
284 struct work_struct
*continuation
)
286 spin_lock_irq(&prison
->lock
);
287 __quiesce(prison
, cell
, continuation
);
288 spin_unlock_irq(&prison
->lock
);
290 EXPORT_SYMBOL_GPL(dm_cell_quiesce_v2
);
292 static int __promote(struct dm_bio_prison_v2
*prison
,
293 struct dm_bio_prison_cell_v2
*cell
,
294 unsigned new_lock_level
)
296 if (!cell
->exclusive_lock
)
299 cell
->exclusive_level
= new_lock_level
;
300 return cell
->shared_count
> 0;
303 int dm_cell_lock_promote_v2(struct dm_bio_prison_v2
*prison
,
304 struct dm_bio_prison_cell_v2
*cell
,
305 unsigned new_lock_level
)
309 spin_lock_irq(&prison
->lock
);
310 r
= __promote(prison
, cell
, new_lock_level
);
311 spin_unlock_irq(&prison
->lock
);
315 EXPORT_SYMBOL_GPL(dm_cell_lock_promote_v2
);
317 static bool __unlock(struct dm_bio_prison_v2
*prison
,
318 struct dm_bio_prison_cell_v2
*cell
,
319 struct bio_list
*bios
)
321 BUG_ON(!cell
->exclusive_lock
);
323 bio_list_merge(bios
, &cell
->bios
);
324 bio_list_init(&cell
->bios
);
326 if (cell
->shared_count
) {
327 cell
->exclusive_lock
= false;
331 rb_erase(&cell
->node
, &prison
->cells
);
335 bool dm_cell_unlock_v2(struct dm_bio_prison_v2
*prison
,
336 struct dm_bio_prison_cell_v2
*cell
,
337 struct bio_list
*bios
)
341 spin_lock_irq(&prison
->lock
);
342 r
= __unlock(prison
, cell
, bios
);
343 spin_unlock_irq(&prison
->lock
);
347 EXPORT_SYMBOL_GPL(dm_cell_unlock_v2
);
/*----------------------------------------------------------------*/
351 int __init
dm_bio_prison_init_v2(void)
353 _cell_cache
= KMEM_CACHE(dm_bio_prison_cell_v2
, 0);
360 void dm_bio_prison_exit_v2(void)
362 kmem_cache_destroy(_cell_cache
);