/*
 * Copyright (C) 2012-2017 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */
8 #include "dm-bio-prison-v2.h"
10 #include <linux/spinlock.h>
11 #include <linux/mempool.h>
12 #include <linux/module.h>
13 #include <linux/slab.h>
14 #include <linux/rwsem.h>
16 /*----------------------------------------------------------------*/
18 #define MIN_CELLS 1024
20 struct dm_bio_prison_v2
{
21 struct workqueue_struct
*wq
;
28 static struct kmem_cache
*_cell_cache
;
30 /*----------------------------------------------------------------*/
33 * @nr_cells should be the number of cells you want in use _concurrently_.
34 * Don't confuse it with the number of distinct keys.
36 struct dm_bio_prison_v2
*dm_bio_prison_create_v2(struct workqueue_struct
*wq
)
38 struct dm_bio_prison_v2
*prison
= kmalloc(sizeof(*prison
), GFP_KERNEL
);
44 spin_lock_init(&prison
->lock
);
46 prison
->cell_pool
= mempool_create_slab_pool(MIN_CELLS
, _cell_cache
);
47 if (!prison
->cell_pool
) {
52 prison
->cells
= RB_ROOT
;
56 EXPORT_SYMBOL_GPL(dm_bio_prison_create_v2
);
58 void dm_bio_prison_destroy_v2(struct dm_bio_prison_v2
*prison
)
60 mempool_destroy(prison
->cell_pool
);
63 EXPORT_SYMBOL_GPL(dm_bio_prison_destroy_v2
);
65 struct dm_bio_prison_cell_v2
*dm_bio_prison_alloc_cell_v2(struct dm_bio_prison_v2
*prison
, gfp_t gfp
)
67 return mempool_alloc(prison
->cell_pool
, gfp
);
69 EXPORT_SYMBOL_GPL(dm_bio_prison_alloc_cell_v2
);
71 void dm_bio_prison_free_cell_v2(struct dm_bio_prison_v2
*prison
,
72 struct dm_bio_prison_cell_v2
*cell
)
74 mempool_free(cell
, prison
->cell_pool
);
76 EXPORT_SYMBOL_GPL(dm_bio_prison_free_cell_v2
);
78 static void __setup_new_cell(struct dm_cell_key_v2
*key
,
79 struct dm_bio_prison_cell_v2
*cell
)
81 memset(cell
, 0, sizeof(*cell
));
82 memcpy(&cell
->key
, key
, sizeof(cell
->key
));
83 bio_list_init(&cell
->bios
);
86 static int cmp_keys(struct dm_cell_key_v2
*lhs
,
87 struct dm_cell_key_v2
*rhs
)
89 if (lhs
->virtual < rhs
->virtual)
92 if (lhs
->virtual > rhs
->virtual)
95 if (lhs
->dev
< rhs
->dev
)
98 if (lhs
->dev
> rhs
->dev
)
101 if (lhs
->block_end
<= rhs
->block_begin
)
104 if (lhs
->block_begin
>= rhs
->block_end
)
111 * Returns true if node found, otherwise it inserts a new one.
113 static bool __find_or_insert(struct dm_bio_prison_v2
*prison
,
114 struct dm_cell_key_v2
*key
,
115 struct dm_bio_prison_cell_v2
*cell_prealloc
,
116 struct dm_bio_prison_cell_v2
**result
)
119 struct rb_node
**new = &prison
->cells
.rb_node
, *parent
= NULL
;
122 struct dm_bio_prison_cell_v2
*cell
=
123 rb_entry(*new, struct dm_bio_prison_cell_v2
, node
);
125 r
= cmp_keys(key
, &cell
->key
);
129 new = &((*new)->rb_left
);
132 new = &((*new)->rb_right
);
140 __setup_new_cell(key
, cell_prealloc
);
141 *result
= cell_prealloc
;
142 rb_link_node(&cell_prealloc
->node
, parent
, new);
143 rb_insert_color(&cell_prealloc
->node
, &prison
->cells
);
148 static bool __get(struct dm_bio_prison_v2
*prison
,
149 struct dm_cell_key_v2
*key
,
152 struct dm_bio_prison_cell_v2
*cell_prealloc
,
153 struct dm_bio_prison_cell_v2
**cell
)
155 if (__find_or_insert(prison
, key
, cell_prealloc
, cell
)) {
156 if ((*cell
)->exclusive_lock
) {
157 if (lock_level
<= (*cell
)->exclusive_level
) {
158 bio_list_add(&(*cell
)->bios
, inmate
);
163 (*cell
)->shared_count
++;
166 (*cell
)->shared_count
= 1;
171 bool dm_cell_get_v2(struct dm_bio_prison_v2
*prison
,
172 struct dm_cell_key_v2
*key
,
175 struct dm_bio_prison_cell_v2
*cell_prealloc
,
176 struct dm_bio_prison_cell_v2
**cell_result
)
181 spin_lock_irqsave(&prison
->lock
, flags
);
182 r
= __get(prison
, key
, lock_level
, inmate
, cell_prealloc
, cell_result
);
183 spin_unlock_irqrestore(&prison
->lock
, flags
);
187 EXPORT_SYMBOL_GPL(dm_cell_get_v2
);
189 static bool __put(struct dm_bio_prison_v2
*prison
,
190 struct dm_bio_prison_cell_v2
*cell
)
192 BUG_ON(!cell
->shared_count
);
193 cell
->shared_count
--;
195 // FIXME: shared locks granted above the lock level could starve this
196 if (!cell
->shared_count
) {
197 if (cell
->exclusive_lock
){
198 if (cell
->quiesce_continuation
) {
199 queue_work(prison
->wq
, cell
->quiesce_continuation
);
200 cell
->quiesce_continuation
= NULL
;
203 rb_erase(&cell
->node
, &prison
->cells
);
211 bool dm_cell_put_v2(struct dm_bio_prison_v2
*prison
,
212 struct dm_bio_prison_cell_v2
*cell
)
217 spin_lock_irqsave(&prison
->lock
, flags
);
218 r
= __put(prison
, cell
);
219 spin_unlock_irqrestore(&prison
->lock
, flags
);
223 EXPORT_SYMBOL_GPL(dm_cell_put_v2
);
225 static int __lock(struct dm_bio_prison_v2
*prison
,
226 struct dm_cell_key_v2
*key
,
228 struct dm_bio_prison_cell_v2
*cell_prealloc
,
229 struct dm_bio_prison_cell_v2
**cell_result
)
231 struct dm_bio_prison_cell_v2
*cell
;
233 if (__find_or_insert(prison
, key
, cell_prealloc
, &cell
)) {
234 if (cell
->exclusive_lock
)
237 cell
->exclusive_lock
= true;
238 cell
->exclusive_level
= lock_level
;
241 // FIXME: we don't yet know what level these shared locks
242 // were taken at, so have to quiesce them all.
243 return cell
->shared_count
> 0;
246 cell
= cell_prealloc
;
247 cell
->shared_count
= 0;
248 cell
->exclusive_lock
= true;
249 cell
->exclusive_level
= lock_level
;
256 int dm_cell_lock_v2(struct dm_bio_prison_v2
*prison
,
257 struct dm_cell_key_v2
*key
,
259 struct dm_bio_prison_cell_v2
*cell_prealloc
,
260 struct dm_bio_prison_cell_v2
**cell_result
)
265 spin_lock_irqsave(&prison
->lock
, flags
);
266 r
= __lock(prison
, key
, lock_level
, cell_prealloc
, cell_result
);
267 spin_unlock_irqrestore(&prison
->lock
, flags
);
271 EXPORT_SYMBOL_GPL(dm_cell_lock_v2
);
273 static void __quiesce(struct dm_bio_prison_v2
*prison
,
274 struct dm_bio_prison_cell_v2
*cell
,
275 struct work_struct
*continuation
)
277 if (!cell
->shared_count
)
278 queue_work(prison
->wq
, continuation
);
280 cell
->quiesce_continuation
= continuation
;
283 void dm_cell_quiesce_v2(struct dm_bio_prison_v2
*prison
,
284 struct dm_bio_prison_cell_v2
*cell
,
285 struct work_struct
*continuation
)
289 spin_lock_irqsave(&prison
->lock
, flags
);
290 __quiesce(prison
, cell
, continuation
);
291 spin_unlock_irqrestore(&prison
->lock
, flags
);
293 EXPORT_SYMBOL_GPL(dm_cell_quiesce_v2
);
295 static int __promote(struct dm_bio_prison_v2
*prison
,
296 struct dm_bio_prison_cell_v2
*cell
,
297 unsigned new_lock_level
)
299 if (!cell
->exclusive_lock
)
302 cell
->exclusive_level
= new_lock_level
;
303 return cell
->shared_count
> 0;
306 int dm_cell_lock_promote_v2(struct dm_bio_prison_v2
*prison
,
307 struct dm_bio_prison_cell_v2
*cell
,
308 unsigned new_lock_level
)
313 spin_lock_irqsave(&prison
->lock
, flags
);
314 r
= __promote(prison
, cell
, new_lock_level
);
315 spin_unlock_irqrestore(&prison
->lock
, flags
);
319 EXPORT_SYMBOL_GPL(dm_cell_lock_promote_v2
);
321 static bool __unlock(struct dm_bio_prison_v2
*prison
,
322 struct dm_bio_prison_cell_v2
*cell
,
323 struct bio_list
*bios
)
325 BUG_ON(!cell
->exclusive_lock
);
327 bio_list_merge(bios
, &cell
->bios
);
328 bio_list_init(&cell
->bios
);
330 if (cell
->shared_count
) {
331 cell
->exclusive_lock
= 0;
335 rb_erase(&cell
->node
, &prison
->cells
);
339 bool dm_cell_unlock_v2(struct dm_bio_prison_v2
*prison
,
340 struct dm_bio_prison_cell_v2
*cell
,
341 struct bio_list
*bios
)
346 spin_lock_irqsave(&prison
->lock
, flags
);
347 r
= __unlock(prison
, cell
, bios
);
348 spin_unlock_irqrestore(&prison
->lock
, flags
);
352 EXPORT_SYMBOL_GPL(dm_cell_unlock_v2
);
354 /*----------------------------------------------------------------*/
356 int __init
dm_bio_prison_init_v2(void)
358 _cell_cache
= KMEM_CACHE(dm_bio_prison_cell_v2
, 0);
365 void dm_bio_prison_exit_v2(void)
367 kmem_cache_destroy(_cell_cache
);