/*
 * Copyright (C) 2012 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-prison.h"

#include <linux/spinlock.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>

/*----------------------------------------------------------------*/

struct dm_bio_prison {
	spinlock_t lock;
	mempool_t *cell_pool;

	unsigned nr_buckets;
	unsigned hash_mask;
	struct hlist_head *cells;
};

/*----------------------------------------------------------------*/

static uint32_t calc_nr_buckets(unsigned nr_cells)
{
	uint32_t n = 128;

	nr_cells /= 4;
	nr_cells = min(nr_cells, 8192u);

	while (n < nr_cells)
		n <<= 1;

	return n;
}

static struct kmem_cache *_cell_cache;

/*
 * @nr_cells should be the number of cells you want in use _concurrently_.
 * Don't confuse it with the number of distinct keys.
 */
struct dm_bio_prison *dm_bio_prison_create(unsigned nr_cells)
{
	unsigned i;
	uint32_t nr_buckets = calc_nr_buckets(nr_cells);
	size_t len = sizeof(struct dm_bio_prison) +
			(sizeof(struct hlist_head) * nr_buckets);
	struct dm_bio_prison *prison = kmalloc(len, GFP_KERNEL);

	if (!prison)
		return NULL;

	spin_lock_init(&prison->lock);
	prison->cell_pool = mempool_create_slab_pool(nr_cells, _cell_cache);
	if (!prison->cell_pool) {
		kfree(prison);
		return NULL;
	}

	prison->nr_buckets = nr_buckets;
	prison->hash_mask = nr_buckets - 1;
	prison->cells = (struct hlist_head *) (prison + 1);
	for (i = 0; i < nr_buckets; i++)
		INIT_HLIST_HEAD(prison->cells + i);

	return prison;
}
EXPORT_SYMBOL_GPL(dm_bio_prison_create);

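/*
 * Usage sketch (illustrative only, not part of this file): a target's
 * constructor/destructor pair would typically size the prison by the
 * number of bios expected to be in flight at once.  The "pool" struct
 * and the figure of 1024 cells below are hypothetical.
 *
 *	struct pool {
 *		struct dm_bio_prison *prison;
 *	};
 *
 *	static int pool_create(struct pool *pool)
 *	{
 *		pool->prison = dm_bio_prison_create(1024);
 *		if (!pool->prison)
 *			return -ENOMEM;
 *		return 0;
 *	}
 *
 *	static void pool_destroy(struct pool *pool)
 *	{
 *		dm_bio_prison_destroy(pool->prison);
 *	}
 */
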
void dm_bio_prison_destroy(struct dm_bio_prison *prison)
{
	mempool_destroy(prison->cell_pool);
	kfree(prison);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_destroy);

struct dm_bio_prison_cell *dm_bio_prison_alloc_cell(struct dm_bio_prison *prison, gfp_t gfp)
{
	return mempool_alloc(prison->cell_pool, gfp);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_alloc_cell);

void dm_bio_prison_free_cell(struct dm_bio_prison *prison,
			     struct dm_bio_prison_cell *cell)
{
	mempool_free(cell, prison->cell_pool);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_free_cell);

static uint32_t hash_key(struct dm_bio_prison *prison, struct dm_cell_key *key)
{
	const unsigned long BIG_PRIME = 4294967291UL;
	uint64_t hash = key->block * BIG_PRIME;

	return (uint32_t) (hash & prison->hash_mask);
}

static int keys_equal(struct dm_cell_key *lhs, struct dm_cell_key *rhs)
{
	return (lhs->virtual == rhs->virtual) &&
	       (lhs->dev == rhs->dev) &&
	       (lhs->block == rhs->block);
}

static struct dm_bio_prison_cell *__search_bucket(struct hlist_head *bucket,
						  struct dm_cell_key *key)
{
	struct dm_bio_prison_cell *cell;

	hlist_for_each_entry(cell, bucket, list)
		if (keys_equal(&cell->key, key))
			return cell;

	return NULL;
}

static void __setup_new_cell(struct dm_bio_prison *prison,
			     struct dm_cell_key *key,
			     struct bio *holder,
			     uint32_t hash,
			     struct dm_bio_prison_cell *cell)
{
	memcpy(&cell->key, key, sizeof(cell->key));
	cell->holder = holder;
	bio_list_init(&cell->bios);
	hlist_add_head(&cell->list, prison->cells + hash);
}

static int __bio_detain(struct dm_bio_prison *prison,
			struct dm_cell_key *key,
			struct bio *inmate,
			struct dm_bio_prison_cell *cell_prealloc,
			struct dm_bio_prison_cell **cell_result)
{
	uint32_t hash = hash_key(prison, key);
	struct dm_bio_prison_cell *cell;

	cell = __search_bucket(prison->cells + hash, key);
	if (cell) {
		if (inmate)
			bio_list_add(&cell->bios, inmate);
		*cell_result = cell;
		return 1;
	}

	__setup_new_cell(prison, key, inmate, hash, cell_prealloc);
	*cell_result = cell_prealloc;
	return 0;
}

static int bio_detain(struct dm_bio_prison *prison,
		      struct dm_cell_key *key,
		      struct bio *inmate,
		      struct dm_bio_prison_cell *cell_prealloc,
		      struct dm_bio_prison_cell **cell_result)
{
	int r;
	unsigned long flags;

	spin_lock_irqsave(&prison->lock, flags);
	r = __bio_detain(prison, key, inmate, cell_prealloc, cell_result);
	spin_unlock_irqrestore(&prison->lock, flags);

	return r;
}

int dm_bio_detain(struct dm_bio_prison *prison,
		  struct dm_cell_key *key,
		  struct bio *inmate,
		  struct dm_bio_prison_cell *cell_prealloc,
		  struct dm_bio_prison_cell **cell_result)
{
	return bio_detain(prison, key, inmate, cell_prealloc, cell_result);
}
EXPORT_SYMBOL_GPL(dm_bio_detain);

int dm_get_cell(struct dm_bio_prison *prison,
		struct dm_cell_key *key,
		struct dm_bio_prison_cell *cell_prealloc,
		struct dm_bio_prison_cell **cell_result)
{
	return bio_detain(prison, key, NULL, cell_prealloc, cell_result);
}
EXPORT_SYMBOL_GPL(dm_get_cell);

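/*
 * Detain pattern sketch (illustrative only): callers allocate a cell up
 * front because the detain itself runs under the prison spinlock and must
 * not sleep.  If another bio already holds the key, the preallocated cell
 * is simply returned to the pool.  build_key() and process_bio() are
 * hypothetical helpers.
 *
 *	struct dm_bio_prison_cell *prealloc, *cell;
 *	struct dm_cell_key key;
 *
 *	prealloc = dm_bio_prison_alloc_cell(prison, GFP_NOIO);
 *	build_key(bio, &key);
 *	if (dm_bio_detain(prison, &key, bio, prealloc, &cell)) {
 *		// lost the race: bio is now queued on the existing cell
 *		dm_bio_prison_free_cell(prison, prealloc);
 *		return;
 *	}
 *	// we are the holder of "cell"; process the bio, then release
 *	process_bio(bio, cell);
 */
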
/*
 * @inmates must have been initialised prior to this call
 */
static void __cell_release(struct dm_bio_prison_cell *cell,
			   struct bio_list *inmates)
{
	hlist_del(&cell->list);

	if (inmates) {
		if (cell->holder)
			bio_list_add(inmates, cell->holder);
		bio_list_merge(inmates, &cell->bios);
	}
}

void dm_cell_release(struct dm_bio_prison *prison,
		     struct dm_bio_prison_cell *cell,
		     struct bio_list *bios)
{
	unsigned long flags;

	spin_lock_irqsave(&prison->lock, flags);
	__cell_release(cell, bios);
	spin_unlock_irqrestore(&prison->lock, flags);
}
EXPORT_SYMBOL_GPL(dm_cell_release);

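/*
 * Release sketch (illustrative only): the caller owns the bio_list and
 * must initialise it before the call, as noted above; releasing only
 * unlinks the cell, so the caller also returns the cell to the pool.
 * Use dm_cell_release_no_holder() instead when only the queued bios,
 * not the holder, should be collected.  requeue_bio() is hypothetical.
 *
 *	struct bio_list bios;
 *	struct bio *bio;
 *
 *	bio_list_init(&bios);
 *	dm_cell_release(prison, cell, &bios);
 *	dm_bio_prison_free_cell(prison, cell);
 *	while ((bio = bio_list_pop(&bios)))
 *		requeue_bio(bio);
 */
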
/*
 * Sometimes we don't want the holder, just the additional bios.
 */
static void __cell_release_no_holder(struct dm_bio_prison_cell *cell,
				     struct bio_list *inmates)
{
	hlist_del(&cell->list);
	bio_list_merge(inmates, &cell->bios);
}

void dm_cell_release_no_holder(struct dm_bio_prison *prison,
			       struct dm_bio_prison_cell *cell,
			       struct bio_list *inmates)
{
	unsigned long flags;

	spin_lock_irqsave(&prison->lock, flags);
	__cell_release_no_holder(cell, inmates);
	spin_unlock_irqrestore(&prison->lock, flags);
}
EXPORT_SYMBOL_GPL(dm_cell_release_no_holder);

void dm_cell_error(struct dm_bio_prison *prison,
		   struct dm_bio_prison_cell *cell)
{
	struct bio_list bios;
	struct bio *bio;
	unsigned long flags;

	bio_list_init(&bios);

	spin_lock_irqsave(&prison->lock, flags);
	__cell_release(cell, &bios);
	spin_unlock_irqrestore(&prison->lock, flags);

	while ((bio = bio_list_pop(&bios)))
		bio_io_error(bio);
}
EXPORT_SYMBOL_GPL(dm_cell_error);

/*----------------------------------------------------------------*/

#define DEFERRED_SET_SIZE 64

struct dm_deferred_entry {
	struct dm_deferred_set *ds;
	unsigned count;
	struct list_head work_items;
};

struct dm_deferred_set {
	spinlock_t lock;
	unsigned current_entry;
	unsigned sweeper;
	struct dm_deferred_entry entries[DEFERRED_SET_SIZE];
};

struct dm_deferred_set *dm_deferred_set_create(void)
{
	int i;
	struct dm_deferred_set *ds;

	ds = kmalloc(sizeof(*ds), GFP_KERNEL);
	if (!ds)
		return NULL;

	spin_lock_init(&ds->lock);
	ds->current_entry = 0;
	ds->sweeper = 0;
	for (i = 0; i < DEFERRED_SET_SIZE; i++) {
		ds->entries[i].ds = ds;
		ds->entries[i].count = 0;
		INIT_LIST_HEAD(&ds->entries[i].work_items);
	}

	return ds;
}
EXPORT_SYMBOL_GPL(dm_deferred_set_create);

void dm_deferred_set_destroy(struct dm_deferred_set *ds)
{
	kfree(ds);
}
EXPORT_SYMBOL_GPL(dm_deferred_set_destroy);

struct dm_deferred_entry *dm_deferred_entry_inc(struct dm_deferred_set *ds)
{
	unsigned long flags;
	struct dm_deferred_entry *entry;

	spin_lock_irqsave(&ds->lock, flags);
	entry = ds->entries + ds->current_entry;
	entry->count++;
	spin_unlock_irqrestore(&ds->lock, flags);

	return entry;
}
EXPORT_SYMBOL_GPL(dm_deferred_entry_inc);

static unsigned ds_next(unsigned index)
{
	return (index + 1) % DEFERRED_SET_SIZE;
}

static void __sweep(struct dm_deferred_set *ds, struct list_head *head)
{
	while ((ds->sweeper != ds->current_entry) &&
	       !ds->entries[ds->sweeper].count) {
		list_splice_init(&ds->entries[ds->sweeper].work_items, head);
		ds->sweeper = ds_next(ds->sweeper);
	}

	if ((ds->sweeper == ds->current_entry) && !ds->entries[ds->sweeper].count)
		list_splice_init(&ds->entries[ds->sweeper].work_items, head);
}

void dm_deferred_entry_dec(struct dm_deferred_entry *entry, struct list_head *head)
{
	unsigned long flags;

	spin_lock_irqsave(&entry->ds->lock, flags);
	BUG_ON(!entry->count);
	--entry->count;
	__sweep(entry->ds, head);
	spin_unlock_irqrestore(&entry->ds->lock, flags);
}
EXPORT_SYMBOL_GPL(dm_deferred_entry_dec);

/*
 * Returns 1 if deferred or 0 if no pending items to delay job.
 */
int dm_deferred_set_add_work(struct dm_deferred_set *ds, struct list_head *work)
{
	int r = 1;
	unsigned long flags;
	unsigned next_entry;

	spin_lock_irqsave(&ds->lock, flags);
	if ((ds->sweeper == ds->current_entry) &&
	    !ds->entries[ds->current_entry].count)
		r = 0;
	else {
		list_add(work, &ds->entries[ds->current_entry].work_items);
		next_entry = ds_next(ds->current_entry);
		if (!ds->entries[next_entry].count)
			ds->current_entry = next_entry;
	}
	spin_unlock_irqrestore(&ds->lock, flags);

	return r;
}
EXPORT_SYMBOL_GPL(dm_deferred_set_add_work);

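/*
 * Deferred set sketch (illustrative only): each piece of in-flight work
 * increments an entry before it starts and decrements it on completion,
 * collecting any work items that have become runnable; a job that must
 * wait for everything currently in flight is registered with
 * dm_deferred_set_add_work().  issue_io() and run_job() are hypothetical.
 *
 *	struct dm_deferred_entry *entry;
 *	LIST_HEAD(work);
 *
 *	entry = dm_deferred_entry_inc(ds);
 *	issue_io(bio);
 *
 *	// on completion of that io:
 *	dm_deferred_entry_dec(entry, &work);
 *	// "work" now holds any deferred jobs that are safe to run
 *
 *	// to make a job wait for all io currently in flight:
 *	if (!dm_deferred_set_add_work(ds, &job->list))
 *		run_job(job);	// nothing in flight, run it immediately
 */
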
/*----------------------------------------------------------------*/

static int __init dm_bio_prison_init(void)
{
	_cell_cache = KMEM_CACHE(dm_bio_prison_cell, 0);
	if (!_cell_cache)
		return -ENOMEM;

	return 0;
}

static void __exit dm_bio_prison_exit(void)
{
	kmem_cache_destroy(_cell_cache);
	_cell_cache = NULL;
}

module_init(dm_bio_prison_init);
module_exit(dm_bio_prison_exit);

MODULE_DESCRIPTION(DM_NAME " bio prison");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");