drivers/md/dm-bio-prison-v1.c
/*
 * Copyright (C) 2012 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-prison-v1.h"
#include "dm-bio-prison-v2.h"

#include <linux/spinlock.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>

/*----------------------------------------------------------------*/

#define MIN_CELLS 1024

struct dm_bio_prison {
	spinlock_t lock;
	struct rb_root cells;
	mempool_t cell_pool;
};

static struct kmem_cache *_cell_cache;

/*----------------------------------------------------------------*/

/*
 * @nr_cells should be the number of cells you want in use _concurrently_.
 * Don't confuse it with the number of distinct keys.
 */
struct dm_bio_prison *dm_bio_prison_create(void)
{
	struct dm_bio_prison *prison = kzalloc(sizeof(*prison), GFP_KERNEL);
	int ret;

	if (!prison)
		return NULL;

	spin_lock_init(&prison->lock);

	ret = mempool_init_slab_pool(&prison->cell_pool, MIN_CELLS, _cell_cache);
	if (ret) {
		kfree(prison);
		return NULL;
	}

	prison->cells = RB_ROOT;

	return prison;
}
EXPORT_SYMBOL_GPL(dm_bio_prison_create);

void dm_bio_prison_destroy(struct dm_bio_prison *prison)
{
	mempool_exit(&prison->cell_pool);
	kfree(prison);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_destroy);

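/*
 * Illustrative sketch (not part of the original file): a target that uses
 * the prison would typically create it in its constructor and destroy it in
 * its destructor.  The 'pt' structure and 'bad_prison' label below are
 * hypothetical names.
 *
 *	pt->prison = dm_bio_prison_create();
 *	if (!pt->prison)
 *		goto bad_prison;
 *	...
 *	dm_bio_prison_destroy(pt->prison);
 */
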
struct dm_bio_prison_cell *dm_bio_prison_alloc_cell(struct dm_bio_prison *prison, gfp_t gfp)
{
	return mempool_alloc(&prison->cell_pool, gfp);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_alloc_cell);

void dm_bio_prison_free_cell(struct dm_bio_prison *prison,
			     struct dm_bio_prison_cell *cell)
{
	mempool_free(cell, &prison->cell_pool);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_free_cell);

static void __setup_new_cell(struct dm_cell_key *key,
			     struct bio *holder,
			     struct dm_bio_prison_cell *cell)
{
	memcpy(&cell->key, key, sizeof(cell->key));
	cell->holder = holder;
	bio_list_init(&cell->bios);
}

static int cmp_keys(struct dm_cell_key *lhs,
		    struct dm_cell_key *rhs)
{
	if (lhs->virtual < rhs->virtual)
		return -1;

	if (lhs->virtual > rhs->virtual)
		return 1;

	if (lhs->dev < rhs->dev)
		return -1;

	if (lhs->dev > rhs->dev)
		return 1;

	if (lhs->block_end <= rhs->block_begin)
		return -1;

	if (lhs->block_begin >= rhs->block_end)
		return 1;

	return 0;
}

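/*
 * Note on the ordering above: keys sort first on ->virtual, then on ->dev,
 * and two keys for the same device compare equal whenever their
 * [block_begin, block_end) ranges overlap (e.g. [0, 4) and [2, 6)).  The
 * rb-tree therefore only ever holds non-overlapping ranges, and a detain
 * against an overlapping key lands in the existing cell.
 */
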
static int __bio_detain(struct dm_bio_prison *prison,
			struct dm_cell_key *key,
			struct bio *inmate,
			struct dm_bio_prison_cell *cell_prealloc,
			struct dm_bio_prison_cell **cell_result)
{
	int r;
	struct rb_node **new = &prison->cells.rb_node, *parent = NULL;

	while (*new) {
		struct dm_bio_prison_cell *cell =
			rb_entry(*new, struct dm_bio_prison_cell, node);

		r = cmp_keys(key, &cell->key);

		parent = *new;
		if (r < 0)
			new = &((*new)->rb_left);
		else if (r > 0)
			new = &((*new)->rb_right);
		else {
			if (inmate)
				bio_list_add(&cell->bios, inmate);
			*cell_result = cell;
			return 1;
		}
	}

	__setup_new_cell(key, inmate, cell_prealloc);
	*cell_result = cell_prealloc;

	rb_link_node(&cell_prealloc->node, parent, new);
	rb_insert_color(&cell_prealloc->node, &prison->cells);

	return 0;
}

static int bio_detain(struct dm_bio_prison *prison,
		      struct dm_cell_key *key,
		      struct bio *inmate,
		      struct dm_bio_prison_cell *cell_prealloc,
		      struct dm_bio_prison_cell **cell_result)
{
	int r;

	spin_lock_irq(&prison->lock);
	r = __bio_detain(prison, key, inmate, cell_prealloc, cell_result);
	spin_unlock_irq(&prison->lock);

	return r;
}

int dm_bio_detain(struct dm_bio_prison *prison,
		  struct dm_cell_key *key,
		  struct bio *inmate,
		  struct dm_bio_prison_cell *cell_prealloc,
		  struct dm_bio_prison_cell **cell_result)
{
	return bio_detain(prison, key, inmate, cell_prealloc, cell_result);
}
EXPORT_SYMBOL_GPL(dm_bio_detain);

int dm_get_cell(struct dm_bio_prison *prison,
		struct dm_cell_key *key,
		struct dm_bio_prison_cell *cell_prealloc,
		struct dm_bio_prison_cell **cell_result)
{
	return bio_detain(prison, key, NULL, cell_prealloc, cell_result);
}
EXPORT_SYMBOL_GPL(dm_get_cell);

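/*
 * Illustrative usage sketch (hypothetical, not from this file): callers such
 * as the thin/cache targets preallocate a cell, try to detain, and hand the
 * prealloc back if the key was already held.  build_key() and process_bio()
 * are made-up names for illustration.
 *
 *	struct dm_bio_prison_cell *prealloc, *cell;
 *	struct dm_cell_key key;
 *
 *	build_key(&key);
 *	prealloc = dm_bio_prison_alloc_cell(prison, GFP_NOIO);
 *	if (dm_bio_detain(prison, &key, bio, prealloc, &cell))
 *		// someone already holds the key; bio is now queued in 'cell'
 *		dm_bio_prison_free_cell(prison, prealloc);
 *	else
 *		// we are the holder; 'cell' is the prealloc we passed in
 *		process_bio(bio);
 */
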
/*
 * @inmates must have been initialised prior to this call
 */
static void __cell_release(struct dm_bio_prison *prison,
			   struct dm_bio_prison_cell *cell,
			   struct bio_list *inmates)
{
	rb_erase(&cell->node, &prison->cells);

	if (inmates) {
		if (cell->holder)
			bio_list_add(inmates, cell->holder);
		bio_list_merge(inmates, &cell->bios);
	}
}

void dm_cell_release(struct dm_bio_prison *prison,
		     struct dm_bio_prison_cell *cell,
		     struct bio_list *bios)
{
	spin_lock_irq(&prison->lock);
	__cell_release(prison, cell, bios);
	spin_unlock_irq(&prison->lock);
}
EXPORT_SYMBOL_GPL(dm_cell_release);

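/*
 * Illustrative sketch (hypothetical): once work on the detained region is
 * finished, the holder releases the cell and resubmits whatever queued up
 * behind it.  resubmit() stands in for the target's own requeue path.
 *
 *	struct bio_list bios;
 *	struct bio *bio;
 *
 *	bio_list_init(&bios);
 *	dm_cell_release(prison, cell, &bios);
 *	dm_bio_prison_free_cell(prison, cell);
 *	while ((bio = bio_list_pop(&bios)))
 *		resubmit(bio);
 */
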
/*
 * Sometimes we don't want the holder, just the additional bios.
 */
static void __cell_release_no_holder(struct dm_bio_prison *prison,
				     struct dm_bio_prison_cell *cell,
				     struct bio_list *inmates)
{
	rb_erase(&cell->node, &prison->cells);
	bio_list_merge(inmates, &cell->bios);
}

void dm_cell_release_no_holder(struct dm_bio_prison *prison,
			       struct dm_bio_prison_cell *cell,
			       struct bio_list *inmates)
{
	unsigned long flags;

	spin_lock_irqsave(&prison->lock, flags);
	__cell_release_no_holder(prison, cell, inmates);
	spin_unlock_irqrestore(&prison->lock, flags);
}
EXPORT_SYMBOL_GPL(dm_cell_release_no_holder);

void dm_cell_error(struct dm_bio_prison *prison,
		   struct dm_bio_prison_cell *cell, blk_status_t error)
{
	struct bio_list bios;
	struct bio *bio;

	bio_list_init(&bios);
	dm_cell_release(prison, cell, &bios);

	while ((bio = bio_list_pop(&bios))) {
		bio->bi_status = error;
		bio_endio(bio);
	}
}
EXPORT_SYMBOL_GPL(dm_cell_error);

void dm_cell_visit_release(struct dm_bio_prison *prison,
			   void (*visit_fn)(void *, struct dm_bio_prison_cell *),
			   void *context,
			   struct dm_bio_prison_cell *cell)
{
	spin_lock_irq(&prison->lock);
	visit_fn(context, cell);
	rb_erase(&cell->node, &prison->cells);
	spin_unlock_irq(&prison->lock);
}
EXPORT_SYMBOL_GPL(dm_cell_visit_release);

static int __promote_or_release(struct dm_bio_prison *prison,
				struct dm_bio_prison_cell *cell)
{
	if (bio_list_empty(&cell->bios)) {
		rb_erase(&cell->node, &prison->cells);
		return 1;
	}

	cell->holder = bio_list_pop(&cell->bios);
	return 0;
}

int dm_cell_promote_or_release(struct dm_bio_prison *prison,
			       struct dm_bio_prison_cell *cell)
{
	int r;

	spin_lock_irq(&prison->lock);
	r = __promote_or_release(prison, cell);
	spin_unlock_irq(&prison->lock);

	return r;
}
EXPORT_SYMBOL_GPL(dm_cell_promote_or_release);

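/*
 * Illustrative sketch (hypothetical): after the current holder has been
 * dealt with, either the cell drains and can be freed, or the next queued
 * bio becomes the new holder and goes back round the caller's state machine.
 * continue_processing() is a made-up helper name.
 *
 *	if (dm_cell_promote_or_release(prison, cell))
 *		dm_bio_prison_free_cell(prison, cell);	// cell was empty
 *	else
 *		continue_processing(cell->holder);	// new holder to service
 */
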
/*----------------------------------------------------------------*/

#define DEFERRED_SET_SIZE 64

struct dm_deferred_entry {
	struct dm_deferred_set *ds;
	unsigned count;
	struct list_head work_items;
};

struct dm_deferred_set {
	spinlock_t lock;
	unsigned current_entry;
	unsigned sweeper;
	struct dm_deferred_entry entries[DEFERRED_SET_SIZE];
};

struct dm_deferred_set *dm_deferred_set_create(void)
{
	int i;
	struct dm_deferred_set *ds;

	ds = kmalloc(sizeof(*ds), GFP_KERNEL);
	if (!ds)
		return NULL;

	spin_lock_init(&ds->lock);
	ds->current_entry = 0;
	ds->sweeper = 0;
	for (i = 0; i < DEFERRED_SET_SIZE; i++) {
		ds->entries[i].ds = ds;
		ds->entries[i].count = 0;
		INIT_LIST_HEAD(&ds->entries[i].work_items);
	}

	return ds;
}
EXPORT_SYMBOL_GPL(dm_deferred_set_create);

void dm_deferred_set_destroy(struct dm_deferred_set *ds)
{
	kfree(ds);
}
EXPORT_SYMBOL_GPL(dm_deferred_set_destroy);

struct dm_deferred_entry *dm_deferred_entry_inc(struct dm_deferred_set *ds)
{
	unsigned long flags;
	struct dm_deferred_entry *entry;

	spin_lock_irqsave(&ds->lock, flags);
	entry = ds->entries + ds->current_entry;
	entry->count++;
	spin_unlock_irqrestore(&ds->lock, flags);

	return entry;
}
EXPORT_SYMBOL_GPL(dm_deferred_entry_inc);

static unsigned ds_next(unsigned index)
{
	return (index + 1) % DEFERRED_SET_SIZE;
}

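/*
 * __sweep() below walks forward from the sweeper towards the current entry,
 * splicing out the work items of every entry whose reference count has
 * dropped to zero; if it catches up with a drained current entry, that
 * entry's work items are collected as well.
 */
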
static void __sweep(struct dm_deferred_set *ds, struct list_head *head)
{
	while ((ds->sweeper != ds->current_entry) &&
	       !ds->entries[ds->sweeper].count) {
		list_splice_init(&ds->entries[ds->sweeper].work_items, head);
		ds->sweeper = ds_next(ds->sweeper);
	}

	if ((ds->sweeper == ds->current_entry) && !ds->entries[ds->sweeper].count)
		list_splice_init(&ds->entries[ds->sweeper].work_items, head);
}

void dm_deferred_entry_dec(struct dm_deferred_entry *entry, struct list_head *head)
{
	unsigned long flags;

	spin_lock_irqsave(&entry->ds->lock, flags);
	BUG_ON(!entry->count);
	--entry->count;
	__sweep(entry->ds, head);
	spin_unlock_irqrestore(&entry->ds->lock, flags);
}
EXPORT_SYMBOL_GPL(dm_deferred_entry_dec);

/*
 * Returns 1 if deferred or 0 if no pending items to delay job.
 */
int dm_deferred_set_add_work(struct dm_deferred_set *ds, struct list_head *work)
{
	int r = 1;
	unsigned next_entry;

	spin_lock_irq(&ds->lock);
	if ((ds->sweeper == ds->current_entry) &&
	    !ds->entries[ds->current_entry].count)
		r = 0;
	else {
		list_add(work, &ds->entries[ds->current_entry].work_items);
		next_entry = ds_next(ds->current_entry);
		if (!ds->entries[next_entry].count)
			ds->current_entry = next_entry;
	}
	spin_unlock_irq(&ds->lock);

	return r;
}
EXPORT_SYMBOL_GPL(dm_deferred_set_add_work);

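/*
 * Illustrative sketch of the usual calling pattern (hypothetical names,
 * loosely modelled on the thin/cache targets): each in-flight bio takes a
 * reference on the current entry and drops it on completion; work that must
 * wait for all currently in-flight bios is parked with
 * dm_deferred_set_add_work() and run when the sweep hands it back.
 *
 *	// when issuing a bio
 *	entry = dm_deferred_entry_inc(ds);
 *
 *	// on completion
 *	struct list_head work;
 *	INIT_LIST_HEAD(&work);
 *	dm_deferred_entry_dec(entry, &work);
 *	run_work_items(&work);			// hypothetical helper
 *
 *	// deferring a job until in-flight bios drain
 *	if (!dm_deferred_set_add_work(ds, &job->list))
 *		run_job_now(job);		// nothing in flight, run it now
 */
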
/*----------------------------------------------------------------*/

static int __init dm_bio_prison_init_v1(void)
{
	_cell_cache = KMEM_CACHE(dm_bio_prison_cell, 0);
	if (!_cell_cache)
		return -ENOMEM;

	return 0;
}

static void dm_bio_prison_exit_v1(void)
{
	kmem_cache_destroy(_cell_cache);
	_cell_cache = NULL;
}

static int (*_inits[])(void) __initdata = {
	dm_bio_prison_init_v1,
	dm_bio_prison_init_v2,
};

static void (*_exits[])(void) = {
	dm_bio_prison_exit_v1,
	dm_bio_prison_exit_v2,
};

static int __init dm_bio_prison_init(void)
{
	const int count = ARRAY_SIZE(_inits);

	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_bio_prison_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();
}

/*
 * module hooks
 */
module_init(dm_bio_prison_init);
module_exit(dm_bio_prison_exit);

MODULE_DESCRIPTION(DM_NAME " bio prison");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");