/*
 * Copyright (C) 2011-2017 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#ifndef DM_BIO_PRISON_H
#define DM_BIO_PRISON_H

#include "persistent-data/dm-block-manager.h" /* FIXME: for dm_block_t */
#include "dm-thin-metadata.h" /* FIXME: for dm_thin_id */

#include <linux/bio.h>
#include <linux/rbtree.h>

/*----------------------------------------------------------------*/
/*
 * Sometimes we can't deal with a bio straight away. We put them in prison
 * where they can't cause any mischief. Bios are put in a cell identified
 * by a key; multiple bios can be in the same cell. When the cell is
 * subsequently unlocked the bios become available.
 */
struct dm_bio_prison;

/*
 * Keys define a range of blocks within either a virtual or physical
 * device.
 */
struct dm_cell_key {
	int virtual;
	dm_thin_id dev;
	dm_block_t block_begin, block_end;
};
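
/*
 * A minimal sketch of filling in a key for a single virtual block,
 * modelled on the pattern dm-thin uses. The helper name is illustrative
 * and not part of this header; dm_thin_dev_id() is declared in
 * dm-thin-metadata.h. Note the range is half-open: [block_begin, block_end).
 */
static inline void example_build_virtual_key(struct dm_thin_device *td,
					     dm_block_t b,
					     struct dm_cell_key *key)
{
	key->virtual = 1;		/* thin (virtual) address space */
	key->dev = dm_thin_dev_id(td);	/* which thin device owns the range */
	key->block_begin = b;
	key->block_end = b + 1;		/* lock exactly one block */
}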
/*
 * Treat this as opaque, only in header so callers can manage allocation
 * themselves.
 */
struct dm_bio_prison_cell {
	struct list_head user_list;	/* for client use */
	struct rb_node node;

	struct dm_cell_key key;
	struct bio *holder;
	struct bio_list bios;
};

struct dm_bio_prison *dm_bio_prison_create(void);
void dm_bio_prison_destroy(struct dm_bio_prison *prison);
/*
 * These two functions just wrap a mempool. This is a transitory step:
 * Eventually all bio prison clients should manage their own cell memory.
 *
 * Like mempool_alloc(), dm_bio_prison_alloc_cell() can only fail if called
 * in interrupt context or passed GFP_NOWAIT.
 */
struct dm_bio_prison_cell *dm_bio_prison_alloc_cell(struct dm_bio_prison *prison,
						    gfp_t gfp);
void dm_bio_prison_free_cell(struct dm_bio_prison *prison,
			     struct dm_bio_prison_cell *cell);
/*
 * Creates or retrieves a cell that overlaps the given key.
 *
 * Returns 1 if a pre-existing cell was returned, 0 if a new cell was
 * created using @cell_prealloc.
 */
int dm_get_cell(struct dm_bio_prison *prison,
		struct dm_cell_key *key,
		struct dm_bio_prison_cell *cell_prealloc,
		struct dm_bio_prison_cell **cell_result);
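
/*
 * A sketch of locking a block range without a bio (for instance to
 * quiesce a region before a background operation). The wrapper name is
 * illustrative. The spare cell is preallocated outside the prison lock
 * and returned to the pool if an existing cell won the race.
 */
static inline int example_lock_range(struct dm_bio_prison *prison,
				     struct dm_cell_key *key,
				     struct dm_bio_prison_cell **cell_result)
{
	struct dm_bio_prison_cell *prealloc;
	int r;

	prealloc = dm_bio_prison_alloc_cell(prison, GFP_NOIO);
	r = dm_get_cell(prison, key, prealloc, cell_result);
	if (r)
		/* Pre-existing cell returned; our spare was not used. */
		dm_bio_prison_free_cell(prison, prealloc);

	return r;
}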
/*
 * An atomic op that combines retrieving or creating a cell, and adding a
 * bio to it.
 *
 * Returns 1 if the cell was already held, 0 if @inmate is the new holder.
 */
int dm_bio_detain(struct dm_bio_prison *prison,
		  struct dm_cell_key *key,
		  struct bio *inmate,
		  struct dm_bio_prison_cell *cell_prealloc,
		  struct dm_bio_prison_cell **cell_result);
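
/*
 * A sketch of the usual detain pattern, mirroring how dm-thin drives
 * this interface (the wrapper name is illustrative). The cell is
 * allocated outside the prison's lock; per the comment above, GFP_NOIO
 * from process context cannot fail.
 */
static inline int example_bio_detain(struct dm_bio_prison *prison,
				     struct dm_cell_key *key, struct bio *bio,
				     struct dm_bio_prison_cell **cell_result)
{
	struct dm_bio_prison_cell *prealloc;
	int r;

	prealloc = dm_bio_prison_alloc_cell(prison, GFP_NOIO);
	r = dm_bio_detain(prison, key, bio, prealloc, cell_result);
	if (r)
		/* We reused an old cell; the preallocated one can go back. */
		dm_bio_prison_free_cell(prison, prealloc);

	return r;	/* 0: @bio now holds the cell; 1: queued as an inmate */
}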
void dm_cell_release(struct dm_bio_prison *prison,
		     struct dm_bio_prison_cell *cell,
		     struct bio_list *bios);
void dm_cell_release_no_holder(struct dm_bio_prison *prison,
			       struct dm_bio_prison_cell *cell,
			       struct bio_list *inmates);
void dm_cell_error(struct dm_bio_prison *prison,
		   struct dm_bio_prison_cell *cell, blk_status_t error);
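
/*
 * A sketch of unlocking a cell once the holder's work is done. The
 * holder and any queued inmates move onto a caller-owned list under the
 * prison lock, and the cell's memory is handed back. How the bios on
 * @deferred are resubmitted is up to the client; dm-thin, for example,
 * hands them to a worker thread.
 */
static inline void example_cell_defer(struct dm_bio_prison *prison,
				      struct dm_bio_prison_cell *cell,
				      struct bio_list *deferred)
{
	dm_cell_release(prison, cell, deferred);	/* holder + inmates -> @deferred */
	dm_bio_prison_free_cell(prison, cell);		/* return the cell to the pool */
}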
/*
 * Visits the cell and then releases it. Guarantees no new inmates are
 * inserted between the visit and release.
 */
void dm_cell_visit_release(struct dm_bio_prison *prison,
			   void (*visit_fn)(void *, struct dm_bio_prison_cell *),
			   void *context, struct dm_bio_prison_cell *cell);
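
/*
 * A sketch of a visit_fn that steals the queued inmates (not the
 * holder) into a caller-owned list. The visitor runs under the prison's
 * spinlock, so it must not block. The context struct and names are
 * illustrative.
 */
struct example_visit_ctx {
	struct bio_list bios;
};

static inline void example_take_inmates(void *context,
					struct dm_bio_prison_cell *cell)
{
	struct example_visit_ctx *ctx = context;

	bio_list_merge(&ctx->bios, &cell->bios);	/* move the queued bios out */
	bio_list_init(&cell->bios);			/* leave the cell empty */
}

/*
 * Typical call site (illustrative): after
 *	dm_cell_visit_release(prison, example_take_inmates, &ctx, cell);
 * the cell is out of the prison and can be freed by the caller.
 */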
/*
 * Rather than always releasing the prisoners in a cell, the client may
 * want to promote one of them to be the new holder. There is a race
 * here, though, between releasing an empty cell and other threads
 * adding new inmates, so this function makes the decision with its lock
 * held.
 *
 * This function can have two outcomes:
 * i) An inmate is promoted to be the holder of the cell (return value of 0).
 * ii) The cell has no inmate for promotion and is released (return value of 1).
 */
int dm_cell_promote_or_release(struct dm_bio_prison *prison,
			       struct dm_bio_prison_cell *cell);
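
/*
 * A sketch of draining a cell one holder at a time using the promote
 * semantics above. process_bio() is a hypothetical per-client handler.
 * Once the call returns 1 the cell has been released empty, so its
 * memory can be reclaimed (assuming it came from
 * dm_bio_prison_alloc_cell()).
 */
static inline void example_drain_cell(struct dm_bio_prison *prison,
				      struct dm_bio_prison_cell *cell,
				      void (*process_bio)(struct bio *))
{
	do {
		process_bio(cell->holder);	/* deal with the current holder */
	} while (!dm_cell_promote_or_release(prison, cell));

	/* Returned 1: no inmate was left; the cell is out of the prison. */
	dm_bio_prison_free_cell(prison, cell);
}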
/*----------------------------------------------------------------*/

/*
 * We use the deferred set to keep track of pending reads to shared blocks.
 * We do this to ensure the new mapping caused by a write isn't performed
 * until these prior reads have completed. Otherwise the insertion of the
 * new mapping could free the old block that the read bios are mapped to.
 */

struct dm_deferred_set;
struct dm_deferred_entry;

struct dm_deferred_set *dm_deferred_set_create(void);
void dm_deferred_set_destroy(struct dm_deferred_set *ds);

struct dm_deferred_entry *dm_deferred_entry_inc(struct dm_deferred_set *ds);
void dm_deferred_entry_dec(struct dm_deferred_entry *entry, struct list_head *head);
int dm_deferred_set_add_work(struct dm_deferred_set *ds, struct list_head *work);
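
/*
 * A sketch of the deferred-set pattern around a read to a shared block,
 * with illustrative names. An entry is taken before the read is issued
 * and dropped on completion; any work that was blocked behind the read
 * is handed back via the list passed to dm_deferred_entry_dec(), for
 * the caller to run outside any locks. In the v1 implementation,
 * dm_deferred_set_add_work() returns 0 when the set is idle, meaning
 * the caller should perform the work immediately rather than defer it.
 */
static inline struct dm_deferred_entry *
example_shared_read_begin(struct dm_deferred_set *ds)
{
	return dm_deferred_entry_inc(ds);	/* before submitting the read */
}

static inline void example_shared_read_end(struct dm_deferred_entry *entry,
					   struct list_head *unblocked_work)
{
	/* On completion; @unblocked_work collects work that may now run. */
	dm_deferred_entry_dec(entry, unblocked_work);
}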
/*----------------------------------------------------------------*/

#endif