typedef struct mirror_info mirror_info_t;

struct mirror_info {
	mdk_rdev_t	*rdev;
	sector_t	head_position;
};
/*
 * memory pools need a pointer to the mddev, so they can force an unplug
 * when memory is tight, and a count of the number of drives that the
 * pool was allocated for, so they know how much to allocate and free.
 * mddev->raid_disks cannot be used, as it can change while a pool is active.
 * These two datums are stored in a kmalloced struct.
 */
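/*
 * A sketch of that kmalloced struct, reconstructed from the comment
 * above; the field names are assumptions made for illustration, not
 * copied from the original header.
 */
struct pool_info {
	mddev_t	*mddev;		/* so a memory-tight pool can unplug the array's queue */
	int	raid_disks;	/* how many drives the pool was sized for */
};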
typedef struct r1bio_s r1bio_t;
struct r1_private_data_s {
	mirror_info_t		*mirrors;
	sector_t		next_seq_sect;
	spinlock_t		device_lock;

	struct list_head	retry_list;
	/* queue pending writes and submit them on unplug;
	 * see the sketch below this struct */
	struct bio_list		pending_bio_list;
	/* queue of writes that have been unplugged */
	struct bio_list		flushing_bio_list;

	/* for use when syncing mirrors: */

	spinlock_t		resync_lock;
	int			fullsync;  /* set to 1 if a full sync is needed,
					    * (fresh device added).
					    * Cleared when a sync completes.
					    */
	wait_queue_head_t	wait_barrier;

	struct pool_info	*poolinfo;

	mempool_t		*r1bio_pool;
	mempool_t		*r1buf_pool;
};
typedef struct r1_private_data_s conf_t;
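/*
 * Sketch of how the pending/flushing bio lists above are meant to be
 * used on unplug, per their field comments.  This is an illustration
 * only, not the unplug code from raid1.c, and it assumes the bio_list
 * helpers used elsewhere in md (bio_list_merge() and friends) are in
 * scope; the function name is invented.
 */
static inline void r1_example_flush_pending(conf_t *conf)
{
	struct bio *bio;

	spin_lock_irq(&conf->device_lock);
	/* everything queued so far becomes the batch being unplugged */
	bio_list_merge(&conf->flushing_bio_list, &conf->pending_bio_list);
	bio_list_init(&conf->pending_bio_list);
	spin_unlock_irq(&conf->device_lock);

	/* submit the unplugged writes to the member devices */
	while ((bio = bio_list_pop(&conf->flushing_bio_list)))
		generic_make_request(bio);
}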
/*
 * this is the only point in the RAID code where we violate
 * C type safety. mddev->private is an 'opaque' pointer.
 */
#define mddev_to_conf(mddev) ((conf_t *) mddev->private)
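/*
 * Illustrative use of mddev_to_conf(); the function below is an
 * invented example, not code from raid1.c.
 */
static inline void r1_example_show(mddev_t *mddev)
{
	conf_t *conf = mddev_to_conf(mddev);

	printk(KERN_DEBUG "raid1: conf %p fullsync=%d\n",
	       conf, conf->fullsync);
}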
/*
 * this is our 'private' RAID1 bio.
 *
 * it contains information about what kind of IO operations were started
 * for this RAID1 operation, and about their status:
 */

struct r1bio_s {
	atomic_t		remaining; /* 'have we finished' count,
					    * used from IRQ handlers
					    */
	atomic_t		behind_remaining; /* number of write-behind ios remaining
						   * in this BehindIO request
						   */
	unsigned long		state;	/* R1BIO_* bit flags, see below */
	/*
	 * original bio going to /dev/mdx
	 */
	struct bio		*master_bio;
	/*
	 * if the IO is in READ direction, then this is where we read
	 */
	int			read_disk;

	struct list_head	retry_list;
	struct bitmap_update	*bitmap_update;
	/*
	 * if the IO is in WRITE direction, then multiple bios are used.
	 * We choose the number when they are allocated.
	 */
	struct bio		*bios[0];
	/* DO NOT PUT ANY NEW FIELDS HERE - bios array is contiguously alloced */
};
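/*
 * Because bios[] is the trailing, contiguously allocated array, the
 * size of an r1bio depends on the number of drives.  A sketch of the
 * sizing (the helper name is invented for illustration; in the driver
 * the equivalent sizing would back the r1bio_pool declared above):
 */
static inline r1bio_t *r1_example_alloc_r1bio(struct pool_info *pi, gfp_t gfp)
{
	/* one struct bio pointer per raid disk, right after the fixed fields */
	size_t size = offsetof(r1bio_t, bios[pi->raid_disks]);

	return kzalloc(size, gfp);
}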
/* when we get a read error on a read-only array, we redirect to another
 * device without failing the first device, or trying to over-write to
 * correct the read error.  To keep track of bad blocks on a per-bio
 * level, we store IO_BLOCKED in the appropriate 'bios' pointer.
 */
#define IO_BLOCKED ((struct bio*)1)
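/*
 * Example of checking a bios[] slot against the IO_BLOCKED marker when
 * walking an r1bio (a sketch; the helper name is invented):
 */
static inline int r1_example_disk_blocked(r1bio_t *r1_bio, int disk)
{
	/* IO_BLOCKED is a magic value, not a real bio pointer */
	return r1_bio->bios[disk] == IO_BLOCKED;
}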
/* bits for r1bio.state */
#define R1BIO_Uptodate		0
#define R1BIO_IsSync		1
#define R1BIO_Degraded		2
#define R1BIO_BehindIO		3
#define R1BIO_Barrier		4
#define R1BIO_BarrierRetry	5
/* For write-behind requests, we call bi_end_io when the last
 * non-write-behind device completes, provided any write was successful.
 * Otherwise we call it when any write-behind write succeeds; if all of
 * the writes fail, we call it with failure when the last write completes.
 * Record that bi_end_io was called with this flag...
 */
#define R1BIO_Returned		6
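/*
 * The R1BIO_* values are bit numbers in r1bio.state, used with the
 * atomic bitops.  A short sketch (the helper names are invented for
 * illustration, not taken from raid1.c):
 */
static inline void r1_example_note_write_failure(r1bio_t *r1_bio)
{
	/* at least one mirror missed this write */
	set_bit(R1BIO_Degraded, &r1_bio->state);
}

static inline int r1_example_io_ok(r1bio_t *r1_bio)
{
	return test_bit(R1BIO_Uptodate, &r1_bio->state);
}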