/*
 * [CONNECTOR]: Replace delayed work with usual work queue.
 * Source tree: linux-2.6/verdex.git — include/linux/raid/raid1.h
 * blob 0a9ba7c3302e2393e881244983a3397807e29dfb
 */
1 #ifndef _RAID1_H
2 #define _RAID1_H
4 #include <linux/raid/md.h>
6 typedef struct mirror_info mirror_info_t;
8 struct mirror_info {
9 mdk_rdev_t *rdev;
10 sector_t head_position;
14 * memory pools need a pointer to the mddev, so they can force an unplug
15 * when memory is tight, and a count of the number of drives that the
16 * pool was allocated for, so they know how much to allocate and free.
17 * mddev->raid_disks cannot be used, as it can change while a pool is active
18 * These two datums are stored in a kmalloced struct.
21 struct pool_info {
22 mddev_t *mddev;
23 int raid_disks;
27 typedef struct r1bio_s r1bio_t;
29 struct r1_private_data_s {
30 mddev_t *mddev;
31 mirror_info_t *mirrors;
32 int raid_disks;
33 int last_used;
34 sector_t next_seq_sect;
35 spinlock_t device_lock;
37 struct list_head retry_list;
38 /* queue pending writes and submit them on unplug */
39 struct bio_list pending_bio_list;
40 /* queue of writes that have been unplugged */
41 struct bio_list flushing_bio_list;
43 /* for use when syncing mirrors: */
45 spinlock_t resync_lock;
46 int nr_pending;
47 int nr_waiting;
48 int nr_queued;
49 int barrier;
50 sector_t next_resync;
51 int fullsync; /* set to 1 if a full sync is needed,
52 * (fresh device added).
53 * Cleared when a sync completes.
56 wait_queue_head_t wait_barrier;
58 struct pool_info *poolinfo;
60 struct page *tmppage;
62 mempool_t *r1bio_pool;
63 mempool_t *r1buf_pool;
66 typedef struct r1_private_data_s conf_t;
/*
 * this is the only point in the RAID code where we violate
 * C type safety. mddev->private is an 'opaque' pointer.
 */
#define mddev_to_conf(mddev) ((conf_t *) mddev->private)
75 * this is our 'private' RAID1 bio.
77 * it contains information about what kind of IO operations were started
78 * for this RAID1 operation, and about their status:
81 struct r1bio_s {
82 atomic_t remaining; /* 'have we finished' count,
83 * used from IRQ handlers
85 atomic_t behind_remaining; /* number of write-behind ios remaining
86 * in this BehindIO request
88 sector_t sector;
89 int sectors;
90 unsigned long state;
91 mddev_t *mddev;
93 * original bio going to /dev/mdx
95 struct bio *master_bio;
97 * if the IO is in READ direction, then this is where we read
99 int read_disk;
101 struct list_head retry_list;
102 struct bitmap_update *bitmap_update;
104 * if the IO is in WRITE direction, then multiple bios are used.
105 * We choose the number when they are allocated.
107 struct bio *bios[0];
108 /* DO NOT PUT ANY NEW FIELDS HERE - bios array is contiguously alloced*/
/* when we get a read error on a read-only array, we redirect to another
 * device without failing the first device, or trying to over-write to
 * correct the read error. To keep track of bad blocks on a per-bio
 * level, we store IO_BLOCKED in the appropriate 'bios' pointer
 */
#define IO_BLOCKED ((struct bio*)1)
/* bits for r1bio.state */
#define R1BIO_Uptodate		0
#define R1BIO_IsSync		1
#define R1BIO_Degraded		2
#define R1BIO_BehindIO		3
#define R1BIO_Barrier		4
#define R1BIO_BarrierRetry	5
/* For write-behind requests, we call bi_end_io when
 * the last non-write-behind device completes, providing
 * any write was successful. Otherwise we call when
 * any write-behind write succeeds, otherwise we call
 * with failure when last write completes (and all failed).
 * Record that bi_end_io was called with this flag...
 */
#define R1BIO_Returned		6
134 #endif