/*
 * Block data types and constants. Directly include this file only to
 * break include dependency loop.
 */
#ifndef __LINUX_BLK_TYPES_H
#define __LINUX_BLK_TYPES_H

#include <linux/types.h>
#include <linux/bvec.h>
struct bio_set;
struct bio;
struct bio_integrity_payload;
struct page;
struct block_device;
struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);
/*
 * main unit of I/O for the block layer and lower layers (ie drivers and
 * stacking drivers)
 */
struct bio {
	struct bio		*bi_next;	/* request queue link */
	struct block_device	*bi_bdev;
	int			bi_error;
	unsigned int		bi_opf;		/* bottom bits req flags,
						 * top bits REQ_OP. Use
						 * accessors.
						 */
	unsigned short		bi_flags;	/* status, command, etc */
	unsigned short		bi_ioprio;

	struct bvec_iter	bi_iter;

	/* Number of segments in this BIO after
	 * physical address coalescing is performed.
	 */
	unsigned int		bi_phys_segments;

	/*
	 * To keep track of the max segment size, we account for the
	 * sizes of the first and last mergeable segments in this bio.
	 */
	unsigned int		bi_seg_front_size;
	unsigned int		bi_seg_back_size;

	atomic_t		__bi_remaining;

	bio_end_io_t		*bi_end_io;

	void			*bi_private;
#ifdef CONFIG_BLK_CGROUP
	/*
	 * Optional ioc and css associated with this bio. Put on bio
	 * release. Read comment on top of bio_associate_current().
	 */
	struct io_context	*bi_ioc;
	struct cgroup_subsys_state	*bi_css;
#endif
	union {
#if defined(CONFIG_BLK_DEV_INTEGRITY)
		struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
	};
	unsigned short		bi_vcnt;	/* how many bio_vec's */

	/*
	 * Everything starting with bi_max_vecs will be preserved by bio_reset()
	 */

	unsigned short		bi_max_vecs;	/* max bvl_vecs we can hold */

	atomic_t		__bi_cnt;	/* pin count */

	struct bio_vec		*bi_io_vec;	/* the actual vec list */

	struct bio_set		*bi_pool;

	/*
	 * We can inline a number of vecs at the end of the bio, to avoid
	 * double allocations for a small number of bio_vecs. This member
	 * MUST obviously be kept at the very end of the bio.
	 */
	struct bio_vec		bi_inline_vecs[0];
};

#define BIO_RESET_BYTES		offsetof(struct bio, bi_max_vecs)
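
/*
 * Illustrative sketch, not part of the original header: bio_reset()
 * (implemented in block/bio.c) clears only the first BIO_RESET_BYTES
 * bytes of the bio, which is why everything from bi_max_vecs onward
 * survives a reset. The helper name below is hypothetical; a byte loop
 * stands in for memset() to keep this sketch free of extra includes.
 */
static inline void example_clear_resettable_prefix(struct bio *bio)
{
	char *p = (char *)bio;
	size_t i;

	/* zero the fields laid out before bi_max_vecs */
	for (i = 0; i < BIO_RESET_BYTES; i++)
		p[i] = 0;
}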
/*
 * bio flags
 */
#define BIO_SEG_VALID	1	/* bi_phys_segments valid */
#define BIO_CLONED	2	/* doesn't own data */
#define BIO_BOUNCED	3	/* bio is a bounce bio */
#define BIO_USER_MAPPED 4	/* contains user pages */
#define BIO_NULL_MAPPED 5	/* contains invalid user pages */
#define BIO_QUIET	6	/* Make BIO Quiet */
#define BIO_CHAIN	7	/* chained bio, ->bi_remaining in effect */
#define BIO_REFFED	8	/* bio has elevated ->bi_cnt */
#define BIO_THROTTLED	9	/* This bio has already been subjected to
				 * throttling rules. Don't do it again. */
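
/*
 * Illustrative sketch, not part of the original header: the BIO_* values
 * above are bit numbers rather than masks, so callers test them with a
 * shift. The real accessor is bio_flagged() in <linux/bio.h>; the helper
 * name below is hypothetical.
 */
static inline bool example_bio_flag_test(struct bio *bio, unsigned int bit)
{
	/* e.g. bit == BIO_CLONED: this bio does not own its data pages */
	return (bio->bi_flags & (1U << bit)) != 0;
}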
/*
 * Flags starting here get preserved by bio_reset() - this includes
 * BVEC_POOL_IDX()
 */
#define BIO_RESET_BITS	10
/*
 * We support 6 different bvec pools, the last one is magic in that it
 * is backed by a mempool.
 */
#define BVEC_POOL_NR		6
#define BVEC_POOL_MAX		(BVEC_POOL_NR - 1)
/*
 * Top 4 bits of bio flags indicate the pool the bvecs came from. We add
 * 1 to the actual index so that 0 indicates that there are no bvecs to be
 * freed.
 */
#define BVEC_POOL_BITS		(4)
#define BVEC_POOL_OFFSET	(16 - BVEC_POOL_BITS)
#define BVEC_POOL_IDX(bio)	((bio)->bi_flags >> BVEC_POOL_OFFSET)
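
/*
 * Illustrative sketch with a hypothetical helper: the pool index is
 * stored biased by 1 in the top BVEC_POOL_BITS of bi_flags, so a bio
 * whose flag bits are all zero reports BVEC_POOL_IDX() == 0, meaning
 * there are no bvecs to free.
 */
static inline void example_set_bvec_pool_idx(struct bio *bio, unsigned int idx)
{
	bio->bi_flags &= (1U << BVEC_POOL_OFFSET) - 1;	/* clear old pool bits */
	bio->bi_flags |= (idx + 1) << BVEC_POOL_OFFSET;	/* store idx + 1 */
}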
/*
 * Operations and flags common to the bio and request structures.
 * We use 8 bits for encoding the operation, and the remaining 24 for flags.
 *
 * The least significant bit of the operation number indicates the data
 * transfer direction:
 *
 *   - if the least significant bit is set transfers are TO the device
 *   - if the least significant bit is not set transfers are FROM the device
 *
 * If an operation does not transfer data the least significant bit has no
 * meaning.
 */
#define REQ_OP_BITS	8
#define REQ_OP_MASK	((1 << REQ_OP_BITS) - 1)
#define REQ_FLAG_BITS	24
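
/*
 * Illustrative sketch with a hypothetical helper: bi_opf (and a
 * request's cmd_flags) carries the REQ_OP_* number in its low
 * REQ_OP_BITS and the REQ_* flags in the remaining bits, so the two
 * halves can be separated with REQ_OP_MASK.
 */
static inline void example_split_opf(unsigned int opf,
				     unsigned int *op, unsigned int *flags)
{
	*op = opf & REQ_OP_MASK;	/* low 8 bits: the operation */
	*flags = opf & ~REQ_OP_MASK;	/* upper bits: REQ_* flags */
}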
enum req_opf {
	/* read sectors from the device */
	REQ_OP_READ		= 0,
	/* write sectors to the device */
	REQ_OP_WRITE		= 1,
	/* flush the volatile write cache */
	REQ_OP_FLUSH		= 2,
	/* discard sectors */
	REQ_OP_DISCARD		= 3,
	/* get zone information */
	REQ_OP_ZONE_REPORT	= 4,
	/* securely erase sectors */
	REQ_OP_SECURE_ERASE	= 5,
	/* reset a zone write pointer */
	REQ_OP_ZONE_RESET	= 6,
	/* write the same sector many times */
	REQ_OP_WRITE_SAME	= 7,
	/* write the zero filled sector many times */
	REQ_OP_WRITE_ZEROES	= 8,

	REQ_OP_LAST,
};
enum req_flag_bits {
	__REQ_FAILFAST_DEV =	/* no driver retries of device errors */
		REQ_OP_BITS,
	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
	__REQ_SYNC,		/* request is sync (sync write or read) */
	__REQ_META,		/* metadata io request */
	__REQ_PRIO,		/* boost priority in cfq */
	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_IDLE,		/* anticipate more IO after this one */
	__REQ_INTEGRITY,	/* I/O includes block integrity payload */
	__REQ_FUA,		/* forced unit access */
	__REQ_PREFLUSH,		/* request for cache flush */
	__REQ_RAHEAD,		/* read ahead, can fail anytime */
	__REQ_BACKGROUND,	/* background IO */
	__REQ_NR_BITS,		/* stops here */
};
#define REQ_FAILFAST_DEV	(1ULL << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT	(1ULL << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER	(1ULL << __REQ_FAILFAST_DRIVER)
#define REQ_SYNC		(1ULL << __REQ_SYNC)
#define REQ_META		(1ULL << __REQ_META)
#define REQ_PRIO		(1ULL << __REQ_PRIO)
#define REQ_NOMERGE		(1ULL << __REQ_NOMERGE)
#define REQ_IDLE		(1ULL << __REQ_IDLE)
#define REQ_INTEGRITY		(1ULL << __REQ_INTEGRITY)
#define REQ_FUA			(1ULL << __REQ_FUA)
#define REQ_PREFLUSH		(1ULL << __REQ_PREFLUSH)
#define REQ_RAHEAD		(1ULL << __REQ_RAHEAD)
#define REQ_BACKGROUND		(1ULL << __REQ_BACKGROUND)
#define REQ_FAILFAST_MASK \
	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)

#define REQ_NOMERGE_FLAGS \
	(REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)

#define bio_op(bio) \
	((bio)->bi_opf & REQ_OP_MASK)
#define req_op(req) \
	((req)->cmd_flags & REQ_OP_MASK)
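
/*
 * Illustrative sketch with a hypothetical helper: bio_op() masks off the
 * flag bits, so operation checks compare against bare REQ_OP_* values
 * instead of the raw bi_opf word.
 */
static inline bool example_bio_is_discard(struct bio *bio)
{
	return bio_op(bio) == REQ_OP_DISCARD;
}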
/* obsolete, don't use in new code */
static inline void bio_set_op_attrs(struct bio *bio, unsigned op,
		unsigned op_flags)
{
	bio->bi_opf = op | op_flags;
}
static inline bool op_is_write(unsigned int op)
{
	return (op & 1);
}
/*
 * Reads are always treated as synchronous, as are requests with the FUA or
 * PREFLUSH flag. Other operations may be marked as synchronous using the
 * REQ_SYNC flag.
 */
static inline bool op_is_sync(unsigned int op)
{
	return (op & REQ_OP_MASK) == REQ_OP_READ ||
		(op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
}
typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE		-1U
#define BLK_QC_T_SHIFT		16

static inline bool blk_qc_t_valid(blk_qc_t cookie)
{
	return cookie != BLK_QC_T_NONE;
}
static inline blk_qc_t blk_tag_to_qc_t(unsigned int tag, unsigned int queue_num)
{
	return tag | (queue_num << BLK_QC_T_SHIFT);
}
static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
{
	return cookie >> BLK_QC_T_SHIFT;
}
static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
{
	return cookie & ((1u << BLK_QC_T_SHIFT) - 1);
}
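
/*
 * Illustrative sketch with hypothetical example values: a cookie built
 * by blk_tag_to_qc_t() round-trips through the two decoders above
 * because tag and queue number occupy disjoint bit ranges.
 */
static inline bool example_qc_t_roundtrip(void)
{
	blk_qc_t cookie = blk_tag_to_qc_t(42, 3);	/* tag 42, hw queue 3 */

	return blk_qc_t_to_queue_num(cookie) == 3 &&
	       blk_qc_t_to_tag(cookie) == 42;		/* always true */
}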
struct blk_issue_stat {
	u64 time;
};

#define BLK_RQ_STAT_BATCH	64
#endif /* __LINUX_BLK_TYPES_H */