// SPDX-License-Identifier: GPL-2.0
/*
 * blk-integrity.c - Block layer data integrity extensions
 *
 * Copyright (C) 2007, 2008 Oracle Corporation
 * Written by: Martin K. Petersen <martin.petersen@oracle.com>
 */

#include <linux/blk-integrity.h>
#include <linux/backing-dev.h>
#include <linux/mempool.h>
#include <linux/bio.h>
#include <linux/scatterlist.h>
#include <linux/export.h>
#include <linux/slab.h>

#include "blk.h"

/**
 * blk_rq_count_integrity_sg - Count number of integrity scatterlist elements
 * @q:		request queue
 * @bio:	bio with integrity metadata attached
 *
 * Description: Returns the number of elements required in a
 * scatterlist corresponding to the integrity metadata in a bio.
 */
int blk_rq_count_integrity_sg(struct request_queue *q, struct bio *bio)
{
	struct bio_vec iv, ivprv = { NULL };
	unsigned int segments = 0;
	unsigned int seg_size = 0;
	struct bvec_iter iter;
	int prev = 0;

	bio_for_each_integrity_vec(iv, bio, iter) {

		if (prev) {
			if (!biovec_phys_mergeable(q, &ivprv, &iv))
				goto new_segment;

			if (seg_size + iv.bv_len > queue_max_segment_size(q))
				goto new_segment;

			seg_size += iv.bv_len;
		} else {
new_segment:
			segments++;
			seg_size = iv.bv_len;
		}

		prev = 1;
		ivprv = iv;
	}

	return segments;
}
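
/*
 * The segment count computed above is typically used to size the
 * scatterlist that is later passed to blk_rq_map_integrity_sg().
 */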

/**
 * blk_rq_map_integrity_sg - Map integrity metadata into a scatterlist
 * @rq:		request to map
 * @sglist:	target scatterlist
 *
 * Description: Map the integrity vectors in request into a
 * scatterlist.  The scatterlist must be big enough to hold all
 * elements.  I.e. sized using blk_rq_count_integrity_sg() or
 * rq->nr_integrity_segments.
 */
int blk_rq_map_integrity_sg(struct request *rq, struct scatterlist *sglist)
{
	struct bio_vec iv, ivprv = { NULL };
	struct request_queue *q = rq->q;
	struct scatterlist *sg = NULL;
	struct bio *bio = rq->bio;
	unsigned int segments = 0;
	struct bvec_iter iter;
	int prev = 0;

	bio_for_each_integrity_vec(iv, bio, iter) {

		if (prev) {
			if (!biovec_phys_mergeable(q, &ivprv, &iv))
				goto new_segment;

			if (sg->length + iv.bv_len > queue_max_segment_size(q))
				goto new_segment;

			sg->length += iv.bv_len;
		} else {
new_segment:
			if (!sg)
				sg = sglist;
			else {
				sg_unmark_end(sg);
				sg = sg_next(sg);
			}

			sg_set_page(sg, iv.bv_page, iv.bv_len, iv.bv_offset);
			segments++;
		}

		prev = 1;
		ivprv = iv;
	}

	if (sg)
		sg_mark_end(sg);

	/*
	 * Something must have gone wrong if the computed number of segments
	 * is bigger than the number of the request's physical integrity
	 * segments.
	 */
	BUG_ON(segments > rq->nr_integrity_segments);
	BUG_ON(segments > queue_max_integrity_segments(q));
	return segments;
}
EXPORT_SYMBOL(blk_rq_map_integrity_sg);
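
/*
 * Illustrative sketch (an assumption, not part of this file): a driver
 * that wants the protection information as a scatterlist would typically
 * pair the two helpers above; "prot_table" is a hypothetical sg_table
 * owned by the driver.
 *
 *	if (blk_integrity_rq(rq)) {
 *		int nr = blk_rq_count_integrity_sg(rq->q, rq->bio);
 *
 *		if (sg_alloc_table(&prot_table, nr, GFP_ATOMIC))
 *			goto err;
 *		blk_rq_map_integrity_sg(rq, prot_table.sgl);
 *	}
 */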

int blk_rq_integrity_map_user(struct request *rq, void __user *ubuf,
			      ssize_t bytes)
{
	int ret = bio_integrity_map_user(rq->bio, ubuf, bytes);

	if (ret)
		return ret;

	rq->nr_integrity_segments = blk_rq_count_integrity_sg(rq->q, rq->bio);
	rq->cmd_flags |= REQ_INTEGRITY;
	return 0;
}
EXPORT_SYMBOL_GPL(blk_rq_integrity_map_user);
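
/*
 * Callers such as the NVMe passthrough ioctl path use the helper above
 * to attach a user-space protection-information buffer to a request;
 * on success the request is marked REQ_INTEGRITY so the rest of the
 * stack accounts for the attached metadata.
 */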

bool blk_integrity_merge_rq(struct request_queue *q, struct request *req,
			    struct request *next)
{
	if (blk_integrity_rq(req) == 0 && blk_integrity_rq(next) == 0)
		return true;

	if (blk_integrity_rq(req) == 0 || blk_integrity_rq(next) == 0)
		return false;

	if (bio_integrity(req->bio)->bip_flags !=
	    bio_integrity(next->bio)->bip_flags)
		return false;

	if (req->nr_integrity_segments + next->nr_integrity_segments >
	    q->limits.max_integrity_segments)
		return false;

	if (integrity_req_gap_back_merge(req, next->bio))
		return false;

	return true;
}

bool blk_integrity_merge_bio(struct request_queue *q, struct request *req,
			     struct bio *bio)
{
	int nr_integrity_segs;

	if (blk_integrity_rq(req) == 0 && bio_integrity(bio) == NULL)
		return true;

	if (blk_integrity_rq(req) == 0 || bio_integrity(bio) == NULL)
		return false;

	if (bio_integrity(req->bio)->bip_flags != bio_integrity(bio)->bip_flags)
		return false;

	nr_integrity_segs = blk_rq_count_integrity_sg(q, bio);
	if (req->nr_integrity_segments + nr_integrity_segs >
	    q->limits.max_integrity_segments)
		return false;

	return true;
}
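
/*
 * Both merge helpers above are deliberately conservative: a merge is
 * only allowed when either neither side carries integrity metadata, or
 * both do with identical bip_flags and the combined segment count stays
 * within the queue's max_integrity_segments limit.
 */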

static inline struct blk_integrity *dev_to_bi(struct device *dev)
{
	return &dev_to_disk(dev)->queue->limits.integrity;
}

const char *blk_integrity_profile_name(struct blk_integrity *bi)
{
	switch (bi->csum_type) {
	case BLK_INTEGRITY_CSUM_IP:
		if (bi->flags & BLK_INTEGRITY_REF_TAG)
			return "T10-DIF-TYPE1-IP";
		return "T10-DIF-TYPE3-IP";
	case BLK_INTEGRITY_CSUM_CRC:
		if (bi->flags & BLK_INTEGRITY_REF_TAG)
			return "T10-DIF-TYPE1-CRC";
		return "T10-DIF-TYPE3-CRC";
	case BLK_INTEGRITY_CSUM_CRC64:
		if (bi->flags & BLK_INTEGRITY_REF_TAG)
			return "EXT-DIF-TYPE1-CRC64";
		return "EXT-DIF-TYPE3-CRC64";
	case BLK_INTEGRITY_CSUM_NONE:
		break;
	}

	return "nop";
}
EXPORT_SYMBOL_GPL(blk_integrity_profile_name);
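
/*
 * The names above follow the T10 DIF naming convention: profiles with a
 * reference tag report as Type 1, profiles without one as Type 3, and
 * "nop" is returned when no checksum type is configured.
 */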

static ssize_t flag_store(struct device *dev, const char *page, size_t count,
		unsigned char flag)
{
	struct request_queue *q = dev_to_disk(dev)->queue;
	struct queue_limits lim;
	unsigned long val;
	int err;

	err = kstrtoul(page, 10, &val);
	if (err)
		return err;

	/* note that the flags are inverted vs the values in the sysfs files */
	lim = queue_limits_start_update(q);
	if (val)
		lim.integrity.flags &= ~flag;
	else
		lim.integrity.flags |= flag;

	blk_mq_freeze_queue(q);
	err = queue_limits_commit_update(q, &lim);
	blk_mq_unfreeze_queue(q);
	if (err)
		return err;
	return count;
}
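
/*
 * Note the inverted polarity: writing "1" to the read_verify or
 * write_generate sysfs files clears the corresponding NOVERIFY /
 * NOGENERATE flag, i.e. it enables verification or generation.
 */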

static ssize_t flag_show(struct device *dev, char *page, unsigned char flag)
{
	struct blk_integrity *bi = dev_to_bi(dev);

	return sysfs_emit(page, "%d\n", !(bi->flags & flag));
}

static ssize_t format_show(struct device *dev, struct device_attribute *attr,
			   char *page)
{
	struct blk_integrity *bi = dev_to_bi(dev);

	if (!bi->tuple_size)
		return sysfs_emit(page, "none\n");
	return sysfs_emit(page, "%s\n", blk_integrity_profile_name(bi));
}

static ssize_t tag_size_show(struct device *dev, struct device_attribute *attr,
			     char *page)
{
	struct blk_integrity *bi = dev_to_bi(dev);

	return sysfs_emit(page, "%u\n", bi->tag_size);
}

static ssize_t protection_interval_bytes_show(struct device *dev,
					      struct device_attribute *attr,
					      char *page)
{
	struct blk_integrity *bi = dev_to_bi(dev);

	return sysfs_emit(page, "%u\n",
			  bi->interval_exp ? 1 << bi->interval_exp : 0);
}

static ssize_t read_verify_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *page, size_t count)
{
	return flag_store(dev, page, count, BLK_INTEGRITY_NOVERIFY);
}

static ssize_t read_verify_show(struct device *dev,
				struct device_attribute *attr, char *page)
{
	return flag_show(dev, page, BLK_INTEGRITY_NOVERIFY);
}

static ssize_t write_generate_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *page, size_t count)
{
	return flag_store(dev, page, count, BLK_INTEGRITY_NOGENERATE);
}

static ssize_t write_generate_show(struct device *dev,
				   struct device_attribute *attr, char *page)
{
	return flag_show(dev, page, BLK_INTEGRITY_NOGENERATE);
}

static ssize_t device_is_integrity_capable_show(struct device *dev,
						struct device_attribute *attr,
						char *page)
{
	struct blk_integrity *bi = dev_to_bi(dev);

	return sysfs_emit(page, "%u\n",
			  !!(bi->flags & BLK_INTEGRITY_DEVICE_CAPABLE));
}

static DEVICE_ATTR_RO(format);
static DEVICE_ATTR_RO(tag_size);
static DEVICE_ATTR_RO(protection_interval_bytes);
static DEVICE_ATTR_RW(read_verify);
static DEVICE_ATTR_RW(write_generate);
static DEVICE_ATTR_RO(device_is_integrity_capable);
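
/*
 * The attributes above are exposed through blk_integrity_attr_group,
 * i.e. under /sys/block/<disk>/integrity/ on capable devices.
 */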

static struct attribute *integrity_attrs[] = {
	&dev_attr_format.attr,
	&dev_attr_tag_size.attr,
	&dev_attr_protection_interval_bytes.attr,
	&dev_attr_read_verify.attr,
	&dev_attr_write_generate.attr,
	&dev_attr_device_is_integrity_capable.attr,
	NULL,
};

const struct attribute_group blk_integrity_attr_group = {
	.name = "integrity",
	.attrs = integrity_attrs,
};