// SPDX-License-Identifier: GPL-2.0
/*
 * t10_pi.c - Functions for generating and verifying T10 Protection
 *            Information.
 */

#include <linux/t10-pi.h>
#include <linux/blkdev.h>
#include <linux/crc-t10dif.h>
#include <linux/module.h>
#include <net/checksum.h>

typedef __be16 (csum_fn) (void *, unsigned int);
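
/*
 * For reference: the 8-byte tuple these routines operate on, as defined
 * in <linux/t10-pi.h>:
 *
 *      struct t10_pi_tuple {
 *              __be16 guard_tag;       // checksum of the data interval
 *              __be16 app_tag;         // application tag, opaque here
 *              __be32 ref_tag;         // low 32 bits of the target LBA
 *      };
 *
 * One tuple is interleaved per protection interval, typically one per
 * 512-byte or 4096-byte logical block.
 */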

static __be16 t10_pi_crc_fn(void *data, unsigned int len)
{
        return cpu_to_be16(crc_t10dif(data, len));
}

static __be16 t10_pi_ip_fn(void *data, unsigned int len)
{
        return (__force __be16)ip_compute_csum(data, len);
}

/*
 * Type 1 and Type 2 protection use the same format: 16 bit guard tag,
 * 16 bit app tag, 32 bit reference tag. Type 3 does not define the ref
 * tag.
 */
static blk_status_t t10_pi_generate(struct blk_integrity_iter *iter,
                csum_fn *fn, enum t10_dif_type type)
{
        unsigned int i;

        for (i = 0 ; i < iter->data_size ; i += iter->interval) {
                struct t10_pi_tuple *pi = iter->prot_buf;

                pi->guard_tag = fn(iter->data_buf, iter->interval);
                pi->app_tag = 0;

                if (type == T10_PI_TYPE1_PROTECTION)
                        pi->ref_tag = cpu_to_be32(lower_32_bits(iter->seed));
                else
                        pi->ref_tag = 0;

                iter->data_buf += iter->interval;
                iter->prot_buf += sizeof(struct t10_pi_tuple);
                iter->seed++;
        }

        return BLK_STS_OK;
}
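
/*
 * Illustrative sketch (not from this file; the buffer names are
 * hypothetical): generating Type 1 PI over a 4 KiB buffer with 512-byte
 * intervals, starting at sector 100, fills eight tuples whose ref tags
 * follow the seed:
 *
 *      struct blk_integrity_iter iter = {
 *              .data_buf  = data,      // 4096 bytes of user data
 *              .prot_buf  = pi_buf,    // room for 8 * 8-byte tuples
 *              .data_size = 4096,
 *              .interval  = 512,
 *              .seed      = 100,       // first logical block address
 *              .disk_name = "sda",
 *      };
 *
 *      t10_pi_generate(&iter, t10_pi_crc_fn, T10_PI_TYPE1_PROTECTION);
 *      // pi_buf[0..7].ref_tag now hold cpu_to_be32(100)..cpu_to_be32(107),
 *      // and each guard_tag is the CRC of its 512-byte interval.
 */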

static blk_status_t t10_pi_verify(struct blk_integrity_iter *iter,
                csum_fn *fn, enum t10_dif_type type)
{
        unsigned int i;

        BUG_ON(type == T10_PI_TYPE0_PROTECTION);

        for (i = 0 ; i < iter->data_size ; i += iter->interval) {
                struct t10_pi_tuple *pi = iter->prot_buf;
                __be16 csum;

                if (type == T10_PI_TYPE1_PROTECTION ||
                    type == T10_PI_TYPE2_PROTECTION) {
                        if (pi->app_tag == T10_PI_APP_ESCAPE)
                                goto next;

                        if (be32_to_cpu(pi->ref_tag) !=
                            lower_32_bits(iter->seed)) {
                                pr_err("%s: ref tag error at location %llu (rcvd %u)\n",
                                       iter->disk_name,
                                       (unsigned long long)iter->seed,
                                       be32_to_cpu(pi->ref_tag));
                                return BLK_STS_PROTECTION;
                        }
                } else if (type == T10_PI_TYPE3_PROTECTION) {
                        if (pi->app_tag == T10_PI_APP_ESCAPE &&
                            pi->ref_tag == T10_PI_REF_ESCAPE)
                                goto next;
                }

                csum = fn(iter->data_buf, iter->interval);

                if (pi->guard_tag != csum) {
                        pr_err("%s: guard tag error at sector %llu (rcvd %04x, want %04x)\n",
                               iter->disk_name,
                               (unsigned long long)iter->seed,
                               be16_to_cpu(pi->guard_tag), be16_to_cpu(csum));
                        return BLK_STS_PROTECTION;
                }

next:
                iter->data_buf += iter->interval;
                iter->prot_buf += sizeof(struct t10_pi_tuple);
                iter->seed++;
        }

        return BLK_STS_OK;
}
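
/*
 * Note on the escape values tested above: an initiator can exempt a
 * tuple from checking by storing the escape pattern when it generates
 * PI. A minimal sketch (assumed caller code, not from this file):
 *
 *      pi->app_tag = T10_PI_APP_ESCAPE;        // cpu_to_be16(0xffff)
 *      pi->ref_tag = T10_PI_REF_ESCAPE;        // cpu_to_be32(0xffffffff)
 *
 * For Type 1/2 the app tag escape alone skips the tuple; Type 3 only
 * skips when both tags carry their escape values, as the code above
 * shows.
 */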

static blk_status_t t10_pi_type1_generate_crc(struct blk_integrity_iter *iter)
{
        return t10_pi_generate(iter, t10_pi_crc_fn, T10_PI_TYPE1_PROTECTION);
}

static blk_status_t t10_pi_type1_generate_ip(struct blk_integrity_iter *iter)
{
        return t10_pi_generate(iter, t10_pi_ip_fn, T10_PI_TYPE1_PROTECTION);
}

static blk_status_t t10_pi_type1_verify_crc(struct blk_integrity_iter *iter)
{
        return t10_pi_verify(iter, t10_pi_crc_fn, T10_PI_TYPE1_PROTECTION);
}

static blk_status_t t10_pi_type1_verify_ip(struct blk_integrity_iter *iter)
{
        return t10_pi_verify(iter, t10_pi_ip_fn, T10_PI_TYPE1_PROTECTION);
}

/**
 * t10_pi_type1_prepare - prepare PI prior to submitting request to device
 * @rq: request with PI that should be prepared
 *
 * For Type 1/Type 2, the virtual start sector is the one that was
 * originally submitted by the block layer for the ref_tag usage. Due to
 * partitioning, MD/DM cloning, etc. the actual physical start sector is
 * likely to be different. Remap protection information to match the
 * physical LBA.
 */
static void t10_pi_type1_prepare(struct request *rq)
{
        const int tuple_sz = rq->q->integrity.tuple_size;
        u32 ref_tag = t10_pi_ref_tag(rq);
        struct bio *bio;

        __rq_for_each_bio(bio, rq) {
                struct bio_integrity_payload *bip = bio_integrity(bio);
                u32 virt = bip_get_seed(bip) & 0xffffffff;
                struct bio_vec iv;
                struct bvec_iter iter;

                /* Already remapped? */
                if (bip->bip_flags & BIP_MAPPED_INTEGRITY)
                        break;

                bip_for_each_vec(iv, bip, iter) {
                        void *p, *pmap;
                        unsigned int j;

                        pmap = kmap_atomic(iv.bv_page);
                        p = pmap + iv.bv_offset;
                        for (j = 0; j < iv.bv_len; j += tuple_sz) {
                                struct t10_pi_tuple *pi = p;

                                if (be32_to_cpu(pi->ref_tag) == virt)
                                        pi->ref_tag = cpu_to_be32(ref_tag);
                                virt++;
                                ref_tag++;
                                p += tuple_sz;
                        }

                        kunmap_atomic(pmap);
                }

                bip->bip_flags |= BIP_MAPPED_INTEGRITY;
        }
}
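
/*
 * Worked example (hypothetical numbers, not from this file): for a bio
 * aimed at a partition that starts at physical sector 2048, with a
 * virtual seed of 0, the generate pass stamped ref tags 0, 1, 2, ...
 * The loop above rewrites each matching tuple:
 *
 *      // virt: 0, 1, 2, ...  ->  ref_tag: 2048, 2049, 2050, ...
 *      if (be32_to_cpu(pi->ref_tag) == virt)
 *              pi->ref_tag = cpu_to_be32(ref_tag);
 *
 * so the device's own Type 1 check compares against the LBAs it
 * actually receives. Tuples that do not match the expected virtual
 * value (e.g. escape values) are deliberately left untouched.
 */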

/**
 * t10_pi_type1_complete - prepare PI prior to returning request to the block layer
 * @rq: request with PI that should be prepared
 * @nr_bytes: total bytes to prepare
 *
 * For Type 1/Type 2, the virtual start sector is the one that was
 * originally submitted by the block layer for the ref_tag usage. Due to
 * partitioning, MD/DM cloning, etc. the actual physical start sector is
 * likely to be different. Since the physical start sector was submitted
 * to the device, we should remap it back to virtual values expected by
 * the block layer.
 */
static void t10_pi_type1_complete(struct request *rq, unsigned int nr_bytes)
{
        unsigned intervals = nr_bytes >> rq->q->integrity.interval_exp;
        const int tuple_sz = rq->q->integrity.tuple_size;
        u32 ref_tag = t10_pi_ref_tag(rq);
        struct bio *bio;

        __rq_for_each_bio(bio, rq) {
                struct bio_integrity_payload *bip = bio_integrity(bio);
                u32 virt = bip_get_seed(bip) & 0xffffffff;
                struct bio_vec iv;
                struct bvec_iter iter;

                bip_for_each_vec(iv, bip, iter) {
                        void *p, *pmap;
                        unsigned int j;

                        pmap = kmap_atomic(iv.bv_page);
                        p = pmap + iv.bv_offset;
                        for (j = 0; j < iv.bv_len && intervals; j += tuple_sz) {
                                struct t10_pi_tuple *pi = p;

                                if (be32_to_cpu(pi->ref_tag) == ref_tag)
                                        pi->ref_tag = cpu_to_be32(virt);
                                virt++;
                                ref_tag++;
                                intervals--;
                                p += tuple_sz;
                        }

                        kunmap_atomic(pmap);
                }
        }
}
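
/*
 * Note (illustrative arithmetic, not from this file): requests can
 * complete in pieces, so the walk above is bounded by `intervals`,
 * derived from nr_bytes rather than from the bios alone. With 512-byte
 * protection intervals (interval_exp == 9), a partial completion of
 * nr_bytes == 4096 yields
 *
 *      intervals = 4096 >> 9 = 8
 *
 * so only the first eight tuples are remapped back to virtual ref tags;
 * the remainder is handled when the rest of the request completes.
 */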

static blk_status_t t10_pi_type3_generate_crc(struct blk_integrity_iter *iter)
{
        return t10_pi_generate(iter, t10_pi_crc_fn, T10_PI_TYPE3_PROTECTION);
}

static blk_status_t t10_pi_type3_generate_ip(struct blk_integrity_iter *iter)
{
        return t10_pi_generate(iter, t10_pi_ip_fn, T10_PI_TYPE3_PROTECTION);
}

static blk_status_t t10_pi_type3_verify_crc(struct blk_integrity_iter *iter)
{
        return t10_pi_verify(iter, t10_pi_crc_fn, T10_PI_TYPE3_PROTECTION);
}

static blk_status_t t10_pi_type3_verify_ip(struct blk_integrity_iter *iter)
{
        return t10_pi_verify(iter, t10_pi_ip_fn, T10_PI_TYPE3_PROTECTION);
}

/* Type 3 does not have a reference tag so no remapping is required. */
static void t10_pi_type3_prepare(struct request *rq)
{
}

/* Type 3 does not have a reference tag so no remapping is required. */
static void t10_pi_type3_complete(struct request *rq, unsigned int nr_bytes)
{
}

const struct blk_integrity_profile t10_pi_type1_crc = {
        .name                   = "T10-DIF-TYPE1-CRC",
        .generate_fn            = t10_pi_type1_generate_crc,
        .verify_fn              = t10_pi_type1_verify_crc,
        .prepare_fn             = t10_pi_type1_prepare,
        .complete_fn            = t10_pi_type1_complete,
};
EXPORT_SYMBOL(t10_pi_type1_crc);

const struct blk_integrity_profile t10_pi_type1_ip = {
        .name                   = "T10-DIF-TYPE1-IP",
        .generate_fn            = t10_pi_type1_generate_ip,
        .verify_fn              = t10_pi_type1_verify_ip,
        .prepare_fn             = t10_pi_type1_prepare,
        .complete_fn            = t10_pi_type1_complete,
};
EXPORT_SYMBOL(t10_pi_type1_ip);

const struct blk_integrity_profile t10_pi_type3_crc = {
        .name                   = "T10-DIF-TYPE3-CRC",
        .generate_fn            = t10_pi_type3_generate_crc,
        .verify_fn              = t10_pi_type3_verify_crc,
        .prepare_fn             = t10_pi_type3_prepare,
        .complete_fn            = t10_pi_type3_complete,
};
EXPORT_SYMBOL(t10_pi_type3_crc);

const struct blk_integrity_profile t10_pi_type3_ip = {
        .name                   = "T10-DIF-TYPE3-IP",
        .generate_fn            = t10_pi_type3_generate_ip,
        .verify_fn              = t10_pi_type3_verify_ip,
        .prepare_fn             = t10_pi_type3_prepare,
        .complete_fn            = t10_pi_type3_complete,
};
EXPORT_SYMBOL(t10_pi_type3_ip);
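
/*
 * Usage sketch (hypothetical helper, not from this file; the in-tree
 * consumer of these profiles is the SCSI disk driver): a driver adopts
 * a profile by filling a struct blk_integrity and registering it
 * against its gendisk:
 *
 *      static void example_enable_pi(struct gendisk *disk)
 *      {
 *              struct blk_integrity bi = {
 *                      .profile      = &t10_pi_type1_crc,
 *                      .tuple_size   = sizeof(struct t10_pi_tuple),
 *                      .interval_exp = 9,      // 512-byte intervals
 *              };
 *
 *              blk_integrity_register(disk, &bi);
 *      }
 */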

MODULE_LICENSE("GPL");