1 // SPDX-License-Identifier: GPL-2.0
/*
 * t10_pi.c - Functions for generating and verifying T10 Protection
 * Information.
 */
7 #include <linux/t10-pi.h>
8 #include <linux/blk-integrity.h>
9 #include <linux/crc-t10dif.h>
10 #include <linux/crc64.h>
11 #include <net/checksum.h>
12 #include <asm/unaligned.h>
15 struct blk_integrity_iter
{
19 unsigned int data_size
;
20 unsigned short interval
;
21 const char *disk_name
;
24 static __be16
t10_pi_csum(__be16 csum
, void *data
, unsigned int len
,
25 unsigned char csum_type
)
27 if (csum_type
== BLK_INTEGRITY_CSUM_IP
)
28 return (__force __be16
)ip_compute_csum(data
, len
);
29 return cpu_to_be16(crc_t10dif_update(be16_to_cpu(csum
), data
, len
));
/*
 * Type 1 and Type 2 protection use the same format: 16 bit guard tag,
 * 16 bit app tag, 32 bit reference tag. Type 3 does not define the ref
 * tag.
 */
37 static void t10_pi_generate(struct blk_integrity_iter
*iter
,
38 struct blk_integrity
*bi
)
40 u8 offset
= bi
->pi_offset
;
43 for (i
= 0 ; i
< iter
->data_size
; i
+= iter
->interval
) {
44 struct t10_pi_tuple
*pi
= iter
->prot_buf
+ offset
;
46 pi
->guard_tag
= t10_pi_csum(0, iter
->data_buf
, iter
->interval
,
49 pi
->guard_tag
= t10_pi_csum(pi
->guard_tag
,
50 iter
->prot_buf
, offset
, bi
->csum_type
);
53 if (bi
->flags
& BLK_INTEGRITY_REF_TAG
)
54 pi
->ref_tag
= cpu_to_be32(lower_32_bits(iter
->seed
));
58 iter
->data_buf
+= iter
->interval
;
59 iter
->prot_buf
+= bi
->tuple_size
;
64 static blk_status_t
t10_pi_verify(struct blk_integrity_iter
*iter
,
65 struct blk_integrity
*bi
)
67 u8 offset
= bi
->pi_offset
;
70 for (i
= 0 ; i
< iter
->data_size
; i
+= iter
->interval
) {
71 struct t10_pi_tuple
*pi
= iter
->prot_buf
+ offset
;
74 if (bi
->flags
& BLK_INTEGRITY_REF_TAG
) {
75 if (pi
->app_tag
== T10_PI_APP_ESCAPE
)
78 if (be32_to_cpu(pi
->ref_tag
) !=
79 lower_32_bits(iter
->seed
)) {
80 pr_err("%s: ref tag error at location %llu " \
81 "(rcvd %u)\n", iter
->disk_name
,
83 iter
->seed
, be32_to_cpu(pi
->ref_tag
));
84 return BLK_STS_PROTECTION
;
87 if (pi
->app_tag
== T10_PI_APP_ESCAPE
&&
88 pi
->ref_tag
== T10_PI_REF_ESCAPE
)
92 csum
= t10_pi_csum(0, iter
->data_buf
, iter
->interval
,
95 csum
= t10_pi_csum(csum
, iter
->prot_buf
, offset
,
98 if (pi
->guard_tag
!= csum
) {
99 pr_err("%s: guard tag error at sector %llu " \
100 "(rcvd %04x, want %04x)\n", iter
->disk_name
,
101 (unsigned long long)iter
->seed
,
102 be16_to_cpu(pi
->guard_tag
), be16_to_cpu(csum
));
103 return BLK_STS_PROTECTION
;
107 iter
->data_buf
+= iter
->interval
;
108 iter
->prot_buf
+= bi
->tuple_size
;
116 * t10_pi_type1_prepare - prepare PI prior submitting request to device
117 * @rq: request with PI that should be prepared
119 * For Type 1/Type 2, the virtual start sector is the one that was
120 * originally submitted by the block layer for the ref_tag usage. Due to
121 * partitioning, MD/DM cloning, etc. the actual physical start sector is
122 * likely to be different. Remap protection information to match the
125 static void t10_pi_type1_prepare(struct request
*rq
)
127 struct blk_integrity
*bi
= &rq
->q
->limits
.integrity
;
128 const int tuple_sz
= bi
->tuple_size
;
129 u32 ref_tag
= t10_pi_ref_tag(rq
);
130 u8 offset
= bi
->pi_offset
;
133 __rq_for_each_bio(bio
, rq
) {
134 struct bio_integrity_payload
*bip
= bio_integrity(bio
);
135 u32 virt
= bip_get_seed(bip
) & 0xffffffff;
137 struct bvec_iter iter
;
139 /* Already remapped? */
140 if (bip
->bip_flags
& BIP_MAPPED_INTEGRITY
)
143 bip_for_each_vec(iv
, bip
, iter
) {
147 p
= bvec_kmap_local(&iv
);
148 for (j
= 0; j
< iv
.bv_len
; j
+= tuple_sz
) {
149 struct t10_pi_tuple
*pi
= p
+ offset
;
151 if (be32_to_cpu(pi
->ref_tag
) == virt
)
152 pi
->ref_tag
= cpu_to_be32(ref_tag
);
160 bip
->bip_flags
|= BIP_MAPPED_INTEGRITY
;
165 * t10_pi_type1_complete - prepare PI prior returning request to the blk layer
166 * @rq: request with PI that should be prepared
167 * @nr_bytes: total bytes to prepare
169 * For Type 1/Type 2, the virtual start sector is the one that was
170 * originally submitted by the block layer for the ref_tag usage. Due to
171 * partitioning, MD/DM cloning, etc. the actual physical start sector is
172 * likely to be different. Since the physical start sector was submitted
173 * to the device, we should remap it back to virtual values expected by the
176 static void t10_pi_type1_complete(struct request
*rq
, unsigned int nr_bytes
)
178 struct blk_integrity
*bi
= &rq
->q
->limits
.integrity
;
179 unsigned intervals
= nr_bytes
>> bi
->interval_exp
;
180 const int tuple_sz
= bi
->tuple_size
;
181 u32 ref_tag
= t10_pi_ref_tag(rq
);
182 u8 offset
= bi
->pi_offset
;
185 __rq_for_each_bio(bio
, rq
) {
186 struct bio_integrity_payload
*bip
= bio_integrity(bio
);
187 u32 virt
= bip_get_seed(bip
) & 0xffffffff;
189 struct bvec_iter iter
;
191 bip_for_each_vec(iv
, bip
, iter
) {
195 p
= bvec_kmap_local(&iv
);
196 for (j
= 0; j
< iv
.bv_len
&& intervals
; j
+= tuple_sz
) {
197 struct t10_pi_tuple
*pi
= p
+ offset
;
199 if (be32_to_cpu(pi
->ref_tag
) == ref_tag
)
200 pi
->ref_tag
= cpu_to_be32(virt
);
211 static __be64
ext_pi_crc64(u64 crc
, void *data
, unsigned int len
)
213 return cpu_to_be64(crc64_rocksoft_update(crc
, data
, len
));
216 static void ext_pi_crc64_generate(struct blk_integrity_iter
*iter
,
217 struct blk_integrity
*bi
)
219 u8 offset
= bi
->pi_offset
;
222 for (i
= 0 ; i
< iter
->data_size
; i
+= iter
->interval
) {
223 struct crc64_pi_tuple
*pi
= iter
->prot_buf
+ offset
;
225 pi
->guard_tag
= ext_pi_crc64(0, iter
->data_buf
, iter
->interval
);
227 pi
->guard_tag
= ext_pi_crc64(be64_to_cpu(pi
->guard_tag
),
228 iter
->prot_buf
, offset
);
231 if (bi
->flags
& BLK_INTEGRITY_REF_TAG
)
232 put_unaligned_be48(iter
->seed
, pi
->ref_tag
);
234 put_unaligned_be48(0ULL, pi
->ref_tag
);
236 iter
->data_buf
+= iter
->interval
;
237 iter
->prot_buf
+= bi
->tuple_size
;
242 static bool ext_pi_ref_escape(const u8 ref_tag
[6])
244 static const u8 ref_escape
[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
246 return memcmp(ref_tag
, ref_escape
, sizeof(ref_escape
)) == 0;
249 static blk_status_t
ext_pi_crc64_verify(struct blk_integrity_iter
*iter
,
250 struct blk_integrity
*bi
)
252 u8 offset
= bi
->pi_offset
;
255 for (i
= 0; i
< iter
->data_size
; i
+= iter
->interval
) {
256 struct crc64_pi_tuple
*pi
= iter
->prot_buf
+ offset
;
260 if (bi
->flags
& BLK_INTEGRITY_REF_TAG
) {
261 if (pi
->app_tag
== T10_PI_APP_ESCAPE
)
264 ref
= get_unaligned_be48(pi
->ref_tag
);
265 seed
= lower_48_bits(iter
->seed
);
267 pr_err("%s: ref tag error at location %llu (rcvd %llu)\n",
268 iter
->disk_name
, seed
, ref
);
269 return BLK_STS_PROTECTION
;
272 if (pi
->app_tag
== T10_PI_APP_ESCAPE
&&
273 ext_pi_ref_escape(pi
->ref_tag
))
277 csum
= ext_pi_crc64(0, iter
->data_buf
, iter
->interval
);
279 csum
= ext_pi_crc64(be64_to_cpu(csum
), iter
->prot_buf
,
282 if (pi
->guard_tag
!= csum
) {
283 pr_err("%s: guard tag error at sector %llu " \
284 "(rcvd %016llx, want %016llx)\n",
285 iter
->disk_name
, (unsigned long long)iter
->seed
,
286 be64_to_cpu(pi
->guard_tag
), be64_to_cpu(csum
));
287 return BLK_STS_PROTECTION
;
291 iter
->data_buf
+= iter
->interval
;
292 iter
->prot_buf
+= bi
->tuple_size
;
299 static void ext_pi_type1_prepare(struct request
*rq
)
301 struct blk_integrity
*bi
= &rq
->q
->limits
.integrity
;
302 const int tuple_sz
= bi
->tuple_size
;
303 u64 ref_tag
= ext_pi_ref_tag(rq
);
304 u8 offset
= bi
->pi_offset
;
307 __rq_for_each_bio(bio
, rq
) {
308 struct bio_integrity_payload
*bip
= bio_integrity(bio
);
309 u64 virt
= lower_48_bits(bip_get_seed(bip
));
311 struct bvec_iter iter
;
313 /* Already remapped? */
314 if (bip
->bip_flags
& BIP_MAPPED_INTEGRITY
)
317 bip_for_each_vec(iv
, bip
, iter
) {
321 p
= bvec_kmap_local(&iv
);
322 for (j
= 0; j
< iv
.bv_len
; j
+= tuple_sz
) {
323 struct crc64_pi_tuple
*pi
= p
+ offset
;
324 u64 ref
= get_unaligned_be48(pi
->ref_tag
);
327 put_unaligned_be48(ref_tag
, pi
->ref_tag
);
335 bip
->bip_flags
|= BIP_MAPPED_INTEGRITY
;
339 static void ext_pi_type1_complete(struct request
*rq
, unsigned int nr_bytes
)
341 struct blk_integrity
*bi
= &rq
->q
->limits
.integrity
;
342 unsigned intervals
= nr_bytes
>> bi
->interval_exp
;
343 const int tuple_sz
= bi
->tuple_size
;
344 u64 ref_tag
= ext_pi_ref_tag(rq
);
345 u8 offset
= bi
->pi_offset
;
348 __rq_for_each_bio(bio
, rq
) {
349 struct bio_integrity_payload
*bip
= bio_integrity(bio
);
350 u64 virt
= lower_48_bits(bip_get_seed(bip
));
352 struct bvec_iter iter
;
354 bip_for_each_vec(iv
, bip
, iter
) {
358 p
= bvec_kmap_local(&iv
);
359 for (j
= 0; j
< iv
.bv_len
&& intervals
; j
+= tuple_sz
) {
360 struct crc64_pi_tuple
*pi
= p
+ offset
;
361 u64 ref
= get_unaligned_be48(pi
->ref_tag
);
364 put_unaligned_be48(virt
, pi
->ref_tag
);
375 void blk_integrity_generate(struct bio
*bio
)
377 struct blk_integrity
*bi
= blk_get_integrity(bio
->bi_bdev
->bd_disk
);
378 struct bio_integrity_payload
*bip
= bio_integrity(bio
);
379 struct blk_integrity_iter iter
;
380 struct bvec_iter bviter
;
383 iter
.disk_name
= bio
->bi_bdev
->bd_disk
->disk_name
;
384 iter
.interval
= 1 << bi
->interval_exp
;
385 iter
.seed
= bio
->bi_iter
.bi_sector
;
386 iter
.prot_buf
= bvec_virt(bip
->bip_vec
);
387 bio_for_each_segment(bv
, bio
, bviter
) {
388 void *kaddr
= bvec_kmap_local(&bv
);
390 iter
.data_buf
= kaddr
;
391 iter
.data_size
= bv
.bv_len
;
392 switch (bi
->csum_type
) {
393 case BLK_INTEGRITY_CSUM_CRC64
:
394 ext_pi_crc64_generate(&iter
, bi
);
396 case BLK_INTEGRITY_CSUM_CRC
:
397 case BLK_INTEGRITY_CSUM_IP
:
398 t10_pi_generate(&iter
, bi
);
407 void blk_integrity_verify(struct bio
*bio
)
409 struct blk_integrity
*bi
= blk_get_integrity(bio
->bi_bdev
->bd_disk
);
410 struct bio_integrity_payload
*bip
= bio_integrity(bio
);
411 struct blk_integrity_iter iter
;
412 struct bvec_iter bviter
;
416 * At the moment verify is called bi_iter has been advanced during split
417 * and completion, so use the copy created during submission here.
419 iter
.disk_name
= bio
->bi_bdev
->bd_disk
->disk_name
;
420 iter
.interval
= 1 << bi
->interval_exp
;
421 iter
.seed
= bip
->bio_iter
.bi_sector
;
422 iter
.prot_buf
= bvec_virt(bip
->bip_vec
);
423 __bio_for_each_segment(bv
, bio
, bviter
, bip
->bio_iter
) {
424 void *kaddr
= bvec_kmap_local(&bv
);
425 blk_status_t ret
= BLK_STS_OK
;
427 iter
.data_buf
= kaddr
;
428 iter
.data_size
= bv
.bv_len
;
429 switch (bi
->csum_type
) {
430 case BLK_INTEGRITY_CSUM_CRC64
:
431 ret
= ext_pi_crc64_verify(&iter
, bi
);
433 case BLK_INTEGRITY_CSUM_CRC
:
434 case BLK_INTEGRITY_CSUM_IP
:
435 ret
= t10_pi_verify(&iter
, bi
);
443 bio
->bi_status
= ret
;
449 void blk_integrity_prepare(struct request
*rq
)
451 struct blk_integrity
*bi
= &rq
->q
->limits
.integrity
;
453 if (!(bi
->flags
& BLK_INTEGRITY_REF_TAG
))
456 if (bi
->csum_type
== BLK_INTEGRITY_CSUM_CRC64
)
457 ext_pi_type1_prepare(rq
);
459 t10_pi_type1_prepare(rq
);
462 void blk_integrity_complete(struct request
*rq
, unsigned int nr_bytes
)
464 struct blk_integrity
*bi
= &rq
->q
->limits
.integrity
;
466 if (!(bi
->flags
& BLK_INTEGRITY_REF_TAG
))
469 if (bi
->csum_type
== BLK_INTEGRITY_CSUM_CRC64
)
470 ext_pi_type1_complete(rq
, nr_bytes
);
472 t10_pi_type1_complete(rq
, nr_bytes
);