// SPDX-License-Identifier: GPL-2.0
/*
 * t10_pi.c - Functions for generating and verifying T10 Protection
 *	      Information.
 */

#include <linux/t10-pi.h>
#include <linux/blk-integrity.h>
#include <linux/crc-t10dif.h>
#include <linux/crc64.h>
#include <linux/module.h>
#include <net/checksum.h>
#include <asm/unaligned.h>

typedef __be16 (csum_fn) (void *, unsigned int);

static __be16 t10_pi_crc_fn(void *data, unsigned int len)
{
	return cpu_to_be16(crc_t10dif(data, len));
}

static __be16 t10_pi_ip_fn(void *data, unsigned int len)
{
	return (__force __be16)ip_compute_csum(data, len);
}

/*
 * Type 1 and Type 2 protection use the same format: 16 bit guard tag,
 * 16 bit app tag, 32 bit reference tag. Type 3 does not define the ref
 * tag.
 */

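/*
 * For reference, the tuple layout from <linux/t10-pi.h> is 8 bytes,
 * big-endian on the wire:
 *
 *	struct t10_pi_tuple {
 *		__be16 guard_tag;	- checksum of the data interval
 *		__be16 app_tag;		- opaque, application-owned storage
 *		__be32 ref_tag;		- low 32 bits of the target LBA
 *	};
 */
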
static blk_status_t t10_pi_generate(struct blk_integrity_iter *iter,
		csum_fn *fn, enum t10_dif_type type)
{
	unsigned int i;

	for (i = 0; i < iter->data_size; i += iter->interval) {
		struct t10_pi_tuple *pi = iter->prot_buf;

		pi->guard_tag = fn(iter->data_buf, iter->interval);
		pi->app_tag = 0;

		if (type == T10_PI_TYPE1_PROTECTION)
			pi->ref_tag = cpu_to_be32(lower_32_bits(iter->seed));
		else
			pi->ref_tag = 0;

		iter->data_buf += iter->interval;
		iter->prot_buf += iter->tuple_size;
		iter->seed++;
	}

	return BLK_STS_OK;
}

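/*
 * Escape semantics used below: an app_tag of all ones (T10_PI_APP_ESCAPE)
 * marks a tuple whose checks should be skipped. For Type 3 the ref_tag
 * must also be all ones (T10_PI_REF_ESCAPE), since Type 3 leaves both the
 * app and ref tag bytes to the owner.
 */
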
static blk_status_t t10_pi_verify(struct blk_integrity_iter *iter,
		csum_fn *fn, enum t10_dif_type type)
{
	unsigned int i;

	BUG_ON(type == T10_PI_TYPE0_PROTECTION);

	for (i = 0; i < iter->data_size; i += iter->interval) {
		struct t10_pi_tuple *pi = iter->prot_buf;
		__be16 csum;

		if (type == T10_PI_TYPE1_PROTECTION ||
		    type == T10_PI_TYPE2_PROTECTION) {
			if (pi->app_tag == T10_PI_APP_ESCAPE)
				goto next;

			if (be32_to_cpu(pi->ref_tag) !=
			    lower_32_bits(iter->seed)) {
				pr_err("%s: ref tag error at location %llu (rcvd %u)\n",
				       iter->disk_name,
				       (unsigned long long)iter->seed,
				       be32_to_cpu(pi->ref_tag));
				return BLK_STS_PROTECTION;
			}
		} else if (type == T10_PI_TYPE3_PROTECTION) {
			if (pi->app_tag == T10_PI_APP_ESCAPE &&
			    pi->ref_tag == T10_PI_REF_ESCAPE)
				goto next;
		}

		csum = fn(iter->data_buf, iter->interval);

		if (pi->guard_tag != csum) {
			pr_err("%s: guard tag error at sector %llu (rcvd %04x, want %04x)\n",
			       iter->disk_name, (unsigned long long)iter->seed,
			       be16_to_cpu(pi->guard_tag), be16_to_cpu(csum));
			return BLK_STS_PROTECTION;
		}

next:
		iter->data_buf += iter->interval;
		iter->prot_buf += iter->tuple_size;
		iter->seed++;
	}

	return BLK_STS_OK;
}

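/*
 * Thin wrappers binding the generic generate/verify helpers above to a
 * fixed checksum function and DIF type, as required by the
 * blk_integrity_profile callback signatures.
 */
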
static blk_status_t t10_pi_type1_generate_crc(struct blk_integrity_iter *iter)
{
	return t10_pi_generate(iter, t10_pi_crc_fn, T10_PI_TYPE1_PROTECTION);
}

static blk_status_t t10_pi_type1_generate_ip(struct blk_integrity_iter *iter)
{
	return t10_pi_generate(iter, t10_pi_ip_fn, T10_PI_TYPE1_PROTECTION);
}

static blk_status_t t10_pi_type1_verify_crc(struct blk_integrity_iter *iter)
{
	return t10_pi_verify(iter, t10_pi_crc_fn, T10_PI_TYPE1_PROTECTION);
}

static blk_status_t t10_pi_type1_verify_ip(struct blk_integrity_iter *iter)
{
	return t10_pi_verify(iter, t10_pi_ip_fn, T10_PI_TYPE1_PROTECTION);
}

/**
 * t10_pi_type1_prepare - prepare PI prior submitting request to device
 * @rq:              request with PI that should be prepared
 *
 * For Type 1/Type 2, the virtual start sector is the one that was
 * originally submitted by the block layer for the ref_tag usage. Due to
 * partitioning, MD/DM cloning, etc. the actual physical start sector is
 * likely to be different. Remap protection information to match the
 * physical LBA.
 */
static void t10_pi_type1_prepare(struct request *rq)
{
	const int tuple_sz = rq->q->integrity.tuple_size;
	u32 ref_tag = t10_pi_ref_tag(rq);
	struct bio *bio;

	__rq_for_each_bio(bio, rq) {
		struct bio_integrity_payload *bip = bio_integrity(bio);
		u32 virt = bip_get_seed(bip) & 0xffffffff;
		struct bio_vec iv;
		struct bvec_iter iter;

		/* Already remapped? */
		if (bip->bip_flags & BIP_MAPPED_INTEGRITY)
			break;

		bip_for_each_vec(iv, bip, iter) {
			unsigned int j;
			void *p;

			p = bvec_kmap_local(&iv);
			for (j = 0; j < iv.bv_len; j += tuple_sz) {
				struct t10_pi_tuple *pi = p;

				if (be32_to_cpu(pi->ref_tag) == virt)
					pi->ref_tag = cpu_to_be32(ref_tag);
				virt++;
				ref_tag++;
				p += tuple_sz;
			}
			kunmap_local(p);
		}

		bip->bip_flags |= BIP_MAPPED_INTEGRITY;
	}
}

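/*
 * Illustrative example: a bio written to virtual sector 0 of a partition
 * starting at physical LBA 2048 carries tuples with ref_tags 0, 1, 2, ...
 * The walk above rewrites them to 2048, 2049, 2050, ... so the device-side
 * Type 1 check passes; tuples whose ref_tag does not match the expected
 * virtual value are deliberately left untouched.
 */
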
/**
 * t10_pi_type1_complete - prepare PI prior returning request to the blk layer
 * @rq:              request with PI that should be prepared
 * @nr_bytes:        total bytes to prepare
 *
 * For Type 1/Type 2, the virtual start sector is the one that was
 * originally submitted by the block layer for the ref_tag usage. Due to
 * partitioning, MD/DM cloning, etc. the actual physical start sector is
 * likely to be different. Since the physical start sector was submitted
 * to the device, we should remap it back to virtual values expected by the
 * block layer.
 */
static void t10_pi_type1_complete(struct request *rq, unsigned int nr_bytes)
{
	unsigned intervals = nr_bytes >> rq->q->integrity.interval_exp;
	const int tuple_sz = rq->q->integrity.tuple_size;
	u32 ref_tag = t10_pi_ref_tag(rq);
	struct bio *bio;

	__rq_for_each_bio(bio, rq) {
		struct bio_integrity_payload *bip = bio_integrity(bio);
		u32 virt = bip_get_seed(bip) & 0xffffffff;
		struct bio_vec iv;
		struct bvec_iter iter;

		bip_for_each_vec(iv, bip, iter) {
			unsigned int j;
			void *p;

			p = bvec_kmap_local(&iv);
			for (j = 0; j < iv.bv_len && intervals; j += tuple_sz) {
				struct t10_pi_tuple *pi = p;

				if (be32_to_cpu(pi->ref_tag) == ref_tag)
					pi->ref_tag = cpu_to_be32(virt);
				virt++;
				ref_tag++;
				intervals--;
				p += tuple_sz;
			}
			kunmap_local(p);
		}
	}
}

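/*
 * Note that nr_bytes may cover only part of the request when a partial
 * completion is processed; the 'intervals' counter above caps the walk so
 * tuples beyond the completed range keep their device-side ref_tag.
 */
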
static blk_status_t t10_pi_type3_generate_crc(struct blk_integrity_iter *iter)
{
	return t10_pi_generate(iter, t10_pi_crc_fn, T10_PI_TYPE3_PROTECTION);
}

static blk_status_t t10_pi_type3_generate_ip(struct blk_integrity_iter *iter)
{
	return t10_pi_generate(iter, t10_pi_ip_fn, T10_PI_TYPE3_PROTECTION);
}

static blk_status_t t10_pi_type3_verify_crc(struct blk_integrity_iter *iter)
{
	return t10_pi_verify(iter, t10_pi_crc_fn, T10_PI_TYPE3_PROTECTION);
}

static blk_status_t t10_pi_type3_verify_ip(struct blk_integrity_iter *iter)
{
	return t10_pi_verify(iter, t10_pi_ip_fn, T10_PI_TYPE3_PROTECTION);
}

/* Type 3 does not have a reference tag so no remapping is required. */
static void t10_pi_type3_prepare(struct request *rq)
{
}

/* Type 3 does not have a reference tag so no remapping is required. */
static void t10_pi_type3_complete(struct request *rq, unsigned int nr_bytes)
{
}

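/*
 * Usage sketch (not part of this file): a driver typically hooks a profile
 * up via blk_integrity_register(), e.g. setting bi.profile =
 * &t10_pi_type1_crc before registering 'bi' for the gendisk, as the SCSI
 * disk driver does for DIF-capable devices.
 */
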
const struct blk_integrity_profile t10_pi_type1_crc = {
	.name			= "T10-DIF-TYPE1-CRC",
	.generate_fn		= t10_pi_type1_generate_crc,
	.verify_fn		= t10_pi_type1_verify_crc,
	.prepare_fn		= t10_pi_type1_prepare,
	.complete_fn		= t10_pi_type1_complete,
};
EXPORT_SYMBOL(t10_pi_type1_crc);

const struct blk_integrity_profile t10_pi_type1_ip = {
	.name			= "T10-DIF-TYPE1-IP",
	.generate_fn		= t10_pi_type1_generate_ip,
	.verify_fn		= t10_pi_type1_verify_ip,
	.prepare_fn		= t10_pi_type1_prepare,
	.complete_fn		= t10_pi_type1_complete,
};
EXPORT_SYMBOL(t10_pi_type1_ip);

const struct blk_integrity_profile t10_pi_type3_crc = {
	.name			= "T10-DIF-TYPE3-CRC",
	.generate_fn		= t10_pi_type3_generate_crc,
	.verify_fn		= t10_pi_type3_verify_crc,
	.prepare_fn		= t10_pi_type3_prepare,
	.complete_fn		= t10_pi_type3_complete,
};
EXPORT_SYMBOL(t10_pi_type3_crc);

const struct blk_integrity_profile t10_pi_type3_ip = {
	.name			= "T10-DIF-TYPE3-IP",
	.generate_fn		= t10_pi_type3_generate_ip,
	.verify_fn		= t10_pi_type3_verify_ip,
	.prepare_fn		= t10_pi_type3_prepare,
	.complete_fn		= t10_pi_type3_complete,
};
EXPORT_SYMBOL(t10_pi_type3_ip);

static __be64 ext_pi_crc64(void *data, unsigned int len)
{
	return cpu_to_be64(crc64_rocksoft(data, len));
}

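/*
 * crc64_rocksoft() is the Rocksoft-parameterized CRC-64 used by the NVMe
 * 64-bit guard protection format. The extended tuple from <linux/t10-pi.h>
 * is 16 bytes:
 *
 *	struct crc64_pi_tuple {
 *		__be64 guard_tag;	- 64-bit CRC of the data interval
 *		__be16 app_tag;		- opaque, application-owned storage
 *		u8     ref_tag[6];	- 48-bit big-endian reference tag
 *	};
 */
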
static blk_status_t ext_pi_crc64_generate(struct blk_integrity_iter *iter,
					  enum t10_dif_type type)
{
	unsigned int i;

	for (i = 0; i < iter->data_size; i += iter->interval) {
		struct crc64_pi_tuple *pi = iter->prot_buf;

		pi->guard_tag = ext_pi_crc64(iter->data_buf, iter->interval);
		pi->app_tag = 0;

		if (type == T10_PI_TYPE1_PROTECTION)
			put_unaligned_be48(iter->seed, pi->ref_tag);
		else
			put_unaligned_be48(0ULL, pi->ref_tag);

		iter->data_buf += iter->interval;
		iter->prot_buf += iter->tuple_size;
		iter->seed++;
	}

	return BLK_STS_OK;
}

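/*
 * The 48-bit ref_tag has no native integer type, so the all-ones escape
 * value is detected with a byte-wise compare rather than an integer
 * equality test.
 */
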
static bool ext_pi_ref_escape(u8 *ref_tag)
{
	static u8 ref_escape[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	return memcmp(ref_tag, ref_escape, sizeof(ref_escape)) == 0;
}

static blk_status_t ext_pi_crc64_verify(struct blk_integrity_iter *iter,
					enum t10_dif_type type)
{
	unsigned int i;

	for (i = 0; i < iter->data_size; i += iter->interval) {
		struct crc64_pi_tuple *pi = iter->prot_buf;
		u64 ref, seed;
		__be64 csum;

		if (type == T10_PI_TYPE1_PROTECTION) {
			if (pi->app_tag == T10_PI_APP_ESCAPE)
				goto next;

			ref = get_unaligned_be48(pi->ref_tag);
			seed = lower_48_bits(iter->seed);
			if (ref != seed) {
				pr_err("%s: ref tag error at location %llu (rcvd %llu)\n",
					iter->disk_name, seed, ref);
				return BLK_STS_PROTECTION;
			}
		} else if (type == T10_PI_TYPE3_PROTECTION) {
			if (pi->app_tag == T10_PI_APP_ESCAPE &&
			    ext_pi_ref_escape(pi->ref_tag))
				goto next;
		}

		csum = ext_pi_crc64(iter->data_buf, iter->interval);
		if (pi->guard_tag != csum) {
			pr_err("%s: guard tag error at sector %llu (rcvd %016llx, want %016llx)\n",
				iter->disk_name, (unsigned long long)iter->seed,
				be64_to_cpu(pi->guard_tag), be64_to_cpu(csum));
			return BLK_STS_PROTECTION;
		}

next:
		iter->data_buf += iter->interval;
		iter->prot_buf += iter->tuple_size;
		iter->seed++;
	}

	return BLK_STS_OK;
}

static blk_status_t ext_pi_type1_verify_crc64(struct blk_integrity_iter *iter)
{
	return ext_pi_crc64_verify(iter, T10_PI_TYPE1_PROTECTION);
}

static blk_status_t ext_pi_type1_generate_crc64(struct blk_integrity_iter *iter)
{
	return ext_pi_crc64_generate(iter, T10_PI_TYPE1_PROTECTION);
}

static void ext_pi_type1_prepare(struct request *rq)
{
	const int tuple_sz = rq->q->integrity.tuple_size;
	u64 ref_tag = ext_pi_ref_tag(rq);
	struct bio *bio;

	__rq_for_each_bio(bio, rq) {
		struct bio_integrity_payload *bip = bio_integrity(bio);
		u64 virt = lower_48_bits(bip_get_seed(bip));
		struct bio_vec iv;
		struct bvec_iter iter;

		/* Already remapped? */
		if (bip->bip_flags & BIP_MAPPED_INTEGRITY)
			break;

		bip_for_each_vec(iv, bip, iter) {
			unsigned int j;
			void *p;

			p = bvec_kmap_local(&iv);
			for (j = 0; j < iv.bv_len; j += tuple_sz) {
				struct crc64_pi_tuple *pi = p;
				u64 ref = get_unaligned_be48(pi->ref_tag);

				if (ref == virt)
					put_unaligned_be48(ref_tag, pi->ref_tag);
				virt++;
				ref_tag++;
				p += tuple_sz;
			}
			kunmap_local(p);
		}

		bip->bip_flags |= BIP_MAPPED_INTEGRITY;
	}
}

static void ext_pi_type1_complete(struct request *rq, unsigned int nr_bytes)
{
	unsigned intervals = nr_bytes >> rq->q->integrity.interval_exp;
	const int tuple_sz = rq->q->integrity.tuple_size;
	u64 ref_tag = ext_pi_ref_tag(rq);
	struct bio *bio;

	__rq_for_each_bio(bio, rq) {
		struct bio_integrity_payload *bip = bio_integrity(bio);
		u64 virt = lower_48_bits(bip_get_seed(bip));
		struct bio_vec iv;
		struct bvec_iter iter;

		bip_for_each_vec(iv, bip, iter) {
			unsigned int j;
			void *p;

			p = bvec_kmap_local(&iv);
			for (j = 0; j < iv.bv_len && intervals; j += tuple_sz) {
				struct crc64_pi_tuple *pi = p;
				u64 ref = get_unaligned_be48(pi->ref_tag);

				if (ref == ref_tag)
					put_unaligned_be48(virt, pi->ref_tag);
				virt++;
				ref_tag++;
				intervals--;
				p += tuple_sz;
			}
			kunmap_local(p);
		}
	}
}

static blk_status_t ext_pi_type3_verify_crc64(struct blk_integrity_iter *iter)
{
	return ext_pi_crc64_verify(iter, T10_PI_TYPE3_PROTECTION);
}

static blk_status_t ext_pi_type3_generate_crc64(struct blk_integrity_iter *iter)
{
	return ext_pi_crc64_generate(iter, T10_PI_TYPE3_PROTECTION);
}

const struct blk_integrity_profile ext_pi_type1_crc64 = {
	.name			= "EXT-DIF-TYPE1-CRC64",
	.generate_fn		= ext_pi_type1_generate_crc64,
	.verify_fn		= ext_pi_type1_verify_crc64,
	.prepare_fn		= ext_pi_type1_prepare,
	.complete_fn		= ext_pi_type1_complete,
};
EXPORT_SYMBOL_GPL(ext_pi_type1_crc64);

const struct blk_integrity_profile ext_pi_type3_crc64 = {
	.name			= "EXT-DIF-TYPE3-CRC64",
	.generate_fn		= ext_pi_type3_generate_crc64,
	.verify_fn		= ext_pi_type3_verify_crc64,
	.prepare_fn		= t10_pi_type3_prepare,
	.complete_fn		= t10_pi_type3_complete,
};
EXPORT_SYMBOL_GPL(ext_pi_type3_crc64);

MODULE_LICENSE("GPL");