// SPDX-License-Identifier: GPL-2.0
/*
 * bio-integrity.c - bio data integrity extensions
 *
 * Copyright (C) 2007, 2008, 2009 Oracle Corporation
 * Written by: Martin K. Petersen <martin.petersen@oracle.com>
 */
9 #include <linux/blk-integrity.h>
10 #include <linux/mempool.h>
11 #include <linux/export.h>
12 #include <linux/bio.h>
13 #include <linux/workqueue.h>
14 #include <linux/slab.h>
/* Slab for bio_integrity_payload allocations (includes BIO_INLINE_VECS). */
static struct kmem_cache *bip_slab;

/* Workqueue used to verify READ integrity metadata in process context. */
static struct workqueue_struct *kintegrityd_wq;
20 void blk_flush_integrity(void)
22 flush_workqueue(kintegrityd_wq
);
25 static void __bio_integrity_free(struct bio_set
*bs
,
26 struct bio_integrity_payload
*bip
)
28 if (bs
&& mempool_initialized(&bs
->bio_integrity_pool
)) {
30 bvec_free(&bs
->bvec_integrity_pool
, bip
->bip_vec
,
32 mempool_free(bip
, &bs
->bio_integrity_pool
);
/**
 * bio_integrity_alloc - Allocate integrity payload and attach it to bio
 * @bio:	bio to attach integrity metadata to
 * @gfp_mask:	Memory allocation mask
 * @nr_vecs:	Number of integrity metadata scatter-gather elements
 *
 * Description: This function prepares a bio for attaching integrity
 * metadata. nr_vecs specifies the maximum number of pages containing
 * integrity metadata that can be attached.
 */
48 struct bio_integrity_payload
*bio_integrity_alloc(struct bio
*bio
,
52 struct bio_integrity_payload
*bip
;
53 struct bio_set
*bs
= bio
->bi_pool
;
56 if (WARN_ON_ONCE(bio_has_crypt_ctx(bio
)))
57 return ERR_PTR(-EOPNOTSUPP
);
59 if (!bs
|| !mempool_initialized(&bs
->bio_integrity_pool
)) {
60 bip
= kmalloc(struct_size(bip
, bip_inline_vecs
, nr_vecs
), gfp_mask
);
61 inline_vecs
= nr_vecs
;
63 bip
= mempool_alloc(&bs
->bio_integrity_pool
, gfp_mask
);
64 inline_vecs
= BIO_INLINE_VECS
;
68 return ERR_PTR(-ENOMEM
);
70 memset(bip
, 0, sizeof(*bip
));
72 /* always report as many vecs as asked explicitly, not inline vecs */
73 bip
->bip_max_vcnt
= nr_vecs
;
74 if (nr_vecs
> inline_vecs
) {
75 bip
->bip_vec
= bvec_alloc(&bs
->bvec_integrity_pool
,
76 &bip
->bip_max_vcnt
, gfp_mask
);
80 bip
->bip_vec
= bip
->bip_inline_vecs
;
84 bio
->bi_integrity
= bip
;
85 bio
->bi_opf
|= REQ_INTEGRITY
;
89 __bio_integrity_free(bs
, bip
);
90 return ERR_PTR(-ENOMEM
);
92 EXPORT_SYMBOL(bio_integrity_alloc
);
94 static void bio_integrity_unpin_bvec(struct bio_vec
*bv
, int nr_vecs
,
99 for (i
= 0; i
< nr_vecs
; i
++) {
100 if (dirty
&& !PageCompound(bv
[i
].bv_page
))
101 set_page_dirty_lock(bv
[i
].bv_page
);
102 unpin_user_page(bv
[i
].bv_page
);
106 static void bio_integrity_uncopy_user(struct bio_integrity_payload
*bip
)
108 unsigned short nr_vecs
= bip
->bip_max_vcnt
- 1;
109 struct bio_vec
*copy
= &bip
->bip_vec
[1];
110 size_t bytes
= bip
->bip_iter
.bi_size
;
111 struct iov_iter iter
;
114 iov_iter_bvec(&iter
, ITER_DEST
, copy
, nr_vecs
, bytes
);
115 ret
= copy_to_iter(bvec_virt(bip
->bip_vec
), bytes
, &iter
);
116 WARN_ON_ONCE(ret
!= bytes
);
118 bio_integrity_unpin_bvec(copy
, nr_vecs
, true);
121 static void bio_integrity_unmap_user(struct bio_integrity_payload
*bip
)
123 bool dirty
= bio_data_dir(bip
->bip_bio
) == READ
;
125 if (bip
->bip_flags
& BIP_COPY_USER
) {
127 bio_integrity_uncopy_user(bip
);
128 kfree(bvec_virt(bip
->bip_vec
));
132 bio_integrity_unpin_bvec(bip
->bip_vec
, bip
->bip_max_vcnt
, dirty
);
/**
 * bio_integrity_free - Free bio integrity payload
 * @bio:	bio containing bip to be freed
 *
 * Description: Used to free the integrity portion of a bio. Usually
 * called from bio_free().
 */
142 void bio_integrity_free(struct bio
*bio
)
144 struct bio_integrity_payload
*bip
= bio_integrity(bio
);
145 struct bio_set
*bs
= bio
->bi_pool
;
147 if (bip
->bip_flags
& BIP_BLOCK_INTEGRITY
)
148 kfree(bvec_virt(bip
->bip_vec
));
149 else if (bip
->bip_flags
& BIP_INTEGRITY_USER
)
150 bio_integrity_unmap_user(bip
);
152 __bio_integrity_free(bs
, bip
);
153 bio
->bi_integrity
= NULL
;
154 bio
->bi_opf
&= ~REQ_INTEGRITY
;
/**
 * bio_integrity_add_page - Attach integrity metadata
 * @bio:	bio to update
 * @page:	page containing integrity metadata
 * @len:	number of bytes of integrity metadata in page
 * @offset:	start offset within page
 *
 * Description: Attach a page containing integrity metadata to bio.
 */
166 int bio_integrity_add_page(struct bio
*bio
, struct page
*page
,
167 unsigned int len
, unsigned int offset
)
169 struct request_queue
*q
= bdev_get_queue(bio
->bi_bdev
);
170 struct bio_integrity_payload
*bip
= bio_integrity(bio
);
172 if (((bip
->bip_iter
.bi_size
+ len
) >> SECTOR_SHIFT
) >
173 queue_max_hw_sectors(q
))
176 if (bip
->bip_vcnt
> 0) {
177 struct bio_vec
*bv
= &bip
->bip_vec
[bip
->bip_vcnt
- 1];
178 bool same_page
= false;
180 if (bvec_try_merge_hw_page(q
, bv
, page
, len
, offset
,
182 bip
->bip_iter
.bi_size
+= len
;
187 min(bip
->bip_max_vcnt
, queue_max_integrity_segments(q
)))
191 * If the queue doesn't support SG gaps and adding this segment
192 * would create a gap, disallow it.
194 if (bvec_gap_to_prev(&q
->limits
, bv
, offset
))
198 bvec_set_page(&bip
->bip_vec
[bip
->bip_vcnt
], page
, len
, offset
);
200 bip
->bip_iter
.bi_size
+= len
;
204 EXPORT_SYMBOL(bio_integrity_add_page
);
206 static int bio_integrity_copy_user(struct bio
*bio
, struct bio_vec
*bvec
,
207 int nr_vecs
, unsigned int len
,
208 unsigned int direction
, u32 seed
)
210 bool write
= direction
== ITER_SOURCE
;
211 struct bio_integrity_payload
*bip
;
212 struct iov_iter iter
;
216 buf
= kmalloc(len
, GFP_KERNEL
);
221 iov_iter_bvec(&iter
, direction
, bvec
, nr_vecs
, len
);
222 if (!copy_from_iter_full(buf
, len
, &iter
)) {
227 bip
= bio_integrity_alloc(bio
, GFP_KERNEL
, 1);
232 * We need to preserve the original bvec and the number of vecs
233 * in it for completion handling
235 bip
= bio_integrity_alloc(bio
, GFP_KERNEL
, nr_vecs
+ 1);
244 bio_integrity_unpin_bvec(bvec
, nr_vecs
, false);
246 memcpy(&bip
->bip_vec
[1], bvec
, nr_vecs
* sizeof(*bvec
));
248 ret
= bio_integrity_add_page(bio
, virt_to_page(buf
), len
,
249 offset_in_page(buf
));
255 bip
->bip_flags
|= BIP_INTEGRITY_USER
| BIP_COPY_USER
;
256 bip
->bip_iter
.bi_sector
= seed
;
259 bio_integrity_free(bio
);
265 static int bio_integrity_init_user(struct bio
*bio
, struct bio_vec
*bvec
,
266 int nr_vecs
, unsigned int len
, u32 seed
)
268 struct bio_integrity_payload
*bip
;
270 bip
= bio_integrity_alloc(bio
, GFP_KERNEL
, nr_vecs
);
274 memcpy(bip
->bip_vec
, bvec
, nr_vecs
* sizeof(*bvec
));
275 bip
->bip_flags
|= BIP_INTEGRITY_USER
;
276 bip
->bip_iter
.bi_sector
= seed
;
277 bip
->bip_iter
.bi_size
= len
;
281 static unsigned int bvec_from_pages(struct bio_vec
*bvec
, struct page
**pages
,
282 int nr_vecs
, ssize_t bytes
, ssize_t offset
)
284 unsigned int nr_bvecs
= 0;
287 for (i
= 0; i
< nr_vecs
; i
= j
) {
288 size_t size
= min_t(size_t, bytes
, PAGE_SIZE
- offset
);
289 struct folio
*folio
= page_folio(pages
[i
]);
292 for (j
= i
+ 1; j
< nr_vecs
; j
++) {
293 size_t next
= min_t(size_t, PAGE_SIZE
, bytes
);
295 if (page_folio(pages
[j
]) != folio
||
296 pages
[j
] != pages
[j
- 1] + 1)
298 unpin_user_page(pages
[j
]);
303 bvec_set_page(&bvec
[nr_bvecs
], pages
[i
], size
, offset
);
311 int bio_integrity_map_user(struct bio
*bio
, void __user
*ubuf
, ssize_t bytes
,
314 struct request_queue
*q
= bdev_get_queue(bio
->bi_bdev
);
315 unsigned int align
= q
->dma_pad_mask
| queue_dma_alignment(q
);
316 struct page
*stack_pages
[UIO_FASTIOV
], **pages
= stack_pages
;
317 struct bio_vec stack_vec
[UIO_FASTIOV
], *bvec
= stack_vec
;
318 unsigned int direction
, nr_bvecs
;
319 struct iov_iter iter
;
324 if (bio_integrity(bio
))
326 if (bytes
>> SECTOR_SHIFT
> queue_max_hw_sectors(q
))
329 if (bio_data_dir(bio
) == READ
)
330 direction
= ITER_DEST
;
332 direction
= ITER_SOURCE
;
334 iov_iter_ubuf(&iter
, direction
, ubuf
, bytes
);
335 nr_vecs
= iov_iter_npages(&iter
, BIO_MAX_VECS
+ 1);
336 if (nr_vecs
> BIO_MAX_VECS
)
338 if (nr_vecs
> UIO_FASTIOV
) {
339 bvec
= kcalloc(nr_vecs
, sizeof(*bvec
), GFP_KERNEL
);
345 copy
= !iov_iter_is_aligned(&iter
, align
, align
);
346 ret
= iov_iter_extract_pages(&iter
, &pages
, bytes
, nr_vecs
, 0, &offset
);
347 if (unlikely(ret
< 0))
350 nr_bvecs
= bvec_from_pages(bvec
, pages
, nr_vecs
, bytes
, offset
);
351 if (pages
!= stack_pages
)
353 if (nr_bvecs
> queue_max_integrity_segments(q
))
357 ret
= bio_integrity_copy_user(bio
, bvec
, nr_bvecs
, bytes
,
360 ret
= bio_integrity_init_user(bio
, bvec
, nr_bvecs
, bytes
, seed
);
363 if (bvec
!= stack_vec
)
369 bio_integrity_unpin_bvec(bvec
, nr_bvecs
, false);
371 if (bvec
!= stack_vec
)
375 EXPORT_SYMBOL_GPL(bio_integrity_map_user
);
/**
 * bio_integrity_process - Process integrity metadata for a bio
 * @bio:	bio to generate/verify integrity metadata for
 * @proc_iter:	iterator to process
 * @proc_fn:	Pointer to the relevant processing function
 */
383 static blk_status_t
bio_integrity_process(struct bio
*bio
,
384 struct bvec_iter
*proc_iter
, integrity_processing_fn
*proc_fn
)
386 struct blk_integrity
*bi
= blk_get_integrity(bio
->bi_bdev
->bd_disk
);
387 struct blk_integrity_iter iter
;
388 struct bvec_iter bviter
;
390 struct bio_integrity_payload
*bip
= bio_integrity(bio
);
391 blk_status_t ret
= BLK_STS_OK
;
393 iter
.disk_name
= bio
->bi_bdev
->bd_disk
->disk_name
;
394 iter
.interval
= 1 << bi
->interval_exp
;
395 iter
.tuple_size
= bi
->tuple_size
;
396 iter
.seed
= proc_iter
->bi_sector
;
397 iter
.prot_buf
= bvec_virt(bip
->bip_vec
);
399 __bio_for_each_segment(bv
, bio
, bviter
, *proc_iter
) {
400 void *kaddr
= bvec_kmap_local(&bv
);
402 iter
.data_buf
= kaddr
;
403 iter
.data_size
= bv
.bv_len
;
404 ret
= proc_fn(&iter
);
/**
 * bio_integrity_prep - Prepare bio for integrity I/O
 * @bio:	bio to prepare
 *
 * Description:  Checks if the bio already has an integrity payload attached.
 * If it does, the payload has been generated by another kernel subsystem,
 * and we just pass it through. Otherwise allocates integrity payload.
 * The bio must have data direction, target device and start sector set prior
 * to calling.  In the WRITE case, integrity metadata will be generated using
 * the block device's integrity function.  In the READ case, the buffer
 * will be prepared for DMA and a suitable end_io handler set up.
 */
426 bool bio_integrity_prep(struct bio
*bio
)
428 struct bio_integrity_payload
*bip
;
429 struct blk_integrity
*bi
= blk_get_integrity(bio
->bi_bdev
->bd_disk
);
431 unsigned long start
, end
;
432 unsigned int len
, nr_pages
;
433 unsigned int bytes
, offset
, i
;
438 if (bio_op(bio
) != REQ_OP_READ
&& bio_op(bio
) != REQ_OP_WRITE
)
441 if (!bio_sectors(bio
))
444 /* Already protected? */
445 if (bio_integrity(bio
))
448 if (bio_data_dir(bio
) == READ
) {
449 if (!bi
->profile
->verify_fn
||
450 !(bi
->flags
& BLK_INTEGRITY_VERIFY
))
453 if (!bi
->profile
->generate_fn
||
454 !(bi
->flags
& BLK_INTEGRITY_GENERATE
))
458 /* Allocate kernel buffer for protection data */
459 len
= bio_integrity_bytes(bi
, bio_sectors(bio
));
460 buf
= kmalloc(len
, GFP_NOIO
);
461 if (unlikely(buf
== NULL
)) {
462 printk(KERN_ERR
"could not allocate integrity buffer\n");
466 end
= (((unsigned long) buf
) + len
+ PAGE_SIZE
- 1) >> PAGE_SHIFT
;
467 start
= ((unsigned long) buf
) >> PAGE_SHIFT
;
468 nr_pages
= end
- start
;
470 /* Allocate bio integrity payload and integrity vectors */
471 bip
= bio_integrity_alloc(bio
, GFP_NOIO
, nr_pages
);
473 printk(KERN_ERR
"could not allocate data integrity bioset\n");
478 bip
->bip_flags
|= BIP_BLOCK_INTEGRITY
;
479 bip_set_seed(bip
, bio
->bi_iter
.bi_sector
);
481 if (bi
->flags
& BLK_INTEGRITY_IP_CHECKSUM
)
482 bip
->bip_flags
|= BIP_IP_CHECKSUM
;
485 offset
= offset_in_page(buf
);
486 for (i
= 0; i
< nr_pages
&& len
> 0; i
++) {
487 bytes
= PAGE_SIZE
- offset
;
492 if (bio_integrity_add_page(bio
, virt_to_page(buf
),
493 bytes
, offset
) < bytes
) {
494 printk(KERN_ERR
"could not attach integrity payload\n");
503 /* Auto-generate integrity metadata if this is a write */
504 if (bio_data_dir(bio
) == WRITE
) {
505 bio_integrity_process(bio
, &bio
->bi_iter
,
506 bi
->profile
->generate_fn
);
508 bip
->bio_iter
= bio
->bi_iter
;
513 bio
->bi_status
= BLK_STS_RESOURCE
;
517 EXPORT_SYMBOL(bio_integrity_prep
);
/**
 * bio_integrity_verify_fn - Integrity I/O completion worker
 * @work:	Work struct stored in bio to be verified
 *
 * Description: This workqueue function is called to complete a READ
 * request.  The function verifies the transferred integrity metadata
 * and then calls the original bio end_io function.
 */
527 static void bio_integrity_verify_fn(struct work_struct
*work
)
529 struct bio_integrity_payload
*bip
=
530 container_of(work
, struct bio_integrity_payload
, bip_work
);
531 struct bio
*bio
= bip
->bip_bio
;
532 struct blk_integrity
*bi
= blk_get_integrity(bio
->bi_bdev
->bd_disk
);
535 * At the moment verify is called bio's iterator was advanced
536 * during split and completion, we need to rewind iterator to
537 * it's original position.
539 bio
->bi_status
= bio_integrity_process(bio
, &bip
->bio_iter
,
540 bi
->profile
->verify_fn
);
541 bio_integrity_free(bio
);
/**
 * __bio_integrity_endio - Integrity I/O completion function
 * @bio:	Protected bio
 *
 * Description: Completion for integrity I/O
 *
 * Normally I/O completion is done in interrupt context.  However,
 * verifying I/O integrity is a time-consuming task which must be run
 * in process context.  This function postpones completion accordingly.
 */
556 bool __bio_integrity_endio(struct bio
*bio
)
558 struct blk_integrity
*bi
= blk_get_integrity(bio
->bi_bdev
->bd_disk
);
559 struct bio_integrity_payload
*bip
= bio_integrity(bio
);
561 if (bio_op(bio
) == REQ_OP_READ
&& !bio
->bi_status
&&
562 (bip
->bip_flags
& BIP_BLOCK_INTEGRITY
) && bi
->profile
->verify_fn
) {
563 INIT_WORK(&bip
->bip_work
, bio_integrity_verify_fn
);
564 queue_work(kintegrityd_wq
, &bip
->bip_work
);
568 bio_integrity_free(bio
);
/**
 * bio_integrity_advance - Advance integrity vector
 * @bio:	bio whose integrity vector to update
 * @bytes_done:	number of data bytes that have been completed
 *
 * Description: This function calculates how many integrity bytes the
 * number of completed data bytes correspond to and advances the
 * integrity vector accordingly.
 */
581 void bio_integrity_advance(struct bio
*bio
, unsigned int bytes_done
)
583 struct bio_integrity_payload
*bip
= bio_integrity(bio
);
584 struct blk_integrity
*bi
= blk_get_integrity(bio
->bi_bdev
->bd_disk
);
585 unsigned bytes
= bio_integrity_bytes(bi
, bytes_done
>> 9);
587 bip
->bip_iter
.bi_sector
+= bio_integrity_intervals(bi
, bytes_done
>> 9);
588 bvec_iter_advance(bip
->bip_vec
, &bip
->bip_iter
, bytes
);
/**
 * bio_integrity_trim - Trim integrity vector
 * @bio:	bio whose integrity vector to update
 *
 * Description: Used to trim the integrity vector in a cloned bio.
 */
597 void bio_integrity_trim(struct bio
*bio
)
599 struct bio_integrity_payload
*bip
= bio_integrity(bio
);
600 struct blk_integrity
*bi
= blk_get_integrity(bio
->bi_bdev
->bd_disk
);
602 bip
->bip_iter
.bi_size
= bio_integrity_bytes(bi
, bio_sectors(bio
));
604 EXPORT_SYMBOL(bio_integrity_trim
);
/**
 * bio_integrity_clone - Callback for cloning bios with integrity metadata
 * @bio:	New bio
 * @bio_src:	Original bio
 * @gfp_mask:	Memory allocation mask
 *
 * Description:	Called to allocate a bip when cloning a bio
 */
614 int bio_integrity_clone(struct bio
*bio
, struct bio
*bio_src
,
617 struct bio_integrity_payload
*bip_src
= bio_integrity(bio_src
);
618 struct bio_integrity_payload
*bip
;
620 BUG_ON(bip_src
== NULL
);
622 bip
= bio_integrity_alloc(bio
, gfp_mask
, bip_src
->bip_vcnt
);
626 memcpy(bip
->bip_vec
, bip_src
->bip_vec
,
627 bip_src
->bip_vcnt
* sizeof(struct bio_vec
));
629 bip
->bip_vcnt
= bip_src
->bip_vcnt
;
630 bip
->bip_iter
= bip_src
->bip_iter
;
631 bip
->bip_flags
= bip_src
->bip_flags
& ~BIP_BLOCK_INTEGRITY
;
636 int bioset_integrity_create(struct bio_set
*bs
, int pool_size
)
638 if (mempool_initialized(&bs
->bio_integrity_pool
))
641 if (mempool_init_slab_pool(&bs
->bio_integrity_pool
,
642 pool_size
, bip_slab
))
645 if (biovec_init_pool(&bs
->bvec_integrity_pool
, pool_size
)) {
646 mempool_exit(&bs
->bio_integrity_pool
);
652 EXPORT_SYMBOL(bioset_integrity_create
);
654 void bioset_integrity_free(struct bio_set
*bs
)
656 mempool_exit(&bs
->bio_integrity_pool
);
657 mempool_exit(&bs
->bvec_integrity_pool
);
660 void __init
bio_integrity_init(void)
663 * kintegrityd won't block much but may burn a lot of CPU cycles.
664 * Make it highpri CPU intensive wq with max concurrency of 1.
666 kintegrityd_wq
= alloc_workqueue("kintegrityd", WQ_MEM_RECLAIM
|
667 WQ_HIGHPRI
| WQ_CPU_INTENSIVE
, 1);
669 panic("Failed to create kintegrityd\n");
671 bip_slab
= kmem_cache_create("bio_integrity_payload",
672 sizeof(struct bio_integrity_payload
) +
673 sizeof(struct bio_vec
) * BIO_INLINE_VECS
,
674 0, SLAB_HWCACHE_ALIGN
|SLAB_PANIC
, NULL
);