#include "blk-cgroup.h"
#include "blk-crypto-internal.h"
-static unsigned int num_prealloc_bounce_pg = 32;
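+/*
+ * Preallocate a full bio's worth of bounce pages: the encrypt path caps
+ * nr_enc_pages at BIO_MAX_VECS, so a pool of this size can always satisfy
+ * the largest possible bulk request.
+ */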
+static unsigned int num_prealloc_bounce_pg = BIO_MAX_VECS;
module_param(num_prealloc_bounce_pg, uint, 0);
MODULE_PARM_DESC(num_prealloc_bounce_pg,
"Number of preallocated bounce pages for the blk-crypto crypto API fallback");
static void blk_crypto_fallback_encrypt_endio(struct bio *enc_bio)
{
struct bio *src_bio = enc_bio->bi_private;
- int i;
+ struct page **pages = (struct page **)enc_bio->bi_io_vec;
+ struct bio_vec *bv;
+ unsigned int i;
+
+ /*
+ * Use the same trick as the alloc side to avoid the need for an extra
+ * pages array. This is safe because a page pointer is smaller than a
+ * bio_vec, so the store to pages[i] never overwrites a bio_vec that
+ * still has to be read.
+ */
+ bio_for_each_bvec_all(bv, enc_bio, i)
+ pages[i] = bv->bv_page;
- for (i = 0; i < enc_bio->bi_vcnt; i++)
- mempool_free(enc_bio->bi_io_vec[i].bv_page,
- blk_crypto_bounce_page_pool);
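+
+ /*
+ * mempool_free_bulk returns the number of pages it took back into the
+ * pool; whatever it could not absorb is handed back to the page
+ * allocator via release_pages below.
+ */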
+ i = mempool_free_bulk(blk_crypto_bounce_page_pool, (void **)pages,
+ enc_bio->bi_vcnt);
+ if (i < enc_bio->bi_vcnt)
+ release_pages(pages + i, enc_bio->bi_vcnt - i);
if (enc_bio->bi_status)
cmpxchg(&src_bio->bi_status, 0, enc_bio->bi_status);
bio_endio(src_bio);
}
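+/*
+ * Number of page pointers that fit into a single bio_vec. This borrows
+ * the temporary page array trick used by bio_iov_iter_get_pages() in
+ * block/bio.c.
+ */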
+#define PAGE_PTRS_PER_BVEC (sizeof(struct bio_vec) / sizeof(struct page *))
+
static struct bio *blk_crypto_alloc_enc_bio(struct bio *bio_src,
- unsigned int nr_segs)
+ unsigned int nr_segs, struct page ***pages_ret)
{
+ unsigned int memflags = memalloc_noio_save();
+ unsigned int nr_allocated;
+ struct page **pages;
struct bio *bio;
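+
+ /*
+ * This is called from the I/O submission path, so enter a noio scope
+ * rather than passing GFP_NOIO to each allocation call below.
+ */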
bio = bio_alloc_bioset(bio_src->bi_bdev, nr_segs, bio_src->bi_opf,
bio->bi_write_stream = bio_src->bi_write_stream;
bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
bio_clone_blkg_association(bio, bio_src);
+
+ /*
+ * Move page array up in the allocated memory for the bio vecs as far as
+ * possible so that we can start filling biovecs from the beginning
+ * without overwriting the temporary page array.
+ */
+ static_assert(PAGE_PTRS_PER_BVEC > 1);
+ pages = (struct page **)bio->bi_io_vec;
+ pages += nr_segs * (PAGE_PTRS_PER_BVEC - 1);
+
+ /*
+ * Try a bulk allocation first. This can leave some entries in the
+ * array unallocated; the mempool_alloc_bulk call below fills in the
+ * missing ones.
+ *
+ * Note: alloc_pages_bulk needs the array to be zeroed, as it assumes
+ * any non-zero slot already contains a valid allocation.
+ */
+ memset(pages, 0, sizeof(struct page *) * nr_segs);
+ nr_allocated = alloc_pages_bulk(GFP_KERNEL, nr_segs, pages);
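+
+ /*
+ * With the pool preallocated to BIO_MAX_VECS pages, mempool_alloc_bulk
+ * can always make forward progress here, even if it has to wait for
+ * bounce pages to be returned to the pool.
+ */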
+ if (nr_allocated < nr_segs)
+ mempool_alloc_bulk(blk_crypto_bounce_page_pool, (void **)pages,
+ nr_segs, nr_allocated);
+ memalloc_noio_restore(memflags);
+ *pages_ret = pages;
return bio;
}
struct scatterlist src, dst;
union blk_crypto_iv iv;
unsigned int nr_enc_pages, enc_idx;
+ struct page **enc_pages;
struct bio *enc_bio;
unsigned int i;
*/
new_bio:
nr_enc_pages = min(bio_segments(src_bio), BIO_MAX_VECS);
- enc_bio = blk_crypto_alloc_enc_bio(src_bio, nr_enc_pages);
+ enc_bio = blk_crypto_alloc_enc_bio(src_bio, nr_enc_pages, &enc_pages);
enc_idx = 0;
for (;;) {
struct bio_vec src_bv =
bio_iter_iovec(src_bio, src_bio->bi_iter);
- struct page *enc_page;
+ struct page *enc_page = enc_pages[enc_idx];
- enc_page = mempool_alloc(blk_crypto_bounce_page_pool,
- GFP_NOIO);
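+
+ /* All bounce pages were allocated up front in blk_crypto_alloc_enc_bio. */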
__bio_add_page(enc_bio, enc_page, src_bv.bv_len,
src_bv.bv_offset);
*/
for (i = 0; i < src_bv.bv_len; i += data_unit_size) {
blk_crypto_dun_to_iv(curr_dun, &iv);
- if (crypto_skcipher_encrypt(ciph_req)) {
- bio_io_error(enc_bio);
- return;
- }
+ if (crypto_skcipher_encrypt(ciph_req))
+ goto out_free_enc_bio;
bio_crypt_dun_increment(curr_dun, 1);
src.offset += data_unit_size;
dst.offset += data_unit_size;
}
submit_bio(enc_bio);
+ return;
+
+out_free_enc_bio:
+ /*
+ * Add the remaining pages to the bio so that the normal completion path
+ * in blk_crypto_fallback_encrypt_endio frees them. The exact data
+ * layout does not matter for that, so don't bother iterating the source
+ * bio.
+ */
+ for (; enc_idx < nr_enc_pages; enc_idx++)
+ __bio_add_page(enc_bio, enc_pages[enc_idx], PAGE_SIZE, 0);
+ bio_io_error(enc_bio);
}
/*