dm crypt: use bio_add_page
author    Milan Broz <mbroz@redhat.com>
          Thu, 13 Dec 2007 14:44:18 +0000 (14:44 +0000)
committer Greg Kroah-Hartman <gregkh@suse.de>
          Fri, 8 Feb 2008 20:01:09 +0000 (12:01 -0800)
patch 91e106259214b40e992a58fb9417da46868e19b2 in mainline.

Fix possible max_phys_segments violation in cloned dm-crypt bio.

During a write operation, dm-crypt needs to allocate a new bio request
and run the crypto operation on this clone. The cloned request always
has the same size, but its number of physical segments can increase
and violate the max_phys_segments restriction.
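
To make the constraint concrete, here is a minimal standalone sketch of
the arithmetic; the page size and segment limit are arbitrary values
chosen for illustration, not taken from any particular device. Because
the pages allocated from the mempool are in general not physically
contiguous, the clone needs one segment per page, so a request of the
same byte size can still exceed the queue's limit:

#include <stdio.h>

/* Illustrative values only -- not taken from a real queue. */
#define EXAMPLE_PAGE_SIZE          4096u
#define EXAMPLE_MAX_PHYS_SEGMENTS   128u

int main(void)
{
        unsigned int size = 1024u * 1024u;      /* a 1 MiB write */

        /* Each separately allocated mempool page becomes its own
         * physical segment, so the clone needs one biovec per page. */
        unsigned int nr_segments =
                (size + EXAMPLE_PAGE_SIZE - 1) / EXAMPLE_PAGE_SIZE;

        printf("segments needed: %u, queue limit: %u -> %s\n",
               nr_segments, EXAMPLE_MAX_PHYS_SEGMENTS,
               nr_segments > EXAMPLE_MAX_PHYS_SEGMENTS ? "violation" : "ok");
        return 0;
}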

This can lead to data corruption and serious hardware malfunction.
It was recently observed when using XFS over dm-crypt with at least
two HBA controller drivers (arcmsr, cciss).

Fix it by using the bio_add_page() call (which also tests for the other
restrictions) instead of constructing the biovec by hand.
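
As a rough illustration of what "tests for other restrictions" means,
here is a minimal standalone sketch modelling the kind of limit checks
the block layer of this era applied when appending a page to a bio.
The structures, field names and limit values below are simplified
stand-ins invented for the example, not the real kernel definitions,
and the real helper performs further checks beyond these:

#include <stdio.h>

/* Simplified stand-ins for the real struct request_queue / struct bio. */
struct example_queue {
        unsigned int max_phys_segments;
        unsigned int max_hw_segments;
        unsigned int max_sectors;
};

struct example_bio {
        unsigned int bi_vcnt;   /* biovecs filled so far */
        unsigned int bi_size;   /* bytes queued so far */
};

/*
 * Mirrors bio_add_page()'s convention: returns the number of bytes
 * added on success, 0 if adding the page would break a queue limit.
 */
static unsigned int example_add_page(struct example_bio *bio,
                                     const struct example_queue *q,
                                     unsigned int len)
{
        /* >> 9 converts bytes to 512-byte sectors. */
        if (((bio->bi_size + len) >> 9) > q->max_sectors)
                return 0;
        if (bio->bi_vcnt >= q->max_phys_segments ||
            bio->bi_vcnt >= q->max_hw_segments)
                return 0;

        bio->bi_vcnt++;
        bio->bi_size += len;
        return len;
}

int main(void)
{
        struct example_queue q = { .max_phys_segments = 2,
                                   .max_hw_segments = 2,
                                   .max_sectors = 1024 };
        struct example_bio bio = { 0, 0 };
        unsigned int i;

        /* The third page is refused, which is exactly what the loop in
         * the patch below handles: free the page back to the mempool
         * and stop filling the clone. */
        for (i = 0; i < 3; i++)
                printf("page %u: %s\n", i,
                       example_add_page(&bio, &q, 4096) ? "added" : "refused");
        return 0;
}

The return-value convention is what the patch relies on: a zero return
means the page was not added, so the caller frees it back to the
mempool and stops.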

All versions of dm-crypt are affected by this bug.

Signed-off-by: Milan Broz <mbroz@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
drivers/md/dm-crypt.c

index b3fcee0fea5a315c12145d80152607e8cc9f6405..ba2e1359ecb731d85b5d5ff8ca0baa7db19f1c5f 100644
@@ -399,7 +399,8 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
        struct bio *clone;
        unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
-       unsigned int i;
+       unsigned i, len;
+       struct page *page;
 
        clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
        if (!clone)
@@ -408,10 +409,8 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
        clone_init(io, clone);
 
        for (i = 0; i < nr_iovecs; i++) {
-               struct bio_vec *bv = bio_iovec_idx(clone, i);
-
-               bv->bv_page = mempool_alloc(cc->page_pool, gfp_mask);
-               if (!bv->bv_page)
+               page = mempool_alloc(cc->page_pool, gfp_mask);
+               if (!page)
                        break;
 
                /*
@@ -422,15 +421,14 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
                if (i == (MIN_BIO_PAGES - 1))
                        gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;
 
-               bv->bv_offset = 0;
-               if (size > PAGE_SIZE)
-                       bv->bv_len = PAGE_SIZE;
-               else
-                       bv->bv_len = size;
+               len = (size > PAGE_SIZE) ? PAGE_SIZE : size;
+
+               if (!bio_add_page(clone, page, len, 0)) {
+                       mempool_free(page, cc->page_pool);
+                       break;
+               }
 
-               clone->bi_size += bv->bv_len;
-               clone->bi_vcnt++;
-               size -= bv->bv_len;
+               size -= len;
        }
 
        if (!clone->bi_size) {