--- /dev/null
+From e893fba90c09f9b57fb97daae204ea9cc2c52fa5 Mon Sep 17 00:00:00 2001
+From: Heinz Mauelshagen <heinzm@redhat.com>
+Date: Wed, 12 Mar 2014 16:13:39 +0100
+Subject: dm cache: fix access beyond end of origin device
+
+From: Heinz Mauelshagen <heinzm@redhat.com>
+
+commit e893fba90c09f9b57fb97daae204ea9cc2c52fa5 upstream.
+
+In order to avoid wasting cache space a partial block at the end of the
+origin device is not cached. Unfortunately, the check for such a
+partial block at the end of the origin device was flawed.
+
+Fix accesses beyond the end of the origin device that occurred due to
+attempted promotion of an undetected partial block by:
+
+- initializing the per bio data struct to allow cache_end_io to work properly
+- recognizing access to the partial block at the end of the origin device
+- avoiding out of bounds access to the discard bitset
+
+Otherwise, users can experience errors like the following:
+
+ attempt to access beyond end of device
+ dm-5: rw=0, want=20971520, limit=20971456
+ ...
+ device-mapper: cache: promotion failed; couldn't copy block
+
+Signed-off-by: Heinz Mauelshagen <heinzm@redhat.com>
+Acked-by: Joe Thornber <ejt@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-cache-target.c | 8 +++-----
+ 1 file changed, 3 insertions(+), 5 deletions(-)
+
+--- a/drivers/md/dm-cache-target.c
++++ b/drivers/md/dm-cache-target.c
+@@ -2175,20 +2175,18 @@ static int cache_map(struct dm_target *t
+ bool discarded_block;
+ struct dm_bio_prison_cell *cell;
+ struct policy_result lookup_result;
+- struct per_bio_data *pb;
++ struct per_bio_data *pb = init_per_bio_data(bio, pb_data_size);
+
+- if (from_oblock(block) > from_oblock(cache->origin_blocks)) {
++ if (unlikely(from_oblock(block) >= from_oblock(cache->origin_blocks))) {
+ /*
+ * This can only occur if the io goes to a partial block at
+ * the end of the origin device. We don't cache these.
+ * Just remap to the origin and carry on.
+ */
+- remap_to_origin_clear_discard(cache, bio, block);
++ remap_to_origin(cache, bio);
+ return DM_MAPIO_REMAPPED;
+ }
+
+- pb = init_per_bio_data(bio, pb_data_size);
+-
+ if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) {
+ defer_bio(cache, bio);
+ return DM_MAPIO_SUBMITTED;
--- /dev/null
+From 8b9d96666529a979acf4825391efcc7c8a3e9f12 Mon Sep 17 00:00:00 2001
+From: Heinz Mauelshagen <heinzm@redhat.com>
+Date: Wed, 12 Mar 2014 00:40:05 +0100
+Subject: dm cache: fix truncation bug when copying a block to/from >2TB fast device
+
+From: Heinz Mauelshagen <heinzm@redhat.com>
+
+commit 8b9d96666529a979acf4825391efcc7c8a3e9f12 upstream.
+
+During demotion or promotion to a cache's >2TB fast device we must not
+truncate the cache block's associated sector to 32bits. The 32bit
+temporary result of from_cblock() caused a 32bit multiplication when
+calculating the sector of the fast device in issue_copy_real().
+
+Use an intermediate 64bit type to store the 32bit from_cblock() to allow
+for proper 64bit multiplication.
+
+Here is an example of how this bug manifests on an ext4 filesystem:
+
+ EXT4-fs error (device dm-0): ext4_mb_generate_buddy:756: group 17136, 32768 clusters in bitmap, 30688 in gd; block bitmap corrupt.
+ JBD2: Spotted dirty metadata buffer (dev = dm-0, blocknr = 0). There's a risk of filesystem corruption in case of system crash.
+
+Signed-off-by: Heinz Mauelshagen <heinzm@redhat.com>
+Acked-by: Joe Thornber <ejt@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-cache-target.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/md/dm-cache-target.c
++++ b/drivers/md/dm-cache-target.c
+@@ -861,12 +861,13 @@ static void issue_copy_real(struct dm_ca
+ int r;
+ struct dm_io_region o_region, c_region;
+ struct cache *cache = mg->cache;
++ sector_t cblock = from_cblock(mg->cblock);
+
+ o_region.bdev = cache->origin_dev->bdev;
+ o_region.count = cache->sectors_per_block;
+
+ c_region.bdev = cache->cache_dev->bdev;
+- c_region.sector = from_cblock(mg->cblock) * cache->sectors_per_block;
++ c_region.sector = cblock * cache->sectors_per_block;
+ c_region.count = cache->sectors_per_block;
+
+ if (mg->writeback || mg->demote) {
--- /dev/null
+From 2af120bc040c5ebcda156df6be6a66610ab6957f Mon Sep 17 00:00:00 2001
+From: Laura Abbott <lauraa@codeaurora.org>
+Date: Mon, 10 Mar 2014 15:49:44 -0700
+Subject: mm/compaction: break out of loop on !PageBuddy in isolate_freepages_block
+
+From: Laura Abbott <lauraa@codeaurora.org>
+
+commit 2af120bc040c5ebcda156df6be6a66610ab6957f upstream.
+
+We received several reports of bad page state when freeing CMA pages
+previously allocated with alloc_contig_range:
+
+ BUG: Bad page state in process Binder_A pfn:63202
+ page:d21130b0 count:0 mapcount:1 mapping: (null) index:0x7dfbf
+ page flags: 0x40080068(uptodate|lru|active|swapbacked)
+
+Based on the page state, it looks like the page was still in use. The
+page flags do not make sense for the use case though. Further debugging
+showed that despite alloc_contig_range returning success, at least one
+page in the range still remained in the buddy allocator.
+
+There is an issue with isolate_freepages_block. In strict mode (which
+CMA uses), if any pages in the range cannot be isolated,
+isolate_freepages_block should return failure 0. The current check
+keeps track of the total number of isolated pages and compares against
+the size of the range:
+
+ if (strict && nr_strict_required > total_isolated)
+ total_isolated = 0;
+
+After taking the zone lock, if one of the pages in the range is not in
+the buddy allocator, we continue through the loop and do not increment
+total_isolated. If in the last iteration of the loop we isolate more
+than one page (e.g. last page needed is a higher order page), the check
+for total_isolated may pass and we fail to detect that a page was
+skipped. The fix is to bail out of the loop immediately if we are in
+strict mode. There's no benefit to continuing anyway since we need all
+pages to be isolated. Additionally, drop the error checking based on
+nr_strict_required and just check the pfn ranges. This matches with
+what isolate_freepages_range does.
+
+Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
+Acked-by: Minchan Kim <minchan@kernel.org>
+Cc: Mel Gorman <mgorman@suse.de>
+Acked-by: Vlastimil Babka <vbabka@suse.cz>
+Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
+Acked-by: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
+Acked-by: Michal Nazarewicz <mina86@mina86.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/compaction.c | 20 +++++++++++++-------
+ 1 file changed, 13 insertions(+), 7 deletions(-)
+
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -252,7 +252,6 @@ static unsigned long isolate_freepages_b
+ {
+ int nr_scanned = 0, total_isolated = 0;
+ struct page *cursor, *valid_page = NULL;
+- unsigned long nr_strict_required = end_pfn - blockpfn;
+ unsigned long flags;
+ bool locked = false;
+
+@@ -265,11 +264,12 @@ static unsigned long isolate_freepages_b
+
+ nr_scanned++;
+ if (!pfn_valid_within(blockpfn))
+- continue;
++ goto isolate_fail;
++
+ if (!valid_page)
+ valid_page = page;
+ if (!PageBuddy(page))
+- continue;
++ goto isolate_fail;
+
+ /*
+ * The zone lock must be held to isolate freepages.
+@@ -290,12 +290,10 @@ static unsigned long isolate_freepages_b
+
+ /* Recheck this is a buddy page under lock */
+ if (!PageBuddy(page))
+- continue;
++ goto isolate_fail;
+
+ /* Found a free page, break it into order-0 pages */
+ isolated = split_free_page(page);
+- if (!isolated && strict)
+- break;
+ total_isolated += isolated;
+ for (i = 0; i < isolated; i++) {
+ list_add(&page->lru, freelist);
+@@ -306,7 +304,15 @@ static unsigned long isolate_freepages_b
+ if (isolated) {
+ blockpfn += isolated - 1;
+ cursor += isolated - 1;
++ continue;
+ }
++
++isolate_fail:
++ if (strict)
++ break;
++ else
++ continue;
++
+ }
+
+ trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);
+@@ -316,7 +322,7 @@ static unsigned long isolate_freepages_b
+ * pages requested were isolated. If there were any failures, 0 is
+ * returned and CMA will fail.
+ */
+- if (strict && nr_strict_required > total_isolated)
++ if (strict && blockpfn < end_pfn)
+ total_isolated = 0;
+
+ if (locked)
pci-enable-intx-in-pci_reenable_device-only-when-msi-msi-x-not-enabled.patch
vmxnet3-fix-netpoll-race-condition.patch
vmxnet3-fix-building-without-config_pci_msi.patch
+mm-compaction-break-out-of-loop-on-pagebuddy-in-isolate_freepages_block.patch
+dm-cache-fix-truncation-bug-when-copying-a-block-to-from-2tb-fast-device.patch
+dm-cache-fix-access-beyond-end-of-origin-device.patch