--- /dev/null
+From e893fba90c09f9b57fb97daae204ea9cc2c52fa5 Mon Sep 17 00:00:00 2001
+From: Heinz Mauelshagen <heinzm@redhat.com>
+Date: Wed, 12 Mar 2014 16:13:39 +0100
+Subject: dm cache: fix access beyond end of origin device
+
+From: Heinz Mauelshagen <heinzm@redhat.com>
+
+commit e893fba90c09f9b57fb97daae204ea9cc2c52fa5 upstream.
+
+In order to avoid wasting cache space, a partial block at the end of the
+origin device is not cached. Unfortunately, the check for such a
+partial block at the end of the origin device was flawed.
+
+Fix accesses beyond the end of the origin device that occurred due to
+attempted promotion of an undetected partial block by:
+
+- initializing the per bio data struct to allow cache_end_io to work properly
+- recognizing access to the partial block at the end of the origin device
+- avoiding out of bounds access to the discard bitset
+
+Otherwise, users can experience errors like the following:
+
+ attempt to access beyond end of device
+ dm-5: rw=0, want=20971520, limit=20971456
+ ...
+ device-mapper: cache: promotion failed; couldn't copy block
+
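+A worked illustration of the off-by-one (block count chosen arbitrarily): if
+the origin holds 1000 complete blocks plus a partial tail, origin_blocks is
+1000 and a bio into the tail maps to block 1000.  The old test
+
+    if (from_oblock(block) > from_oblock(cache->origin_blocks))  /* 1000 > 1000: false */
+
+let that bio through, so an attempted promotion copied a full block past the
+end of the device; the new ">=" test catches the partial block and simply
+remaps such bios to the origin.
+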
+Signed-off-by: Heinz Mauelshagen <heinzm@redhat.com>
+Acked-by: Joe Thornber <ejt@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-cache-target.c | 8 +++-----
+ 1 file changed, 3 insertions(+), 5 deletions(-)
+
+--- a/drivers/md/dm-cache-target.c
++++ b/drivers/md/dm-cache-target.c
+@@ -2454,20 +2454,18 @@ static int cache_map(struct dm_target *t
+ bool discarded_block;
+ struct dm_bio_prison_cell *cell;
+ struct policy_result lookup_result;
+- struct per_bio_data *pb;
++ struct per_bio_data *pb = init_per_bio_data(bio, pb_data_size);
+
+- if (from_oblock(block) > from_oblock(cache->origin_blocks)) {
++ if (unlikely(from_oblock(block) >= from_oblock(cache->origin_blocks))) {
+ /*
+ * This can only occur if the io goes to a partial block at
+ * the end of the origin device. We don't cache these.
+ * Just remap to the origin and carry on.
+ */
+- remap_to_origin_clear_discard(cache, bio, block);
++ remap_to_origin(cache, bio);
+ return DM_MAPIO_REMAPPED;
+ }
+
+- pb = init_per_bio_data(bio, pb_data_size);
+-
+ if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) {
+ defer_bio(cache, bio);
+ return DM_MAPIO_SUBMITTED;
--- /dev/null
+From 8b9d96666529a979acf4825391efcc7c8a3e9f12 Mon Sep 17 00:00:00 2001
+From: Heinz Mauelshagen <heinzm@redhat.com>
+Date: Wed, 12 Mar 2014 00:40:05 +0100
+Subject: dm cache: fix truncation bug when copying a block to/from >2TB fast device
+
+From: Heinz Mauelshagen <heinzm@redhat.com>
+
+commit 8b9d96666529a979acf4825391efcc7c8a3e9f12 upstream.
+
+During demotion or promotion to a cache's >2TB fast device we must not
+truncate the cache block's associated sector to 32bits. The 32bit
+temporary result of from_cblock() caused a 32bit multiplication when
+calculating the sector of the fast device in issue_copy_real().
+
+Use an intermediate 64bit type to store the 32bit from_cblock() to allow
+for proper 64bit multiplication.
+
+Here is an example of how this bug manifests on an ext4 filesystem:
+
+ EXT4-fs error (device dm-0): ext4_mb_generate_buddy:756: group 17136, 32768 clusters in bitmap, 30688 in gd; block bitmap corrupt.
+ JBD2: Spotted dirty metadata buffer (dev = dm-0, blocknr = 0). There's a risk of filesystem corruption in case of system crash.
+
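+The truncation can be reproduced with plain C arithmetic (the values below
+are made up, but sized like a >2TB fast device):
+
+    uint32_t cblock = 5000000;           /* from_cblock() yields a 32-bit value */
+    uint32_t sectors_per_block = 1024;   /* 512 KiB cache blocks                */
+
+    sector_t bad  = cblock * sectors_per_block;             /* 32x32 multiply wraps to 825032704  */
+    sector_t good = (sector_t) cblock * sectors_per_block;  /* 64-bit multiply gives 5120000000   */
+
+The patch gets the same widening by copying from_cblock() into a sector_t
+local before the multiplication, so the product is computed in 64 bits.
+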
+Signed-off-by: Heinz Mauelshagen <heinzm@redhat.com>
+Acked-by: Joe Thornber <ejt@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-cache-target.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/md/dm-cache-target.c
++++ b/drivers/md/dm-cache-target.c
+@@ -970,12 +970,13 @@ static void issue_copy_real(struct dm_ca
+ int r;
+ struct dm_io_region o_region, c_region;
+ struct cache *cache = mg->cache;
++ sector_t cblock = from_cblock(mg->cblock);
+
+ o_region.bdev = cache->origin_dev->bdev;
+ o_region.count = cache->sectors_per_block;
+
+ c_region.bdev = cache->cache_dev->bdev;
+- c_region.sector = from_cblock(mg->cblock) * cache->sectors_per_block;
++ c_region.sector = cblock * cache->sectors_per_block;
+ c_region.count = cache->sectors_per_block;
+
+ if (mg->writeback || mg->demote) {
--- /dev/null
+From 14f398ca2f26a2ed6236aec54395e0fa06ec8a82 Mon Sep 17 00:00:00 2001
+From: Heinz Mauelshagen <heinzm@redhat.com>
+Date: Fri, 28 Feb 2014 12:02:56 -0500
+Subject: dm cache mq: fix memory allocation failure for large cache devices
+
+From: Heinz Mauelshagen <heinzm@redhat.com>
+
+commit 14f398ca2f26a2ed6236aec54395e0fa06ec8a82 upstream.
+
+The memory allocated for the multiqueue policy's hash table doesn't need
+to be physically contiguous. Use vzalloc() instead of kzalloc().
+Fedora has been carrying this fix since 10/10/2013.
+
+Failure seen during creation of a 10TB cached device with a 2048 sector
+block size and 411GB cache size:
+
+ dmsetup: page allocation failure: order:9, mode:0x10c0d0
+ CPU: 11 PID: 29235 Comm: dmsetup Not tainted 3.10.4 #3
+ Hardware name: Supermicro X8DTL/X8DTL, BIOS 2.1a 12/30/2011
+ 000000000010c0d0 ffff880090941898 ffffffff81387ab4 ffff880090941928
+ ffffffff810bb26f 0000000000000009 000000000010c0d0 ffff880090941928
+ ffffffff81385dbc ffffffff815f3840 ffffffff00000000 000002000010c0d0
+ Call Trace:
+ [<ffffffff81387ab4>] dump_stack+0x19/0x1b
+ [<ffffffff810bb26f>] warn_alloc_failed+0x110/0x124
+ [<ffffffff81385dbc>] ? __alloc_pages_direct_compact+0x17c/0x18e
+ [<ffffffff810bda2e>] __alloc_pages_nodemask+0x6c7/0x75e
+ [<ffffffff810bdad7>] __get_free_pages+0x12/0x3f
+ [<ffffffff810ea148>] kmalloc_order_trace+0x29/0x88
+ [<ffffffff810ec1fd>] __kmalloc+0x36/0x11b
+ [<ffffffffa031eeed>] ? mq_create+0x1dc/0x2cf [dm_cache_mq]
+ [<ffffffffa031efc0>] mq_create+0x2af/0x2cf [dm_cache_mq]
+ [<ffffffffa0314605>] dm_cache_policy_create+0xa7/0xd2 [dm_cache]
+ [<ffffffffa0312530>] ? cache_ctr+0x245/0xa13 [dm_cache]
+ [<ffffffffa031263e>] cache_ctr+0x353/0xa13 [dm_cache]
+ [<ffffffffa012b916>] dm_table_add_target+0x227/0x2ce [dm_mod]
+ [<ffffffffa012e8e4>] table_load+0x286/0x2ac [dm_mod]
+ [<ffffffffa012e65e>] ? dev_wait+0x8a/0x8a [dm_mod]
+ [<ffffffffa012e324>] ctl_ioctl+0x39a/0x3c2 [dm_mod]
+ [<ffffffffa012e35a>] dm_ctl_ioctl+0xe/0x12 [dm_mod]
+ [<ffffffff81101181>] vfs_ioctl+0x21/0x34
+ [<ffffffff811019d3>] do_vfs_ioctl+0x3b1/0x3f4
+ [<ffffffff810f4d2e>] ? ____fput+0x9/0xb
+ [<ffffffff81050b6c>] ? task_work_run+0x7e/0x92
+ [<ffffffff81101a68>] SyS_ioctl+0x52/0x82
+ [<ffffffff81391d92>] system_call_fastpath+0x16/0x1b
+
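+The order:9 request in the trace is easy to reconstruct (back-of-the-envelope
+figures, assuming an 8-byte hash bucket head on a 64-bit kernel):
+
+    cache blocks ~= 411 GB / 1 MiB (2048 sectors)     ~= 420000
+    nr_buckets    = next_power(cache blocks / 2, 16)   = 262144
+    table size    = 262144 * 8 bytes                   = 2 MiB
+    2 MiB         = 512 contiguous 4 KiB pages         = an order-9 kzalloc()
+
+Finding 512 physically contiguous free pages on a busy, fragmented machine is
+unreliable; vzalloc() only needs individual pages that are virtually
+contiguous, so the allocation no longer fails.
+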
+Signed-off-by: Heinz Mauelshagen <heinzm@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-cache-policy-mq.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/md/dm-cache-policy-mq.c
++++ b/drivers/md/dm-cache-policy-mq.c
+@@ -869,7 +869,7 @@ static void mq_destroy(struct dm_cache_p
+ {
+ struct mq_policy *mq = to_mq_policy(p);
+
+- kfree(mq->table);
++ vfree(mq->table);
+ epool_exit(&mq->cache_pool);
+ epool_exit(&mq->pre_cache_pool);
+ kfree(mq);
+@@ -1224,7 +1224,7 @@ static struct dm_cache_policy *mq_create
+
+ mq->nr_buckets = next_power(from_cblock(cache_size) / 2, 16);
+ mq->hash_bits = ffs(mq->nr_buckets) - 1;
+- mq->table = kzalloc(sizeof(*mq->table) * mq->nr_buckets, GFP_KERNEL);
++ mq->table = vzalloc(sizeof(*mq->table) * mq->nr_buckets);
+ if (!mq->table)
+ goto bad_alloc_table;
+
--- /dev/null
+From cebc2de44d3bce53e46476e774126c298ca2c8a9 Mon Sep 17 00:00:00 2001
+From: Joe Thornber <ejt@redhat.com>
+Date: Fri, 7 Mar 2014 14:57:19 +0000
+Subject: dm space map metadata: fix refcount decrement below 0 which caused corruption
+
+From: Joe Thornber <ejt@redhat.com>
+
+commit cebc2de44d3bce53e46476e774126c298ca2c8a9 upstream.
+
+This has been a relatively long-standing issue that wasn't nailed down
+until Teng-Feng Yang's meticulous bug report to dm-devel on 3/7/2014,
+see: http://www.redhat.com/archives/dm-devel/2014-March/msg00021.html
+
+From that report:
+ "When decreasing the reference count of a metadata block with its
+ reference count equals 3, we will call dm_btree_remove() to remove
+ this entry from the B+tree which keeps the reference count info in
+ metadata device.
+
+ The B+tree will try to rebalance the entry of the child nodes in each
+ node it traversed, and the rebalance process contains the following
+ steps.
+
+ (1) Finding the corresponding children in current node (shadow_current(s))
+ (2) Shadow the children block (issue BOP_INC)
+ (3) redistribute keys among children, and free children if necessary (issue BOP_DEC)
+
+ Since the update of a metadata block's reference count could be
+ recursive, we will stash these reference count update operations in
+ smm->uncommitted and then process them in a FILO fashion.
+
+ The problem is that step(3) could free the children which were created
+ in step(2), so the BOP_DEC issued in step(3) will be carried out
+ before the BOP_INC issued in step(2) since these BOPs will be
+ processed in FILO fashion. Once the BOP_DEC from step(3) tries to
+ decrease the reference count of newly shadow block, it will report
+ failure for its reference equals 0 before decreasing. It looks like we
+ can solve this issue by processing these BOPs in a FIFO fashion
+ instead of FILO."
+
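+A minimal trace of the failure described above, with a single hypothetical
+shadow block b whose reference count starts at 0:
+
+    stashed during rebalancing:  BOP_INC(b) (step 2), then BOP_DEC(b) (step 3)
+
+    FILO (old behaviour): BOP_DEC(b) runs first while b's count is still 0
+                          -> "unable to decrement a reference count below 0"
+    FIFO (this patch):    BOP_INC(b) runs first (count 1), then BOP_DEC(b)
+                          (count 0) -> no error
+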
+Commit 5b564d80 ("dm space map: disallow decrementing a reference count
+below zero") changed the code to report an error for this temporary
+refcount decrement below zero. So what was previously a harmless
+invalid refcount became a hard failure due to the new error path:
+
+ device-mapper: space map common: unable to decrement a reference count below 0
+ device-mapper: thin: 253:6: dm_thin_insert_block() failed: error = -22
+ device-mapper: thin: 253:6: switching pool to read-only mode
+
+This bug is in dm persistent-data code that is common to the DM thin and
+cache targets. So any users of those targets should apply this fix.
+
+Fix this by applying recursive space map operations in FIFO order rather
+than FILO.
+
+Resolves: https://bugzilla.kernel.org/show_bug.cgi?id=68801
+
+Reported-by: Apollon Oikonomopoulos <apoikos@debian.org>
+Reported-by: edwillam1007@gmail.com
+Reported-by: Teng-Feng Yang <shinrairis@gmail.com>
+Signed-off-by: Joe Thornber <ejt@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/persistent-data/dm-space-map-metadata.c | 113 +++++++++++++++++----
+ 1 file changed, 92 insertions(+), 21 deletions(-)
+
+--- a/drivers/md/persistent-data/dm-space-map-metadata.c
++++ b/drivers/md/persistent-data/dm-space-map-metadata.c
+@@ -91,6 +91,69 @@ struct block_op {
+ dm_block_t block;
+ };
+
++struct bop_ring_buffer {
++ unsigned begin;
++ unsigned end;
++ struct block_op bops[MAX_RECURSIVE_ALLOCATIONS + 1];
++};
++
++static void brb_init(struct bop_ring_buffer *brb)
++{
++ brb->begin = 0;
++ brb->end = 0;
++}
++
++static bool brb_empty(struct bop_ring_buffer *brb)
++{
++ return brb->begin == brb->end;
++}
++
++static unsigned brb_next(struct bop_ring_buffer *brb, unsigned old)
++{
++ unsigned r = old + 1;
++ return (r >= (sizeof(brb->bops) / sizeof(*brb->bops))) ? 0 : r;
++}
++
++static int brb_push(struct bop_ring_buffer *brb,
++ enum block_op_type type, dm_block_t b)
++{
++ struct block_op *bop;
++ unsigned next = brb_next(brb, brb->end);
++
++ /*
++ * We don't allow the last bop to be filled, this way we can
++ * differentiate between full and empty.
++ */
++ if (next == brb->begin)
++ return -ENOMEM;
++
++ bop = brb->bops + brb->end;
++ bop->type = type;
++ bop->block = b;
++
++ brb->end = next;
++
++ return 0;
++}
++
++static int brb_pop(struct bop_ring_buffer *brb, struct block_op *result)
++{
++ struct block_op *bop;
++
++ if (brb_empty(brb))
++ return -ENODATA;
++
++ bop = brb->bops + brb->begin;
++ result->type = bop->type;
++ result->block = bop->block;
++
++ brb->begin = brb_next(brb, brb->begin);
++
++ return 0;
++}
++
++/*----------------------------------------------------------------*/
++
+ struct sm_metadata {
+ struct dm_space_map sm;
+
+@@ -101,25 +164,20 @@ struct sm_metadata {
+
+ unsigned recursion_count;
+ unsigned allocated_this_transaction;
+- unsigned nr_uncommitted;
+- struct block_op uncommitted[MAX_RECURSIVE_ALLOCATIONS];
++ struct bop_ring_buffer uncommitted;
+
+ struct threshold threshold;
+ };
+
+ static int add_bop(struct sm_metadata *smm, enum block_op_type type, dm_block_t b)
+ {
+- struct block_op *op;
++ int r = brb_push(&smm->uncommitted, type, b);
+
+- if (smm->nr_uncommitted == MAX_RECURSIVE_ALLOCATIONS) {
++ if (r) {
+ DMERR("too many recursive allocations");
+ return -ENOMEM;
+ }
+
+- op = smm->uncommitted + smm->nr_uncommitted++;
+- op->type = type;
+- op->block = b;
+-
+ return 0;
+ }
+
+@@ -158,11 +216,17 @@ static int out(struct sm_metadata *smm)
+ return -ENOMEM;
+ }
+
+- if (smm->recursion_count == 1 && smm->nr_uncommitted) {
+- while (smm->nr_uncommitted && !r) {
+- smm->nr_uncommitted--;
+- r = commit_bop(smm, smm->uncommitted +
+- smm->nr_uncommitted);
++ if (smm->recursion_count == 1) {
++ while (!brb_empty(&smm->uncommitted)) {
++ struct block_op bop;
++
++ r = brb_pop(&smm->uncommitted, &bop);
++ if (r) {
++ DMERR("bug in bop ring buffer");
++ break;
++ }
++
++ r = commit_bop(smm, &bop);
+ if (r)
+ break;
+ }
+@@ -217,7 +281,8 @@ static int sm_metadata_get_nr_free(struc
+ static int sm_metadata_get_count(struct dm_space_map *sm, dm_block_t b,
+ uint32_t *result)
+ {
+- int r, i;
++ int r;
++ unsigned i;
+ struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
+ unsigned adjustment = 0;
+
+@@ -225,8 +290,10 @@ static int sm_metadata_get_count(struct
+ * We may have some uncommitted adjustments to add. This list
+ * should always be really short.
+ */
+- for (i = 0; i < smm->nr_uncommitted; i++) {
+- struct block_op *op = smm->uncommitted + i;
++ for (i = smm->uncommitted.begin;
++ i != smm->uncommitted.end;
++ i = brb_next(&smm->uncommitted, i)) {
++ struct block_op *op = smm->uncommitted.bops + i;
+
+ if (op->block != b)
+ continue;
+@@ -254,7 +321,8 @@ static int sm_metadata_get_count(struct
+ static int sm_metadata_count_is_more_than_one(struct dm_space_map *sm,
+ dm_block_t b, int *result)
+ {
+- int r, i, adjustment = 0;
++ int r, adjustment = 0;
++ unsigned i;
+ struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
+ uint32_t rc;
+
+@@ -262,8 +330,11 @@ static int sm_metadata_count_is_more_tha
+ * We may have some uncommitted adjustments to add. This list
+ * should always be really short.
+ */
+- for (i = 0; i < smm->nr_uncommitted; i++) {
+- struct block_op *op = smm->uncommitted + i;
++ for (i = smm->uncommitted.begin;
++ i != smm->uncommitted.end;
++ i = brb_next(&smm->uncommitted, i)) {
++
++ struct block_op *op = smm->uncommitted.bops + i;
+
+ if (op->block != b)
+ continue;
+@@ -671,7 +742,7 @@ int dm_sm_metadata_create(struct dm_spac
+ smm->begin = superblock + 1;
+ smm->recursion_count = 0;
+ smm->allocated_this_transaction = 0;
+- smm->nr_uncommitted = 0;
++ brb_init(&smm->uncommitted);
+ threshold_init(&smm->threshold);
+
+ memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm));
+@@ -713,7 +784,7 @@ int dm_sm_metadata_open(struct dm_space_
+ smm->begin = 0;
+ smm->recursion_count = 0;
+ smm->allocated_this_transaction = 0;
+- smm->nr_uncommitted = 0;
++ brb_init(&smm->uncommitted);
+ threshold_init(&smm->threshold);
+
+ memcpy(&smm->old_ll, &smm->ll, sizeof(smm->old_ll));
--- /dev/null
+From 2af120bc040c5ebcda156df6be6a66610ab6957f Mon Sep 17 00:00:00 2001
+From: Laura Abbott <lauraa@codeaurora.org>
+Date: Mon, 10 Mar 2014 15:49:44 -0700
+Subject: mm/compaction: break out of loop on !PageBuddy in isolate_freepages_block
+
+From: Laura Abbott <lauraa@codeaurora.org>
+
+commit 2af120bc040c5ebcda156df6be6a66610ab6957f upstream.
+
+We received several reports of bad page state when freeing CMA pages
+previously allocated with alloc_contig_range:
+
+ BUG: Bad page state in process Binder_A pfn:63202
+ page:d21130b0 count:0 mapcount:1 mapping: (null) index:0x7dfbf
+ page flags: 0x40080068(uptodate|lru|active|swapbacked)
+
+Based on the page state, it looks like the page was still in use. The
+page flags do not make sense for the use case though. Further debugging
+showed that despite alloc_contig_range returning success, at least one
+page in the range still remained in the buddy allocator.
+
+There is an issue with isolate_freepages_block. In strict mode (which
+CMA uses), if any pages in the range cannot be isolated,
+isolate_freepages_block should report failure by returning 0. The current check
+keeps track of the total number of isolated pages and compares against
+the size of the range:
+
+ if (strict && nr_strict_required > total_isolated)
+ total_isolated = 0;
+
+After taking the zone lock, if one of the pages in the range is not in
+the buddy allocator, we continue through the loop and do not increment
+total_isolated. If in the last iteration of the loop we isolate more
+than one page (e.g. last page needed is a higher order page), the check
+for total_isolated may pass and we fail to detect that a page was
+skipped. The fix is to bail out of the loop immediately if we are in
+strict mode. There's no benefit to continuing anyway since we need all
+pages to be isolated. Additionally, drop the error checking based on
+nr_strict_required and just check the pfn ranges. This matches with
+what isolate_freepages_range does.
+
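+A hypothetical walk through the old check (pfn numbers invented for
+illustration): suppose the range to isolate is pfn 2..9, so nr_strict_required
+is 8.
+
+    pfn 2: still allocated (!PageBuddy)      -> skipped, isolates 0
+    pfn 3: order-0 free page                 -> isolates 1
+    pfn 4: order-2 free page (pfns 4-7)      -> isolates 4
+    pfn 8: order-3 free page (pfns 8-15)     -> isolates 8, past the range end
+
+total_isolated is 13, so "nr_strict_required > total_isolated" is false and
+the still-allocated page at pfn 2 goes unreported.  With this patch, strict
+mode bails out at pfn 2, blockpfn (2) is below end_pfn (10), and 0 is
+returned.
+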
+Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
+Acked-by: Minchan Kim <minchan@kernel.org>
+Cc: Mel Gorman <mgorman@suse.de>
+Acked-by: Vlastimil Babka <vbabka@suse.cz>
+Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
+Acked-by: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
+Acked-by: Michal Nazarewicz <mina86@mina86.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/compaction.c | 20 +++++++++++++-------
+ 1 file changed, 13 insertions(+), 7 deletions(-)
+
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -251,7 +251,6 @@ static unsigned long isolate_freepages_b
+ {
+ int nr_scanned = 0, total_isolated = 0;
+ struct page *cursor, *valid_page = NULL;
+- unsigned long nr_strict_required = end_pfn - blockpfn;
+ unsigned long flags;
+ bool locked = false;
+
+@@ -264,11 +263,12 @@ static unsigned long isolate_freepages_b
+
+ nr_scanned++;
+ if (!pfn_valid_within(blockpfn))
+- continue;
++ goto isolate_fail;
++
+ if (!valid_page)
+ valid_page = page;
+ if (!PageBuddy(page))
+- continue;
++ goto isolate_fail;
+
+ /*
+ * The zone lock must be held to isolate freepages.
+@@ -289,12 +289,10 @@ static unsigned long isolate_freepages_b
+
+ /* Recheck this is a buddy page under lock */
+ if (!PageBuddy(page))
+- continue;
++ goto isolate_fail;
+
+ /* Found a free page, break it into order-0 pages */
+ isolated = split_free_page(page);
+- if (!isolated && strict)
+- break;
+ total_isolated += isolated;
+ for (i = 0; i < isolated; i++) {
+ list_add(&page->lru, freelist);
+@@ -305,7 +303,15 @@ static unsigned long isolate_freepages_b
+ if (isolated) {
+ blockpfn += isolated - 1;
+ cursor += isolated - 1;
++ continue;
+ }
++
++isolate_fail:
++ if (strict)
++ break;
++ else
++ continue;
++
+ }
+
+ trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);
+@@ -315,7 +321,7 @@ static unsigned long isolate_freepages_b
+ * pages requested were isolated. If there were any failures, 0 is
+ * returned and CMA will fail.
+ */
+- if (strict && nr_strict_required > total_isolated)
++ if (strict && blockpfn < end_pfn)
+ total_isolated = 0;
+
+ if (locked)
pci-enable-intx-in-pci_reenable_device-only-when-msi-msi-x-not-enabled.patch
vmxnet3-fix-netpoll-race-condition.patch
vmxnet3-fix-building-without-config_pci_msi.patch
+mm-compaction-break-out-of-loop-on-pagebuddy-in-isolate_freepages_block.patch
+dm-cache-mq-fix-memory-allocation-failure-for-large-cache-devices.patch
+dm-space-map-metadata-fix-refcount-decrement-below-0-which-caused-corruption.patch
+dm-cache-fix-truncation-bug-when-copying-a-block-to-from-2tb-fast-device.patch
+dm-cache-fix-access-beyond-end-of-origin-device.patch