From: Greg Kroah-Hartman
Date: Mon, 27 Nov 2017 13:19:51 +0000 (+0100)
Subject: 4.14-stable patches
X-Git-Tag: v3.18.85~35
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=a04a5309fd6c03f26d0538401c0cea6c117ff8dc;p=thirdparty%2Fkernel%2Fstable-queue.git

4.14-stable patches

added patches:
	dm-allocate-struct-mapped_device-with-kvzalloc.patch
	dm-bufio-fix-integer-overflow-when-limiting-maximum-cache-size.patch
	dm-cache-fix-race-condition-in-the-writeback-mode-overwrite_bio-optimisation.patch
	dm-crypt-allow-unaligned-bv_offset.patch
	dm-integrity-allow-unaligned-bv_offset.patch
	dm-mpath-remove-annoying-message-of-blk_get_request-returned-11.patch
	dm-zoned-ignore-last-smaller-runt-zone.patch
	ovl-put-upperdentry-if-ovl_check_origin-fails.patch
---

diff --git a/queue-4.14/dm-allocate-struct-mapped_device-with-kvzalloc.patch b/queue-4.14/dm-allocate-struct-mapped_device-with-kvzalloc.patch
new file mode 100644
index 00000000000..edf8e61818f
--- /dev/null
+++ b/queue-4.14/dm-allocate-struct-mapped_device-with-kvzalloc.patch
@@ -0,0 +1,83 @@
+From 856eb0916d181da6d043cc33e03f54d5c5bbe54a Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka
+Date: Tue, 31 Oct 2017 19:33:02 -0400
+Subject: dm: allocate struct mapped_device with kvzalloc
+
+From: Mikulas Patocka
+
+commit 856eb0916d181da6d043cc33e03f54d5c5bbe54a upstream.
+
+The structure srcu_struct can be very big, its size is proportional to the
+value CONFIG_NR_CPUS. The Fedora kernel has CONFIG_NR_CPUS 8192, the field
+io_barrier in the struct mapped_device has 84kB in the debugging kernel
+and 50kB in the non-debugging kernel. The large size may result in failure
+of the function kzalloc_node.
+
+In order to avoid the allocation failure, we use the function
+kvzalloc_node, this function falls back to vmalloc if a large contiguous
+chunk of memory is not available. This patch also moves the field
+io_barrier to the last position of struct mapped_device - the reason is
+that on many processor architectures, short memory offsets result in
+smaller code than long memory offsets - on x86-64 it reduces code size by
+320 bytes.
+
+Note to stable kernel maintainers - the kernels 4.11 and older don't have
+the function kvzalloc_node, you can use the function vzalloc_node instead.
+
+Signed-off-by: Mikulas Patocka
+Signed-off-by: Mike Snitzer
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/md/dm-core.h | 3 ++-
+ drivers/md/dm.c      | 6 +++---
+ 2 files changed, 5 insertions(+), 4 deletions(-)
+
+--- a/drivers/md/dm-core.h
++++ b/drivers/md/dm-core.h
+@@ -29,7 +29,6 @@ struct dm_kobject_holder {
+  * DM targets must _not_ deference a mapped_device to directly access its members!
+  */
+ struct mapped_device {
+-	struct srcu_struct io_barrier;
+ 	struct mutex suspend_lock;
+ 
+ 	/*
+@@ -127,6 +126,8 @@ struct mapped_device {
+ 	struct blk_mq_tag_set *tag_set;
+ 	bool use_blk_mq:1;
+ 	bool init_tio_pdu:1;
++
++	struct srcu_struct io_barrier;
+ };
+ 
+ void dm_init_md_queue(struct mapped_device *md);
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -1695,7 +1695,7 @@ static struct mapped_device *alloc_dev(i
+ 	struct mapped_device *md;
+ 	void *old_md;
+ 
+-	md = kzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id);
++	md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id);
+ 	if (!md) {
+ 		DMWARN("unable to allocate device, out of memory.");
+ 		return NULL;
+@@ -1795,7 +1795,7 @@ bad_io_barrier:
+ bad_minor:
+ 	module_put(THIS_MODULE);
+ bad_module_get:
+-	kfree(md);
++	kvfree(md);
+ 	return NULL;
+ }
+ 
+@@ -1814,7 +1814,7 @@ static void free_dev(struct mapped_devic
+ 	free_minor(minor);
+ 
+ 	module_put(THIS_MODULE);
+-	kfree(md);
++	kvfree(md);
+ }
+ 
+ static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
diff --git a/queue-4.14/dm-bufio-fix-integer-overflow-when-limiting-maximum-cache-size.patch b/queue-4.14/dm-bufio-fix-integer-overflow-when-limiting-maximum-cache-size.patch
new file mode 100644
index 00000000000..075d7fc7bd7
--- /dev/null
+++ b/queue-4.14/dm-bufio-fix-integer-overflow-when-limiting-maximum-cache-size.patch
@@ -0,0 +1,72 @@
+From 74d4108d9e681dbbe4a2940ed8fdff1f6868184c Mon Sep 17 00:00:00 2001
+From: Eric Biggers
+Date: Wed, 15 Nov 2017 16:38:09 -0800
+Subject: dm bufio: fix integer overflow when limiting maximum cache size
+
+From: Eric Biggers
+
+commit 74d4108d9e681dbbe4a2940ed8fdff1f6868184c upstream.
+
+The default max_cache_size_bytes for dm-bufio is meant to be the lesser
+of 25% of the size of the vmalloc area and 2% of the size of lowmem.
+However, on 32-bit systems the intermediate result in the expression
+
+    (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100
+
+overflows, causing the wrong result to be computed. For example, on a
+32-bit system where the vmalloc area is 520093696 bytes, the result is
+1174405 rather than the expected 130023424, which makes the maximum
+cache size much too small (far less than 2% of lowmem). This causes
+severe performance problems for dm-verity users on affected systems.
+
+Fix this by using mult_frac() to correctly multiply by a percentage. Do
+this for all places in dm-bufio that multiply by a percentage. Also
+replace (VMALLOC_END - VMALLOC_START) with VMALLOC_TOTAL, which contrary
+to the comment is now defined in include/linux/vmalloc.h.
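
The overflow above is easy to reproduce outside the kernel. Below is a minimal
userspace sketch, not the kernel code: the 32-bit type is forced with uint32_t,
the values are taken from the message above, and mult_frac32() only mirrors the
shape of the kernel's mult_frac() macro (divide first, then multiply, then put
back what the division dropped via the remainder).

#include <stdio.h>
#include <stdint.h>

/* Quotient/remainder split in the spirit of the kernel's mult_frac():
 * every intermediate value stays below 2^32. */
static uint32_t mult_frac32(uint32_t x, uint32_t numer, uint32_t denom)
{
	uint32_t quot = x / denom;
	uint32_t rem  = x % denom;

	return quot * numer + (rem * numer) / denom;
}

int main(void)
{
	uint32_t vmalloc_bytes = 520093696;	/* example from the commit message */
	uint32_t percent = 25;			/* DM_BUFIO_VMALLOC_PERCENT */

	/* Multiply-first: the product wraps modulo 2^32 before the divide. */
	uint32_t naive = vmalloc_bytes * percent / 100;

	/* Divide-first: no intermediate overflow. */
	uint32_t fixed = mult_frac32(vmalloc_bytes, percent, 100);

	printf("naive: %u\n", naive);	/* 1174405 - the bogus limit */
	printf("fixed: %u\n", fixed);	/* 130023424 - 25% computed correctly */
	return 0;
}

Running it prints exactly the two numbers quoted in the message, which is why
the broken limit ended up three orders of magnitude too small.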
+
+Depends-on: 9993bc635 ("sched/x86: Fix overflow in cyc2ns_offset")
+Fixes: 95d402f057f2 ("dm: add bufio")
+Signed-off-by: Eric Biggers
+Signed-off-by: Mike Snitzer
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/md/dm-bufio.c | 15 ++++++---------
+ 1 file changed, 6 insertions(+), 9 deletions(-)
+
+--- a/drivers/md/dm-bufio.c
++++ b/drivers/md/dm-bufio.c
+@@ -974,7 +974,8 @@ static void __get_memory_limit(struct dm
+ 		buffers = c->minimum_buffers;
+ 
+ 	*limit_buffers = buffers;
+-	*threshold_buffers = buffers * DM_BUFIO_WRITEBACK_PERCENT / 100;
++	*threshold_buffers = mult_frac(buffers,
++				       DM_BUFIO_WRITEBACK_PERCENT, 100);
+ }
+ 
+ /*
+@@ -1910,19 +1911,15 @@ static int __init dm_bufio_init(void)
+ 	memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches);
+ 	memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names);
+ 
+-	mem = (__u64)((totalram_pages - totalhigh_pages) *
+-		      DM_BUFIO_MEMORY_PERCENT / 100) << PAGE_SHIFT;
++	mem = (__u64)mult_frac(totalram_pages - totalhigh_pages,
++			       DM_BUFIO_MEMORY_PERCENT, 100) << PAGE_SHIFT;
+ 
+ 	if (mem > ULONG_MAX)
+ 		mem = ULONG_MAX;
+ 
+ #ifdef CONFIG_MMU
+-	/*
+-	 * Get the size of vmalloc space the same way as VMALLOC_TOTAL
+-	 * in fs/proc/internal.h
+-	 */
+-	if (mem > (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100)
+-		mem = (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100;
++	if (mem > mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100))
++		mem = mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100);
+ #endif
+ 
+ 	dm_bufio_default_cache_size = mem;
diff --git a/queue-4.14/dm-cache-fix-race-condition-in-the-writeback-mode-overwrite_bio-optimisation.patch b/queue-4.14/dm-cache-fix-race-condition-in-the-writeback-mode-overwrite_bio-optimisation.patch
new file mode 100644
index 00000000000..4e5932da329
--- /dev/null
+++ b/queue-4.14/dm-cache-fix-race-condition-in-the-writeback-mode-overwrite_bio-optimisation.patch
@@ -0,0 +1,156 @@
+From d1260e2a3f85f4c1010510a15f89597001318b1b Mon Sep 17 00:00:00 2001
+From: Joe Thornber
+Date: Fri, 10 Nov 2017 07:53:31 -0500
+Subject: dm cache: fix race condition in the writeback mode overwrite_bio optimisation
+
+From: Joe Thornber
+
+commit d1260e2a3f85f4c1010510a15f89597001318b1b upstream.
+
+When a DM cache in writeback mode moves data between the slow and fast
+device it can often avoid a copy if the triggering bio either:
+
+i) covers the whole block (no point copying if we're about to overwrite it)
+ii) the migration is a promotion and the origin block is currently discarded
+
+Prior to this fix there was a race with case (ii). The discard status
+was checked with a shared lock held (rather than exclusive). This meant
+another bio could run in parallel and write data to the origin, removing
+the discard state. After the promotion the parallel write would have
+been lost.
+
+With this fix the discard status is re-checked once the exclusive lock
+has been acquired. If the block is no longer discarded it falls back to
+the slower full copy path.
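
The essence of that fix, reduced to a hedged userspace sketch: a pthread
rwlock stands in for the bio prison's shared/exclusive cell locks, and all
names are illustrative rather than dm-cache's. The point is that a condition
observed under a shared lock must be re-checked once the exclusive lock is
held, because a writer may have run in between.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_rwlock_t cell_lock = PTHREAD_RWLOCK_INITIALIZER;
static bool discarded = true;	/* the state the optimisation depends on */

/* First check runs under the shared lock, so the answer can go stale
 * the moment the lock is dropped. */
static bool check_shared(void)
{
	bool d;

	pthread_rwlock_rdlock(&cell_lock);
	d = discarded;
	pthread_rwlock_unlock(&cell_lock);
	return d;
}

static void promote(void)
{
	bool optimise = check_shared();

	pthread_rwlock_wrlock(&cell_lock);	/* now exclusive */
	if (optimise && !discarded) {
		/* Raced with a write between the two checks: the fast
		 * path would lose that write, so take the full copy. */
		optimise = false;
	}
	printf(optimise ? "overwrite in place\n" : "full copy\n");
	pthread_rwlock_unlock(&cell_lock);
}

int main(void)
{
	promote();
	return 0;
}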
+
+Fixes: b29d4986d ("dm cache: significant rework to leverage dm-bio-prison-v2")
+Signed-off-by: Joe Thornber
+Signed-off-by: Mike Snitzer
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/md/dm-cache-target.c | 86 ++++++++++++++++++++++++++-----------------
+ 1 file changed, 53 insertions(+), 33 deletions(-)
+
+--- a/drivers/md/dm-cache-target.c
++++ b/drivers/md/dm-cache-target.c
+@@ -1201,6 +1201,18 @@ static void background_work_end(struct c
+ 
+ /*----------------------------------------------------------------*/
+ 
++static bool bio_writes_complete_block(struct cache *cache, struct bio *bio)
++{
++	return (bio_data_dir(bio) == WRITE) &&
++		(bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
++}
++
++static bool optimisable_bio(struct cache *cache, struct bio *bio, dm_oblock_t block)
++{
++	return writeback_mode(&cache->features) &&
++		(is_discarded_oblock(cache, block) || bio_writes_complete_block(cache, bio));
++}
++
+ static void quiesce(struct dm_cache_migration *mg,
+ 		    void (*continuation)(struct work_struct *))
+ {
+@@ -1474,13 +1486,51 @@ static void mg_upgrade_lock(struct work_
+ 	}
+ }
+ 
++static void mg_full_copy(struct work_struct *ws)
++{
++	struct dm_cache_migration *mg = ws_to_mg(ws);
++	struct cache *cache = mg->cache;
++	struct policy_work *op = mg->op;
++	bool is_policy_promote = (op->op == POLICY_PROMOTE);
++
++	if ((!is_policy_promote && !is_dirty(cache, op->cblock)) ||
++	    is_discarded_oblock(cache, op->oblock)) {
++		mg_upgrade_lock(ws);
++		return;
++	}
++
++	init_continuation(&mg->k, mg_upgrade_lock);
++
++	if (copy(mg, is_policy_promote)) {
++		DMERR_LIMIT("%s: migration copy failed", cache_device_name(cache));
++		mg->k.input = BLK_STS_IOERR;
++		mg_complete(mg, false);
++	}
++}
++
+ static void mg_copy(struct work_struct *ws)
+ {
+-	int r;
+ 	struct dm_cache_migration *mg = ws_to_mg(ws);
+ 
+ 	if (mg->overwrite_bio) {
+ 		/*
++		 * No exclusive lock was held when we last checked if the bio
++		 * was optimisable. So we have to check again in case things
++		 * have changed (eg, the block may no longer be discarded).
++		 */
++		if (!optimisable_bio(mg->cache, mg->overwrite_bio, mg->op->oblock)) {
++			/*
++			 * Fallback to a real full copy after doing some tidying up.
++			 */
++			bool rb = bio_detain_shared(mg->cache, mg->op->oblock, mg->overwrite_bio);
++			BUG_ON(rb); /* An exclusive lock must _not_ be held for this block */
++			mg->overwrite_bio = NULL;
++			inc_io_migrations(mg->cache);
++			mg_full_copy(ws);
++			return;
++		}
++
++		/*
+ 		 * It's safe to do this here, even though it's new data
+ 		 * because all IO has been locked out of the block.
+ 		 *
+@@ -1489,26 +1539,8 @@ static void mg_copy(struct work_struct *
+ 		 */
+ 		overwrite(mg, mg_update_metadata_after_copy);
+ 
+-	} else {
+-		struct cache *cache = mg->cache;
+-		struct policy_work *op = mg->op;
+-		bool is_policy_promote = (op->op == POLICY_PROMOTE);
+-
+-		if ((!is_policy_promote && !is_dirty(cache, op->cblock)) ||
+-		    is_discarded_oblock(cache, op->oblock)) {
+-			mg_upgrade_lock(ws);
+-			return;
+-		}
+-
+-		init_continuation(&mg->k, mg_upgrade_lock);
+-
+-		r = copy(mg, is_policy_promote);
+-		if (r) {
+-			DMERR_LIMIT("%s: migration copy failed", cache_device_name(cache));
+-			mg->k.input = BLK_STS_IOERR;
+-			mg_complete(mg, false);
+-		}
+-	}
++	} else
++		mg_full_copy(ws);
+ }
+ 
+ static int mg_lock_writes(struct dm_cache_migration *mg)
+@@ -1748,18 +1780,6 @@ static void inc_miss_counter(struct cach
+ 
+ /*----------------------------------------------------------------*/
+ 
+-static bool bio_writes_complete_block(struct cache *cache, struct bio *bio)
+-{
+-	return (bio_data_dir(bio) == WRITE) &&
+-		(bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
+-}
+-
+-static bool optimisable_bio(struct cache *cache, struct bio *bio, dm_oblock_t block)
+-{
+-	return writeback_mode(&cache->features) &&
+-		(is_discarded_oblock(cache, block) || bio_writes_complete_block(cache, bio));
+-}
+-
+ static int map_bio(struct cache *cache, struct bio *bio, dm_oblock_t block,
+ 		   bool *commit_needed)
+ {
diff --git a/queue-4.14/dm-crypt-allow-unaligned-bv_offset.patch b/queue-4.14/dm-crypt-allow-unaligned-bv_offset.patch
new file mode 100644
index 00000000000..b4c9db26b98
--- /dev/null
+++ b/queue-4.14/dm-crypt-allow-unaligned-bv_offset.patch
@@ -0,0 +1,56 @@
+From 0440d5c0ca9744b92a07aeb6df0a9a75db6f4280 Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka
+Date: Tue, 7 Nov 2017 10:35:57 -0500
+Subject: dm crypt: allow unaligned bv_offset
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Mikulas Patocka
+
+commit 0440d5c0ca9744b92a07aeb6df0a9a75db6f4280 upstream.
+
+When slub_debug is enabled kmalloc returns unaligned memory. XFS uses
+this unaligned memory for its buffers (if an unaligned buffer crosses a
+page, XFS frees it and allocates a full page instead - see the function
+xfs_buf_allocate_memory).
+
+dm-crypt checks if bv_offset is aligned on page size and these checks
+fail with slub_debug and XFS.
+
+Fix this bug by removing the bv_offset checks. Switch to checking if
+bv_len is aligned instead of bv_offset (this check should be sufficient
+to prevent overruns if a bio with too small bv_len is received).
+
+Fixes: 8f0009a22517 ("dm crypt: optionally support larger encryption sector size")
+Reported-by: Bruno Prémont
+Tested-by: Bruno Prémont
+Signed-off-by: Mikulas Patocka
+Reviewed-by: Milan Broz
+Signed-off-by: Mike Snitzer
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/md/dm-crypt.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -1075,7 +1075,7 @@ static int crypt_convert_block_aead(stru
+ 	BUG_ON(cc->integrity_iv_size && cc->integrity_iv_size != cc->iv_size);
+ 
+ 	/* Reject unexpected unaligned bio. */
+-	if (unlikely(bv_in.bv_offset & (cc->sector_size - 1)))
++	if (unlikely(bv_in.bv_len & (cc->sector_size - 1)))
+ 		return -EIO;
+ 
+ 	dmreq = dmreq_of_req(cc, req);
+@@ -1168,7 +1168,7 @@ static int crypt_convert_block_skcipher(
+ 	int r = 0;
+ 
+ 	/* Reject unexpected unaligned bio. */
+-	if (unlikely(bv_in.bv_offset & (cc->sector_size - 1)))
++	if (unlikely(bv_in.bv_len & (cc->sector_size - 1)))
+ 		return -EIO;
+ 
+ 	dmreq = dmreq_of_req(cc, req);
diff --git a/queue-4.14/dm-integrity-allow-unaligned-bv_offset.patch b/queue-4.14/dm-integrity-allow-unaligned-bv_offset.patch
new file mode 100644
index 00000000000..33db98823cc
--- /dev/null
+++ b/queue-4.14/dm-integrity-allow-unaligned-bv_offset.patch
@@ -0,0 +1,45 @@
+From 95b1369a9638cfa322ad1c0cde8efbe524059884 Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka
+Date: Tue, 7 Nov 2017 10:40:40 -0500
+Subject: dm integrity: allow unaligned bv_offset
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Mikulas Patocka
+
+commit 95b1369a9638cfa322ad1c0cde8efbe524059884 upstream.
+
+When slub_debug is enabled kmalloc returns unaligned memory. XFS uses
+this unaligned memory for its buffers (if an unaligned buffer crosses a
+page, XFS frees it and allocates a full page instead - see the function
+xfs_buf_allocate_memory).
+
+dm-integrity checks if bv_offset is aligned on page size and this check
+fails with slub_debug and XFS.
+
+Fix this bug by removing the bv_offset check, leaving only the check for
+bv_len.
+
+Fixes: 7eada909bfd7 ("dm: add integrity target")
+Reported-by: Bruno Prémont
+Reviewed-by: Milan Broz
+Signed-off-by: Mikulas Patocka
+Signed-off-by: Mike Snitzer
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/md/dm-integrity.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -1376,7 +1376,7 @@ static int dm_integrity_map(struct dm_ta
+ 		struct bvec_iter iter;
+ 		struct bio_vec bv;
+ 		bio_for_each_segment(bv, bio, iter) {
+-			if (unlikely((bv.bv_offset | bv.bv_len) & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) {
++			if (unlikely(bv.bv_len & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) {
+ 				DMERR("Bio vector (%u,%u) is not aligned on %u-sector boundary",
+ 				      bv.bv_offset, bv.bv_len, ic->sectors_per_block);
+ 				return DM_MAPIO_KILL;
diff --git a/queue-4.14/dm-mpath-remove-annoying-message-of-blk_get_request-returned-11.patch b/queue-4.14/dm-mpath-remove-annoying-message-of-blk_get_request-returned-11.patch
new file mode 100644
index 00000000000..cf978ef6066
--- /dev/null
+++ b/queue-4.14/dm-mpath-remove-annoying-message-of-blk_get_request-returned-11.patch
@@ -0,0 +1,39 @@
+From 9dc112e2daf87b40607fd8d357e2d7de32290d45 Mon Sep 17 00:00:00 2001
+From: Ming Lei
+Date: Sat, 30 Sep 2017 19:46:48 +0800
+Subject: dm mpath: remove annoying message of 'blk_get_request() returned -11'
+
+From: Ming Lei
+
+commit 9dc112e2daf87b40607fd8d357e2d7de32290d45 upstream.
+
+It is very normal to see allocation failure, especially with blk-mq
+request_queues, so it's unnecessary to report this error and annoy
+people.
+
+In practice this 'blk_get_request() returned -11' error gets logged
+quite frequently when a blk-mq DM multipath device sees heavy IO.
+
+This change is marked for stable@ because the annoying message in
+question was included in stable@ commit 7083abbbf.
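
For context, -11 is -EAGAIN/-EWOULDBLOCK. A hedged userspace sketch of the
resulting behaviour (the function names are stand-ins, not the dm-mpath API):
a failed non-blocking allocation is treated as an expected, transient state
and answered by requeueing, with nothing logged.

#include <errno.h>
#include <stdio.h>

enum map_result { MAP_REQUEUE, MAP_REMAPPED };

/* get_request stands in for blk_get_request(); assume it returns a
 * negative errno when no request can be allocated right now. */
static enum map_result clone_and_map(int (*get_request)(void))
{
	int ret = get_request();

	if (ret < 0) {
		/* EBUSY, ENODEV or EWOULDBLOCK (-11): expected under
		 * load, so requeue quietly - no error printed. */
		return MAP_REQUEUE;
	}
	return MAP_REMAPPED;
}

static int always_busy(void)
{
	return -EWOULDBLOCK;	/* -11 on Linux */
}

int main(void)
{
	printf("%s\n", clone_and_map(always_busy) == MAP_REQUEUE ?
	       "requeued, nothing logged" : "mapped");
	return 0;
}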
+
+Fixes: 7083abbbf ("dm mpath: avoid that path removal can trigger an infinite loop")
+Signed-off-by: Ming Lei
+Signed-off-by: Mike Snitzer
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/md/dm-mpath.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/drivers/md/dm-mpath.c
++++ b/drivers/md/dm-mpath.c
+@@ -499,8 +499,6 @@ static int multipath_clone_and_map(struc
+ 	if (IS_ERR(clone)) {
+ 		/* EBUSY, ENODEV or EWOULDBLOCK: requeue */
+ 		bool queue_dying = blk_queue_dying(q);
+-		DMERR_LIMIT("blk_get_request() returned %ld%s - requeuing",
+-			    PTR_ERR(clone), queue_dying ? " (path offline)" : "");
+ 		if (queue_dying) {
+ 			atomic_inc(&m->pg_init_in_progress);
+ 			activate_or_offline_path(pgpath);
diff --git a/queue-4.14/dm-zoned-ignore-last-smaller-runt-zone.patch b/queue-4.14/dm-zoned-ignore-last-smaller-runt-zone.patch
new file mode 100644
index 00000000000..c585f955f44
--- /dev/null
+++ b/queue-4.14/dm-zoned-ignore-last-smaller-runt-zone.patch
@@ -0,0 +1,77 @@
+From 114e025968b5990ad0b57bf60697ea64ee206aac Mon Sep 17 00:00:00 2001
+From: Damien Le Moal
+Date: Sat, 28 Oct 2017 16:39:34 +0900
+Subject: dm zoned: ignore last smaller runt zone
+
+From: Damien Le Moal
+
+commit 114e025968b5990ad0b57bf60697ea64ee206aac upstream.
+
+The SCSI layer allows ZBC drives to have a smaller last runt zone. For
+such a device, specifying the entire capacity for a dm-zoned target
+table entry fails because the specified capacity is not aligned on a
+device zone size indicated in the request queue structure of the
+device.
+
+Fix this problem by ignoring the last runt zone in the entry length
+when setting up the dm-zoned target (ctr method) and when iterating table
+entries of the target (iterate_devices method). This allows dm-zoned
+users to still easily setup a target using the entire device capacity
+(as mandated by dm-zoned) or the aligned capacity excluding the last
+runt zone.
+
+While at it, replace direct references to the device queue chunk_sectors
+limit with calls to the accessor blk_queue_zone_sectors().
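
The capacity masking the patch introduces, shown as a standalone sketch with
made-up device sizes. The mask form works only because the zone size reported
by the block layer for zoned devices is a power of two in sectors:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t zone_sectors = 65536;	/* 32 MiB zones in 512 B sectors */
	uint64_t capacity = 1000000;	/* device ending in a smaller runt zone */

	/* Round the capacity down to a whole number of zones; the final
	 * partial zone is simply ignored. */
	uint64_t aligned = capacity & ~(zone_sectors - 1);

	printf("capacity %llu -> aligned %llu (%llu zones, runt of %llu sectors)\n",
	       (unsigned long long)capacity, (unsigned long long)aligned,
	       (unsigned long long)(aligned / zone_sectors),
	       (unsigned long long)(capacity - aligned));
	return 0;
}

This prints "capacity 1000000 -> aligned 983040 (15 zones, runt of 16960
sectors)": a table entry of either 1000000 or 983040 sectors is then accepted
by the ctr check above.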
+
+Reported-by: Peter Desnoyers
+Signed-off-by: Damien Le Moal
+Signed-off-by: Mike Snitzer
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/md/dm-zoned-target.c | 13 +++++++++----
+ 1 file changed, 9 insertions(+), 4 deletions(-)
+
+--- a/drivers/md/dm-zoned-target.c
++++ b/drivers/md/dm-zoned-target.c
+@@ -660,6 +660,7 @@ static int dmz_get_zoned_device(struct d
+ 	struct dmz_target *dmz = ti->private;
+ 	struct request_queue *q;
+ 	struct dmz_dev *dev;
++	sector_t aligned_capacity;
+ 	int ret;
+ 
+ 	/* Get the target device */
+@@ -685,15 +686,17 @@ static int dmz_get_zoned_device(struct d
+ 		goto err;
+ 	}
+ 
++	q = bdev_get_queue(dev->bdev);
+ 	dev->capacity = i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
+-	if (ti->begin || (ti->len != dev->capacity)) {
++	aligned_capacity = dev->capacity & ~(blk_queue_zone_sectors(q) - 1);
++	if (ti->begin ||
++	    ((ti->len != dev->capacity) && (ti->len != aligned_capacity))) {
+ 		ti->error = "Partial mapping not supported";
+ 		ret = -EINVAL;
+ 		goto err;
+ 	}
+ 
+-	q = bdev_get_queue(dev->bdev);
+-	dev->zone_nr_sectors = q->limits.chunk_sectors;
++	dev->zone_nr_sectors = blk_queue_zone_sectors(q);
+ 	dev->zone_nr_sectors_shift = ilog2(dev->zone_nr_sectors);
+ 
+ 	dev->zone_nr_blocks = dmz_sect2blk(dev->zone_nr_sectors);
+@@ -929,8 +932,10 @@ static int dmz_iterate_devices(struct dm
+ 			       iterate_devices_callout_fn fn, void *data)
+ {
+ 	struct dmz_target *dmz = ti->private;
++	struct dmz_dev *dev = dmz->dev;
++	sector_t capacity = dev->capacity & ~(dev->zone_nr_sectors - 1);
+ 
+-	return fn(ti, dmz->ddev, 0, dmz->dev->capacity, data);
++	return fn(ti, dmz->ddev, 0, capacity, data);
+ }
+ 
+ static struct target_type dmz_type = {
diff --git a/queue-4.14/ovl-put-upperdentry-if-ovl_check_origin-fails.patch b/queue-4.14/ovl-put-upperdentry-if-ovl_check_origin-fails.patch
new file mode 100644
index 00000000000..2ba716bc756
--- /dev/null
+++ b/queue-4.14/ovl-put-upperdentry-if-ovl_check_origin-fails.patch
@@ -0,0 +1,32 @@
+From 5455f92b54e516995a9ca45bbf790d3629c27a93 Mon Sep 17 00:00:00 2001
+From: Vivek Goyal
+Date: Wed, 1 Nov 2017 15:37:22 -0400
+Subject: ovl: Put upperdentry if ovl_check_origin() fails
+
+From: Vivek Goyal
+
+commit 5455f92b54e516995a9ca45bbf790d3629c27a93 upstream.
+
+If ovl_check_origin() fails, we should put upperdentry. We have a reference
+on it by now. So goto out_put_upper instead of out.
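
The error-path rule being restored, as a generic C sketch (the refcounting
helpers are illustrative, not overlayfs code): once a reference is held, every
later failure must unwind through the label that drops it, never a label that
skips it.

#include <errno.h>
#include <stdlib.h>

struct ref { int count; };

static struct ref *get_upper(void)
{
	struct ref *r = calloc(1, sizeof(*r));

	if (r)
		r->count = 1;
	return r;
}

static void put_ref(struct ref *r)
{
	if (r && --r->count == 0)
		free(r);
}

static int check_origin(struct ref *r)
{
	(void)r;
	return -ESTALE;		/* pretend the origin lookup fails */
}

static int lookup(struct ref **out)
{
	struct ref *upper = get_upper();
	int err;

	if (!upper)
		return -ENOMEM;

	err = check_origin(upper);
	if (err)
		goto out_put_upper;	/* not plain "goto out": we hold a ref */

	*out = upper;			/* success: ownership moves to the caller */
	return 0;

out_put_upper:
	put_ref(upper);			/* reference balanced on failure */
	return err;
}

int main(void)
{
	struct ref *r = NULL;

	if (lookup(&r) == 0)
		put_ref(r);
	return 0;
}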
+
+Fixes: a9d019573e88 ("ovl: lookup non-dir copy-up-origin by file handle")
+Signed-off-by: Vivek Goyal
+Signed-off-by: Miklos Szeredi
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ fs/overlayfs/namei.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/overlayfs/namei.c
++++ b/fs/overlayfs/namei.c
+@@ -630,7 +630,7 @@ struct dentry *ovl_lookup(struct inode *
+ 		err = ovl_check_origin(upperdentry, roe->lowerstack,
+ 				       roe->numlower, &stack, &ctr);
+ 		if (err)
+-			goto out;
++			goto out_put_upper;
+ 	}
+ 
+ 	if (d.redirect) {
diff --git a/queue-4.14/series b/queue-4.14/series
index bc5b057bda0..d842b05daa3 100644
--- a/queue-4.14/series
+++ b/queue-4.14/series
@@ -33,3 +33,11 @@ pci-hv-use-effective-affinity-mask.patch
 pci-set-cavium-acs-capability-quirk-flags-to-assert-rr-cr-sv-uf.patch
 pci-apply-cavium-thunderx-acs-quirk-to-more-root-ports.patch
 alsa-hda-add-raven-pci-id.patch
+dm-integrity-allow-unaligned-bv_offset.patch
+dm-cache-fix-race-condition-in-the-writeback-mode-overwrite_bio-optimisation.patch
+dm-crypt-allow-unaligned-bv_offset.patch
+dm-zoned-ignore-last-smaller-runt-zone.patch
+dm-mpath-remove-annoying-message-of-blk_get_request-returned-11.patch
+dm-bufio-fix-integer-overflow-when-limiting-maximum-cache-size.patch
+ovl-put-upperdentry-if-ovl_check_origin-fails.patch
+dm-allocate-struct-mapped_device-with-kvzalloc.patch
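
For reference, the allocation pattern behind the last entry above (the
mapped_device kvzalloc patch), sketched in userspace: try the contiguous
allocator first, fall back to page mappings when it fails, and free through a
helper that remembers which path was taken. The kernel pair is
kvzalloc_node()/kvfree(); the calloc/mmap split below is only an analogy.

#include <stdlib.h>
#include <sys/mman.h>

struct kvbuf {
	void *ptr;
	size_t size;
	int mmapped;
};

/* Zeroed allocation with fallback, in the spirit of kvzalloc(). */
static void *kv_zalloc(struct kvbuf *b, size_t size)
{
	b->size = size;
	b->mmapped = 0;
	b->ptr = calloc(1, size);	/* "kmalloc": contiguous heap */
	if (!b->ptr) {			/* fall back, like vmalloc */
		void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (p == MAP_FAILED)
			return NULL;
		b->ptr = p;		/* anonymous pages arrive zeroed */
		b->mmapped = 1;
	}
	return b->ptr;
}

/* Counterpart of kvfree(): route to the allocator that was used. */
static void kv_free(struct kvbuf *b)
{
	if (!b->ptr)
		return;
	if (b->mmapped)
		munmap(b->ptr, b->size);
	else
		free(b->ptr);
	b->ptr = NULL;
}

int main(void)
{
	struct kvbuf b;

	if (kv_zalloc(&b, 84 * 1024))	/* roughly the 84kB mapped_device case */
		kv_free(&b);
	return 0;
}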