--- /dev/null
+From 5e55e3cbd1042cffa6249f22c10585e63f8a29bf Mon Sep 17 00:00:00 2001
+From: Michal Kazior <michal.kazior@tieto.com>
+Date: Wed, 19 Aug 2015 13:10:43 +0200
+Subject: ath10k: fix dma_mapping_error() handling
+
+From: Michal Kazior <michal.kazior@tieto.com>
+
+commit 5e55e3cbd1042cffa6249f22c10585e63f8a29bf upstream.
+
+dma_mapping_error() returns 1, not a negative errno, when a DMA mapping
+fails. The driver would propagate that bogus value to its callers and
+could confuse itself if a mapping failed, so translate it into -EIO at
+each call site.
+
+Fixes: 767d34fc67af ("ath10k: remove DMA mapping wrappers")
+Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: Michal Kazior <michal.kazior@tieto.com>
+Signed-off-by: Kalle Valo <kvalo@qca.qualcomm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
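+
+For reference, a minimal sketch of the corrected error-handling pattern is
+included below. It is illustrative only: example_map_tx_buffer() is a
+hypothetical helper, not a function in the driver.
+
+    #include <linux/dma-mapping.h>
+    #include <linux/skbuff.h>
+    #include <linux/errno.h>
+
+    /* Map a tx buffer and translate a mapping failure into an errno. */
+    static int example_map_tx_buffer(struct device *dev, struct sk_buff *skb,
+                                     dma_addr_t *paddr)
+    {
+            *paddr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
+            if (dma_mapping_error(dev, *paddr))
+                    return -EIO;    /* non-zero result means the mapping failed */
+
+            return 0;
+    }
+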
+ drivers/net/wireless/ath/ath10k/htc.c | 4 +++-
+ drivers/net/wireless/ath/ath10k/htt_tx.c | 8 ++++++--
+ drivers/net/wireless/ath/ath10k/pci.c | 8 ++++++--
+ drivers/net/wireless/ath/ath10k/wmi.c | 1 +
+ 4 files changed, 16 insertions(+), 5 deletions(-)
+
+--- a/drivers/net/wireless/ath/ath10k/htc.c
++++ b/drivers/net/wireless/ath/ath10k/htc.c
+@@ -163,8 +163,10 @@ int ath10k_htc_send(struct ath10k_htc *h
+ skb_cb->eid = eid;
+ skb_cb->paddr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
+ ret = dma_mapping_error(dev, skb_cb->paddr);
+- if (ret)
++ if (ret) {
++ ret = -EIO;
+ goto err_credits;
++ }
+
+ sg_item.transfer_id = ep->eid;
+ sg_item.transfer_context = skb;
+--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
++++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
+@@ -371,8 +371,10 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt
+ skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
+ DMA_TO_DEVICE);
+ res = dma_mapping_error(dev, skb_cb->paddr);
+- if (res)
++ if (res) {
++ res = -EIO;
+ goto err_free_txdesc;
++ }
+
+ skb_put(txdesc, len);
+ cmd = (struct htt_cmd *)txdesc->data;
+@@ -463,8 +465,10 @@ int ath10k_htt_tx(struct ath10k_htt *htt
+ skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
+ DMA_TO_DEVICE);
+ res = dma_mapping_error(dev, skb_cb->paddr);
+- if (res)
++ if (res) {
++ res = -EIO;
+ goto err_free_txbuf;
++ }
+
+ if (likely(use_frags)) {
+ frags = skb_cb->htt.txbuf->frags;
+--- a/drivers/net/wireless/ath/ath10k/pci.c
++++ b/drivers/net/wireless/ath/ath10k/pci.c
+@@ -1378,8 +1378,10 @@ static int ath10k_pci_hif_exchange_bmi_m
+
+ req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
+ ret = dma_mapping_error(ar->dev, req_paddr);
+- if (ret)
++ if (ret) {
++ ret = -EIO;
+ goto err_dma;
++ }
+
+ if (resp && resp_len) {
+ tresp = kzalloc(*resp_len, GFP_KERNEL);
+@@ -1391,8 +1393,10 @@ static int ath10k_pci_hif_exchange_bmi_m
+ resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
+ DMA_FROM_DEVICE);
+ ret = dma_mapping_error(ar->dev, resp_paddr);
+- if (ret)
++ if (ret) {
++ ret = -EIO;
+ goto err_req;
++ }
+
+ xfer.wait_for_resp = true;
+ xfer.resp_len = 0;
+--- a/drivers/net/wireless/ath/ath10k/wmi.c
++++ b/drivers/net/wireless/ath/ath10k/wmi.c
+@@ -2517,6 +2517,7 @@ void ath10k_wmi_event_host_swba(struct a
+ ath10k_warn(ar, "failed to map beacon: %d\n",
+ ret);
+ dev_kfree_skb_any(bcn);
++ ret = -EIO;
+ goto skip;
+ }
+
--- /dev/null
+From b0dc3c8bc157c60b1d470163882be8c13e1950af Mon Sep 17 00:00:00 2001
+From: Joe Thornber <ejt@redhat.com>
+Date: Wed, 12 Aug 2015 15:12:09 +0100
+Subject: dm btree: add ref counting ops for the leaves of top level btrees
+
+From: Joe Thornber <ejt@redhat.com>
+
+commit b0dc3c8bc157c60b1d470163882be8c13e1950af upstream.
+
+When using nested btrees, the top leaves of the top levels contain
+block addresses for the root of the next tree down. If we shadow a
+shared leaf node, the reference counts of the leaf values (the subtree
+roots) should be incremented accordingly.
+
+This is only an issue if there is metadata sharing in the top levels,
+which only occurs if metadata snapshots are being used (as is possible
+with dm-thinp). It could result in a block from the thinp metadata
+snapshot being reused early, thus corrupting the thinp metadata
+snapshot.
+
+Signed-off-by: Joe Thornber <ejt@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
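+
+For reference, a hedged sketch of the mechanism being added (illustrative
+only; inc_subtree_roots() is a hypothetical helper, and the include paths
+assume the persistent-data directory): when a shared top-level leaf is
+shadowed, every value it holds is the block address of a subtree root, so
+each of those roots must gain a reference.
+
+    #include <linux/types.h>               /* __le64 */
+    #include <asm/byteorder.h>             /* le64_to_cpu() */
+    #include "dm-transaction-manager.h"    /* dm_tm_inc() */
+
+    /* Bump the reference count of every subtree root stored in a leaf. */
+    static void inc_subtree_roots(struct dm_transaction_manager *tm,
+                                  const __le64 *values, unsigned nr_values)
+    {
+            unsigned i;
+
+            for (i = 0; i < nr_values; i++)
+                    dm_tm_inc(tm, le64_to_cpu(values[i]));
+    }
+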
+ drivers/md/persistent-data/dm-btree-internal.h | 6 ++++
+ drivers/md/persistent-data/dm-btree-remove.c | 12 ++------
+ drivers/md/persistent-data/dm-btree-spine.c | 37 +++++++++++++++++++++++++
+ drivers/md/persistent-data/dm-btree.c | 7 ----
+ 4 files changed, 47 insertions(+), 15 deletions(-)
+
+--- a/drivers/md/persistent-data/dm-btree-internal.h
++++ b/drivers/md/persistent-data/dm-btree-internal.h
+@@ -138,4 +138,10 @@ int lower_bound(struct btree_node *n, ui
+
+ extern struct dm_block_validator btree_node_validator;
+
++/*
++ * Value type for upper levels of multi-level btrees.
++ */
++extern void init_le64_type(struct dm_transaction_manager *tm,
++ struct dm_btree_value_type *vt);
++
+ #endif /* DM_BTREE_INTERNAL_H */
+--- a/drivers/md/persistent-data/dm-btree-remove.c
++++ b/drivers/md/persistent-data/dm-btree-remove.c
+@@ -544,14 +544,6 @@ static int remove_raw(struct shadow_spin
+ return r;
+ }
+
+-static struct dm_btree_value_type le64_type = {
+- .context = NULL,
+- .size = sizeof(__le64),
+- .inc = NULL,
+- .dec = NULL,
+- .equal = NULL
+-};
+-
+ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
+ uint64_t *keys, dm_block_t *new_root)
+ {
+@@ -559,12 +551,14 @@ int dm_btree_remove(struct dm_btree_info
+ int index = 0, r = 0;
+ struct shadow_spine spine;
+ struct btree_node *n;
++ struct dm_btree_value_type le64_vt;
+
++ init_le64_type(info->tm, &le64_vt);
+ init_shadow_spine(&spine, info);
+ for (level = 0; level < info->levels; level++) {
+ r = remove_raw(&spine, info,
+ (level == last_level ?
+- &info->value_type : &le64_type),
++ &info->value_type : &le64_vt),
+ root, keys[level], (unsigned *)&index);
+ if (r < 0)
+ break;
+--- a/drivers/md/persistent-data/dm-btree-spine.c
++++ b/drivers/md/persistent-data/dm-btree-spine.c
+@@ -249,3 +249,40 @@ int shadow_root(struct shadow_spine *s)
+ {
+ return s->root;
+ }
++
++static void le64_inc(void *context, const void *value_le)
++{
++ struct dm_transaction_manager *tm = context;
++ __le64 v_le;
++
++ memcpy(&v_le, value_le, sizeof(v_le));
++ dm_tm_inc(tm, le64_to_cpu(v_le));
++}
++
++static void le64_dec(void *context, const void *value_le)
++{
++ struct dm_transaction_manager *tm = context;
++ __le64 v_le;
++
++ memcpy(&v_le, value_le, sizeof(v_le));
++ dm_tm_dec(tm, le64_to_cpu(v_le));
++}
++
++static int le64_equal(void *context, const void *value1_le, const void *value2_le)
++{
++ __le64 v1_le, v2_le;
++
++ memcpy(&v1_le, value1_le, sizeof(v1_le));
++ memcpy(&v2_le, value2_le, sizeof(v2_le));
++ return v1_le == v2_le;
++}
++
++void init_le64_type(struct dm_transaction_manager *tm,
++ struct dm_btree_value_type *vt)
++{
++ vt->context = tm;
++ vt->size = sizeof(__le64);
++ vt->inc = le64_inc;
++ vt->dec = le64_dec;
++ vt->equal = le64_equal;
++}
+--- a/drivers/md/persistent-data/dm-btree.c
++++ b/drivers/md/persistent-data/dm-btree.c
+@@ -667,12 +667,7 @@ static int insert(struct dm_btree_info *
+ struct btree_node *n;
+ struct dm_btree_value_type le64_type;
+
+- le64_type.context = NULL;
+- le64_type.size = sizeof(__le64);
+- le64_type.inc = NULL;
+- le64_type.dec = NULL;
+- le64_type.equal = NULL;
+-
++ init_le64_type(info->tm, &le64_type);
+ init_shadow_spine(&spine, info);
+
+ for (level = 0; level < (info->levels - 1); level++) {
--- /dev/null
+From 586b286b110e94eb31840ac5afc0c24e0881fe34 Mon Sep 17 00:00:00 2001
+From: Mike Snitzer <snitzer@redhat.com>
+Date: Wed, 9 Sep 2015 21:34:51 -0400
+Subject: dm crypt: constrain crypt device's max_segment_size to PAGE_SIZE
+
+From: Mike Snitzer <snitzer@redhat.com>
+
+commit 586b286b110e94eb31840ac5afc0c24e0881fe34 upstream.
+
+Setting the dm-crypt device's max_segment_size to PAGE_SIZE is an
+unfortunate constraint that is required to avoid the potential for
+exceeding dm-crypt's underlying device's max_segments limits -- due to
+crypt_alloc_buffer() possibly allocating pages for the encryption bio
+that are not as physically contiguous as the original bio.
+
+It is interesting to note that this problem was already fixed back in
+2007 via commit 91e106259 ("dm crypt: use bio_add_page"). But Linux 4.0
+commit cf2f1abfb ("dm crypt: don't allocate pages for a partial
+request") regressed dm-crypt back to _not_ using bio_add_page(). But
+given that dm-crypt's cpu parallelization changes all depend on commit
+cf2f1abfb's abandoning of the more complex io fragment processing that
+dm-crypt previously had, we cannot easily go back to using
+bio_add_page().
+
+So all said the cleanest way to resolve this issue is to fix dm-crypt to
+properly constrain the original bios entering dm-crypt so the encryption
+bios that dm-crypt generates from the original bios are always
+compatible with the underlying device's max_segments queue limits.
+
+It should be noted that technically Linux 4.3 does _not_ need this fix
+because of the block core's new late bio-splitting capability. But, it
+is reasoned, there is little to be gained by having the block core split
+the encrypted bio that is composed of PAGE_SIZE segments. That said, in
+the future we may revert this change.
+
+Fixes: cf2f1abfb ("dm crypt: don't allocate pages for a partial request")
+Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=104421
+Suggested-by: Jeff Moyer <jmoyer@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
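+
+To make the constraint concrete, here is a toy user-space illustration with
+hypothetical numbers (not taken from the patch): a 1 MiB bio carried in two
+physically contiguous 512 KiB segments satisfies a device advertising
+max_segments = 128, but the page-by-page clone that crypt_alloc_buffer()
+builds from it needs 256 discrete 4 KiB segments in the worst case. Capping
+max_segment_size at PAGE_SIZE ensures an incoming bio never has fewer
+segments than its clone.
+
+    #include <stdio.h>
+
+    #define EXAMPLE_PAGE_SIZE 4096u
+
+    /* Worst case: the clone uses one discrete page per PAGE_SIZE of data. */
+    static unsigned worst_case_clone_segments(unsigned bio_bytes)
+    {
+            return (bio_bytes + EXAMPLE_PAGE_SIZE - 1) / EXAMPLE_PAGE_SIZE;
+    }
+
+    int main(void)
+    {
+            printf("%u\n", worst_case_clone_segments(1024 * 1024)); /* 256 */
+            return 0;
+    }
+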
+ drivers/md/dm-crypt.c | 17 +++++++++++++++--
+ 1 file changed, 15 insertions(+), 2 deletions(-)
+
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -955,7 +955,8 @@ static void crypt_free_buffer_pages(stru
+
+ /*
+ * Generate a new unfragmented bio with the given size
+- * This should never violate the device limitations
++ * This should never violate the device limitations (but only because
++ * max_segment_size is being constrained to PAGE_SIZE).
+ *
+ * This function may be called concurrently. If we allocate from the mempool
+ * concurrently, there is a possibility of deadlock. For example, if we have
+@@ -2040,9 +2041,20 @@ static int crypt_iterate_devices(struct
+ return fn(ti, cc->dev, cc->start, ti->len, data);
+ }
+
++static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
++{
++ /*
++ * Unfortunate constraint that is required to avoid the potential
++ * for exceeding underlying device's max_segments limits -- due to
++ * crypt_alloc_buffer() possibly allocating pages for the encryption
++ * bio that are not as physically contiguous as the original bio.
++ */
++ limits->max_segment_size = PAGE_SIZE;
++}
++
+ static struct target_type crypt_target = {
+ .name = "crypt",
+- .version = {1, 14, 0},
++ .version = {1, 14, 1},
+ .module = THIS_MODULE,
+ .ctr = crypt_ctr,
+ .dtr = crypt_dtr,
+@@ -2054,6 +2066,7 @@ static struct target_type crypt_target =
+ .message = crypt_message,
+ .merge = crypt_merge,
+ .iterate_devices = crypt_iterate_devices,
++ .io_hints = crypt_io_hints,
+ };
+
+ static int __init dm_crypt_init(void)
--- /dev/null
+From 042745ee53a0a7c1f5aff191a4a24213c6dcfb52 Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Fri, 2 Oct 2015 11:17:37 -0400
+Subject: dm raid: fix round up of default region size
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit 042745ee53a0a7c1f5aff191a4a24213c6dcfb52 upstream.
+
+Commit 3a0f9aaee028 ("dm raid: round region_size to power of two")
+intended to make sure that the default region size is a power of two.
+However, the logic in that commit is incorrect and sets the variable
+region_size to 0 or 1, depending on whether min_region_size is a power
+of two.
+
+Fix this logic, using roundup_pow_of_two(), so that region_size is
+properly rounded up to the next power of two.
+
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Fixes: 3a0f9aaee028 ("dm raid: round region_size to power of two")
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
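+
+For reference, a minimal sketch of the fixed rounding with made-up numbers
+(example_default_region_size() is a hypothetical wrapper): on this code
+path region_size is still 0, so the old "1 << fls(region_size)" evaluated
+to 1, or the branch was skipped entirely and region_size stayed 0.
+
+    #include <linux/log2.h>
+
+    /* Round the minimum region size up to the next power of two. */
+    static unsigned long example_default_region_size(unsigned long min_region_size)
+    {
+            /* e.g. min_region_size = 8200 sectors -> 16384; 8192 stays 8192 */
+            return roundup_pow_of_two(min_region_size);
+    }
+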
+ drivers/md/dm-raid.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/drivers/md/dm-raid.c
++++ b/drivers/md/dm-raid.c
+@@ -327,8 +327,7 @@ static int validate_region_size(struct r
+ */
+ if (min_region_size > (1 << 13)) {
+ /* If not a power of 2, make it the next power of 2 */
+- if (min_region_size & (min_region_size - 1))
+- region_size = 1 << fls(region_size);
++ region_size = roundup_pow_of_two(min_region_size);
+ DMINFO("Choosing default region size of %lu sectors",
+ region_size);
+ } else {
--- /dev/null
+From 66eefe5de11db1e0d8f2edc3880d50e7c36a9d43 Mon Sep 17 00:00:00 2001
+From: NeilBrown <neilb@suse.com>
+Date: Thu, 24 Sep 2015 15:47:47 +1000
+Subject: md/raid0: apply base queue limits *before* disk_stack_limits
+
+From: NeilBrown <neilb@suse.com>
+
+commit 66eefe5de11db1e0d8f2edc3880d50e7c36a9d43 upstream.
+
+Calling e.g. blk_queue_max_hw_sectors() after calls to
+disk_stack_limits() discards the settings determined by
+disk_stack_limits().
+So we need to make those calls first.
+
+Fixes: 199dc6ed5179 ("md/raid0: update queue parameter in a safer location.")
+Reported-by: Jes Sorensen <Jes.Sorensen@redhat.com>
+Signed-off-by: NeilBrown <neilb@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
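+
+A toy user-space illustration of the ordering issue follows (hypothetical
+numbers, not the raid0 code): the blk_queue_* helpers assign a limit
+outright, while disk_stack_limits() only tightens the existing limits
+toward each member device, so the assignments must run first.
+
+    #include <stdio.h>
+
+    static unsigned min_u(unsigned a, unsigned b) { return a < b ? a : b; }
+
+    int main(void)
+    {
+            unsigned array_max = 512, member_max = 128, limit;
+
+            /* correct order: assign the base limit, then stack (tighten) */
+            limit = array_max;
+            limit = min_u(limit, member_max);
+            printf("set then stack: %u\n", limit);   /* 128 */
+
+            /* broken order: the stacked result is clobbered by the assignment */
+            limit = min_u(array_max, member_max);
+            limit = array_max;
+            printf("stack then set: %u\n", limit);   /* 512, device limit lost */
+
+            return 0;
+    }
+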
+ drivers/md/raid0.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+--- a/drivers/md/raid0.c
++++ b/drivers/md/raid0.c
+@@ -431,12 +431,6 @@ static int raid0_run(struct mddev *mddev
+ struct md_rdev *rdev;
+ bool discard_supported = false;
+
+- rdev_for_each(rdev, mddev) {
+- disk_stack_limits(mddev->gendisk, rdev->bdev,
+- rdev->data_offset << 9);
+- if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
+- discard_supported = true;
+- }
+ blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
+ blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
+ blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors);
+@@ -445,6 +439,12 @@ static int raid0_run(struct mddev *mddev
+ blk_queue_io_opt(mddev->queue,
+ (mddev->chunk_sectors << 9) * mddev->raid_disks);
+
++ rdev_for_each(rdev, mddev) {
++ disk_stack_limits(mddev->gendisk, rdev->bdev,
++ rdev->data_offset << 9);
++ if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
++ discard_supported = true;
++ }
+ if (!discard_supported)
+ queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+ else
--- /dev/null
+From 199dc6ed5179251fa6158a461499c24bdd99c836 Mon Sep 17 00:00:00 2001
+From: NeilBrown <neilb@suse.com>
+Date: Mon, 3 Aug 2015 13:11:47 +1000
+Subject: md/raid0: update queue parameter in a safer location.
+
+From: NeilBrown <neilb@suse.com>
+
+commit 199dc6ed5179251fa6158a461499c24bdd99c836 upstream.
+
+When a (e.g.) RAID5 array is reshaped to RAID0, the updating
+of queue parameters (e.g. max number of sectors per bio) is
+done in the wrong place.
+It should be part of ->run, but it is actually part of ->takeover.
+This means it happens before level_store() calls:
+
+ blk_set_stacking_limits(&mddev->queue->limits);
+
+and so it is ineffective. This can lead to errors from the underlying
+devices.
+
+So move all the relevant settings out of create_stripe_zones()
+and into raid0_run().
+
+As this can lead to a BUG_ON() it is suitable for any -stable
+kernel which supports reshape to RAID0, i.e. 2.6.35 or later.
+As the bug has been present for five years there is no urgency,
+so no need to rush into -stable.
+
+Fixes: 9af204cf720c ("md: Add support for Raid5->Raid0 and Raid10->Raid0 takeover")
+Reported-by: Yi Zhang <yizhan@redhat.com>
+Signed-off-by: NeilBrown <neilb@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/raid0.c | 75 +++++++++++++++++++++++++++--------------------------
+ 1 file changed, 39 insertions(+), 36 deletions(-)
+
+--- a/drivers/md/raid0.c
++++ b/drivers/md/raid0.c
+@@ -83,7 +83,7 @@ static int create_strip_zones(struct mdd
+ char b[BDEVNAME_SIZE];
+ char b2[BDEVNAME_SIZE];
+ struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
+- bool discard_supported = false;
++ unsigned short blksize = 512;
+
+ if (!conf)
+ return -ENOMEM;
+@@ -98,6 +98,9 @@ static int create_strip_zones(struct mdd
+ sector_div(sectors, mddev->chunk_sectors);
+ rdev1->sectors = sectors * mddev->chunk_sectors;
+
++ blksize = max(blksize, queue_logical_block_size(
++ rdev1->bdev->bd_disk->queue));
++
+ rdev_for_each(rdev2, mddev) {
+ pr_debug("md/raid0:%s: comparing %s(%llu)"
+ " with %s(%llu)\n",
+@@ -134,6 +137,18 @@ static int create_strip_zones(struct mdd
+ }
+ pr_debug("md/raid0:%s: FINAL %d zones\n",
+ mdname(mddev), conf->nr_strip_zones);
++ /*
++ * now since we have the hard sector sizes, we can make sure
++ * chunk size is a multiple of that sector size
++ */
++ if ((mddev->chunk_sectors << 9) % blksize) {
++ printk(KERN_ERR "md/raid0:%s: chunk_size of %d not multiple of block size %d\n",
++ mdname(mddev),
++ mddev->chunk_sectors << 9, blksize);
++ err = -EINVAL;
++ goto abort;
++ }
++
+ err = -ENOMEM;
+ conf->strip_zone = kzalloc(sizeof(struct strip_zone)*
+ conf->nr_strip_zones, GFP_KERNEL);
+@@ -188,19 +203,12 @@ static int create_strip_zones(struct mdd
+ }
+ dev[j] = rdev1;
+
+- if (mddev->queue)
+- disk_stack_limits(mddev->gendisk, rdev1->bdev,
+- rdev1->data_offset << 9);
+-
+ if (rdev1->bdev->bd_disk->queue->merge_bvec_fn)
+ conf->has_merge_bvec = 1;
+
+ if (!smallest || (rdev1->sectors < smallest->sectors))
+ smallest = rdev1;
+ cnt++;
+-
+- if (blk_queue_discard(bdev_get_queue(rdev1->bdev)))
+- discard_supported = true;
+ }
+ if (cnt != mddev->raid_disks) {
+ printk(KERN_ERR "md/raid0:%s: too few disks (%d of %d) - "
+@@ -261,28 +269,6 @@ static int create_strip_zones(struct mdd
+ (unsigned long long)smallest->sectors);
+ }
+
+- /*
+- * now since we have the hard sector sizes, we can make sure
+- * chunk size is a multiple of that sector size
+- */
+- if ((mddev->chunk_sectors << 9) % queue_logical_block_size(mddev->queue)) {
+- printk(KERN_ERR "md/raid0:%s: chunk_size of %d not valid\n",
+- mdname(mddev),
+- mddev->chunk_sectors << 9);
+- goto abort;
+- }
+-
+- if (mddev->queue) {
+- blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
+- blk_queue_io_opt(mddev->queue,
+- (mddev->chunk_sectors << 9) * mddev->raid_disks);
+-
+- if (!discard_supported)
+- queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+- else
+- queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+- }
+-
+ pr_debug("md/raid0:%s: done.\n", mdname(mddev));
+ *private_conf = conf;
+
+@@ -433,12 +419,6 @@ static int raid0_run(struct mddev *mddev
+ if (md_check_no_bitmap(mddev))
+ return -EINVAL;
+
+- if (mddev->queue) {
+- blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
+- blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
+- blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors);
+- }
+-
+ /* if private is not null, we are here after takeover */
+ if (mddev->private == NULL) {
+ ret = create_strip_zones(mddev, &conf);
+@@ -447,6 +427,29 @@ static int raid0_run(struct mddev *mddev
+ mddev->private = conf;
+ }
+ conf = mddev->private;
++ if (mddev->queue) {
++ struct md_rdev *rdev;
++ bool discard_supported = false;
++
++ rdev_for_each(rdev, mddev) {
++ disk_stack_limits(mddev->gendisk, rdev->bdev,
++ rdev->data_offset << 9);
++ if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
++ discard_supported = true;
++ }
++ blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
++ blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
++ blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors);
++
++ blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
++ blk_queue_io_opt(mddev->queue,
++ (mddev->chunk_sectors << 9) * mddev->raid_disks);
++
++ if (!discard_supported)
++ queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
++ else
++ queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
++ }
+
+ /* calculate array device size */
+ md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));
pci-fix-devfn-for-vpd-access-through-function-0.patch
pci-use-function-0-vpd-for-identical-functions-regular-vpd-for-others.patch
pci-clear-ioresource_unset-when-clipping-a-bridge-window.patch
+dm-crypt-constrain-crypt-device-s-max_segment_size-to-page_size.patch
+ath10k-fix-dma_mapping_error-handling.patch
+svcrdma-fix-send_reply-scatter-gather-set-up.patch
+dm-btree-add-ref-counting-ops-for-the-leaves-of-top-level-btrees.patch
+staging-ion-fix-corruption-of-ion_import_dma_buf.patch
+usb-option-add-zte-pids.patch
+md-raid0-update-queue-parameter-in-a-safer-location.patch
+md-raid0-apply-base-queue-limits-before-disk_stack_limits.patch
+dm-raid-fix-round-up-of-default-region-size.patch
--- /dev/null
+From 6fa92e2bcf6390e64895b12761e851c452d87bd8 Mon Sep 17 00:00:00 2001
+From: Shawn Lin <shawn.lin@rock-chips.com>
+Date: Wed, 9 Sep 2015 15:41:52 +0800
+Subject: staging: ion: fix corruption of ion_import_dma_buf
+
+From: Shawn Lin <shawn.lin@rock-chips.com>
+
+commit 6fa92e2bcf6390e64895b12761e851c452d87bd8 upstream.
+
+We found this issue, and it still exists in the latest kernel. Simply
+keep ion_handle_create() under client->lock to avoid this race.
+
+WARNING: CPU: 2 PID: 2648 at drivers/staging/android/ion/ion.c:512 ion_handle_add+0xb4/0xc0()
+ion_handle_add: buffer already found.
+Modules linked in: iwlmvm iwlwifi mac80211 cfg80211 compat
+CPU: 2 PID: 2648 Comm: TimedEventQueue Tainted: G W 3.14.0 #7
+ 00000000 00000000 9a3efd2c 80faf273 9a3efd6c 9a3efd5c 80935dc9 811d7fd3
+ 9a3efd88 00000a58 812208a0 00000200 80e128d4 80e128d4 8d4ae00c a8cd8600
+ a8cd8094 9a3efd74 80935e0e 00000009 9a3efd6c 811d7fd3 9a3efd88 9a3efd9c
+Call Trace:
+ [<80faf273>] dump_stack+0x48/0x69
+ [<80935dc9>] warn_slowpath_common+0x79/0x90
+ [<80e128d4>] ? ion_handle_add+0xb4/0xc0
+ [<80e128d4>] ? ion_handle_add+0xb4/0xc0
+ [<80935e0e>] warn_slowpath_fmt+0x2e/0x30
+ [<80e128d4>] ion_handle_add+0xb4/0xc0
+ [<80e144cc>] ion_import_dma_buf+0x8c/0x110
+ [<80c517c4>] reg_init+0x364/0x7d0
+ [<80993363>] ? futex_wait+0x123/0x210
+ [<80992e0e>] ? get_futex_key+0x16e/0x1e0
+ [<8099308f>] ? futex_wake+0x5f/0x120
+ [<80c51e19>] vpu_service_ioctl+0x1e9/0x500
+ [<80994aec>] ? do_futex+0xec/0x8e0
+ [<80971080>] ? prepare_to_wait_event+0xc0/0xc0
+ [<80c51c30>] ? reg_init+0x7d0/0x7d0
+ [<80a22562>] do_vfs_ioctl+0x2d2/0x4c0
+ [<80b198ad>] ? inode_has_perm.isra.41+0x2d/0x40
+ [<80b199cf>] ? file_has_perm+0x7f/0x90
+ [<80b1a5f7>] ? selinux_file_ioctl+0x47/0xf0
+ [<80a227a8>] SyS_ioctl+0x58/0x80
+ [<80fb45e8>] syscall_call+0x7/0x7
+ [<80fb0000>] ? mmc_do_calc_max_discard+0xab/0xe4
+
+Fixes: 83271f626 ("ion: hold reference to handle...")
+Signed-off-by: Shawn Lin <shawn.lin@rock-chips.com>
+Reviewed-by: Laura Abbott <labbott@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/staging/android/ion/ion.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/staging/android/ion/ion.c
++++ b/drivers/staging/android/ion/ion.c
+@@ -1179,13 +1179,13 @@ struct ion_handle *ion_import_dma_buf(st
+ mutex_unlock(&client->lock);
+ goto end;
+ }
+- mutex_unlock(&client->lock);
+
+ handle = ion_handle_create(client, buffer);
+- if (IS_ERR(handle))
++ if (IS_ERR(handle)) {
++ mutex_unlock(&client->lock);
+ goto end;
++ }
+
+- mutex_lock(&client->lock);
+ ret = ion_handle_add(client, handle);
+ mutex_unlock(&client->lock);
+ if (ret) {
--- /dev/null
+From 9d11b51ce7c150a69e761e30518f294fc73d55ff Mon Sep 17 00:00:00 2001
+From: Chuck Lever <chuck.lever@oracle.com>
+Date: Thu, 9 Jul 2015 16:45:18 -0400
+Subject: svcrdma: Fix send_reply() scatter/gather set-up
+
+From: Chuck Lever <chuck.lever@oracle.com>
+
+commit 9d11b51ce7c150a69e761e30518f294fc73d55ff upstream.
+
+The Linux NFS server returns garbage in the data payload of inline
+NFS/RDMA READ replies. These are READs of under 1000 bytes or so
+where the client has not provided either a reply chunk or a write
+list.
+
+The NFS server delivers the data payload for an NFS READ reply to
+the transport in an xdr_buf page list. If the NFS client did not
+provide a reply chunk or a write list, send_reply() is supposed to
+set up a separate sge for the page containing the READ data, and
+another sge for XDR padding if needed, then post all of the sges via
+a single SEND Work Request.
+
+The problem is send_reply() does not advance through the xdr_buf
+when setting up scatter/gather entries for SEND WR. It always calls
+dma_map_xdr with xdr_off set to zero. When there's more than one
+sge, dma_map_xdr() sets up the SEND sge's so they all point to the
+xdr_buf's head.
+
+The current Linux NFS/RDMA client always provides a reply chunk or
+a write list when performing an NFS READ over RDMA. Therefore, it
+does not exercise this particular case. The Linux server has never
+had to use more than one extra sge for building RPC/RDMA replies
+with a Linux client.
+
+However, an NFS/RDMA client _is_ allowed to send small NFS READs
+without setting up a write list or reply chunk. The NFS READ reply
+fits entirely within the inline reply buffer in this case. This is
+perhaps a more efficient way of performing NFS READs that the Linux
+NFS/RDMA client may some day adopt.
+
+Fixes: b432e6b3d9c1 ('svcrdma: Change DMA mapping logic to . . .')
+BugLink: https://bugzilla.linux-nfs.org/show_bug.cgi?id=285
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: J. Bruce Fields <bfields@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
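+
+The underlying pattern, sketched generically below (this is not the
+svcrdma API; map_chunks() is a made-up helper): when a payload is mapped
+one chunk per sge, the running offset has to be carried across loop
+iterations instead of being reset to zero for every sge.
+
+    #include <stddef.h>
+
+    /* Record the starting offset of each chunk of a payload of 'len' bytes. */
+    static size_t map_chunks(size_t len, size_t chunk_size,
+                             size_t *offsets, size_t max_chunks)
+    {
+            size_t off = 0;    /* declared once, outside the loop */
+            size_t n = 0;
+
+            while (len && n < max_chunks) {
+                    size_t bytes = len < chunk_size ? len : chunk_size;
+
+                    offsets[n++] = off;    /* each sge starts where the last ended */
+                    off += bytes;
+                    len -= bytes;
+            }
+
+            return n;
+    }
+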
+ net/sunrpc/xprtrdma/svc_rdma_sendto.c | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+@@ -382,6 +382,7 @@ static int send_reply(struct svcxprt_rdm
+ int byte_count)
+ {
+ struct ib_send_wr send_wr;
++ u32 xdr_off;
+ int sge_no;
+ int sge_bytes;
+ int page_no;
+@@ -416,8 +417,8 @@ static int send_reply(struct svcxprt_rdm
+ ctxt->direction = DMA_TO_DEVICE;
+
+ /* Map the payload indicated by 'byte_count' */
++ xdr_off = 0;
+ for (sge_no = 1; byte_count && sge_no < vec->count; sge_no++) {
+- int xdr_off = 0;
+ sge_bytes = min_t(size_t, vec->sge[sge_no].iov_len, byte_count);
+ byte_count -= sge_bytes;
+ ctxt->sge[sge_no].addr =
+@@ -455,6 +456,13 @@ static int send_reply(struct svcxprt_rdm
+ }
+ rqstp->rq_next_page = rqstp->rq_respages + 1;
+
++ /* The loop above bumps sc_dma_used for each sge. The
++ * xdr_buf.tail gets a separate sge, but resides in the
++ * same page as xdr_buf.head. Don't count it twice.
++ */
++ if (sge_no > ctxt->count)
++ atomic_dec(&rdma->sc_dma_used);
++
+ if (sge_no > rdma->sc_max_sge) {
+ pr_err("svcrdma: Too many sges (%d)\n", sge_no);
+ goto err;
--- /dev/null
+From 19ab6bc5674a30fdb6a2436b068d19a3c17dc73e Mon Sep 17 00:00:00 2001
+From: "Liu.Zhao" <lzsos369@163.com>
+Date: Mon, 24 Aug 2015 08:36:12 -0700
+Subject: USB: option: add ZTE PIDs
+
+From: "Liu.Zhao" <lzsos369@163.com>
+
+commit 19ab6bc5674a30fdb6a2436b068d19a3c17dc73e upstream.
+
+This adds new ZTE device PIDs to the option driver.
+
+Signed-off-by: Liu.Zhao <lzsos369@163.com>
+[johan: sort the new entries]
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/usb/serial/option.c | 24 ++++++++++++++++++++++++
+ 1 file changed, 24 insertions(+)
+
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -278,6 +278,10 @@ static void option_instat_callback(struc
+ #define ZTE_PRODUCT_MF622 0x0001
+ #define ZTE_PRODUCT_MF628 0x0015
+ #define ZTE_PRODUCT_MF626 0x0031
++#define ZTE_PRODUCT_ZM8620_X 0x0396
++#define ZTE_PRODUCT_ME3620_MBIM 0x0426
++#define ZTE_PRODUCT_ME3620_X 0x1432
++#define ZTE_PRODUCT_ME3620_L 0x1433
+ #define ZTE_PRODUCT_AC2726 0xfff1
+ #define ZTE_PRODUCT_MG880 0xfffd
+ #define ZTE_PRODUCT_CDMA_TECH 0xfffe
+@@ -544,6 +548,18 @@ static const struct option_blacklist_inf
+ .sendsetup = BIT(1) | BIT(2) | BIT(3),
+ };
+
++static const struct option_blacklist_info zte_me3620_mbim_blacklist = {
++ .reserved = BIT(2) | BIT(3) | BIT(4),
++};
++
++static const struct option_blacklist_info zte_me3620_xl_blacklist = {
++ .reserved = BIT(3) | BIT(4) | BIT(5),
++};
++
++static const struct option_blacklist_info zte_zm8620_x_blacklist = {
++ .reserved = BIT(3) | BIT(4) | BIT(5),
++};
++
+ static const struct option_blacklist_info huawei_cdc12_blacklist = {
+ .reserved = BIT(1) | BIT(2),
+ };
+@@ -1591,6 +1607,14 @@ static const struct usb_device_id option
+ .driver_info = (kernel_ulong_t)&zte_ad3812_z_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&zte_mc2716_z_blacklist },
++ { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_L),
++ .driver_info = (kernel_ulong_t)&zte_me3620_xl_blacklist },
++ { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_MBIM),
++ .driver_info = (kernel_ulong_t)&zte_me3620_mbim_blacklist },
++ { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_X),
++ .driver_info = (kernel_ulong_t)&zte_me3620_xl_blacklist },
++ { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ZM8620_X),
++ .driver_info = (kernel_ulong_t)&zte_zm8620_x_blacklist },
+ { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) },
+ { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) },
+ { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) },