--- /dev/null
+From ab7c7bb6f4ab95dbca96fcfc4463cd69843e3e24 Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Fri, 27 Feb 2015 14:04:27 -0500
+Subject: dm: hold suspend_lock while suspending device during device deletion
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit ab7c7bb6f4ab95dbca96fcfc4463cd69843e3e24 upstream.
+
+__dm_destroy() must take the suspend_lock so that its presuspend and
+postsuspend calls do not race with an internal suspend.
+
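+As a rough illustration of the race being closed (a user-space sketch with
+stand-in names, not the kernel code), the destroy path takes the same lock
+as the suspend path, so the presuspend/postsuspend pair can never interleave
+with a suspend in progress:
+
+#include <pthread.h>
+#include <stdio.h>
+
+static pthread_mutex_t suspend_lock = PTHREAD_MUTEX_INITIALIZER;
+static int suspended;
+
+static void *internal_suspend(void *arg)
+{
+	pthread_mutex_lock(&suspend_lock);
+	suspended = 1;			/* device quiesced */
+	pthread_mutex_unlock(&suspend_lock);
+	return NULL;
+}
+
+static void *destroy(void *arg)
+{
+	pthread_mutex_lock(&suspend_lock);
+	if (!suspended) {		/* both hooks run under the lock */
+		printf("presuspend\n");
+		printf("postsuspend\n");
+	}
+	pthread_mutex_unlock(&suspend_lock);
+	return NULL;
+}
+
+int main(void)
+{
+	pthread_t a, b;
+
+	pthread_create(&a, NULL, internal_suspend, NULL);
+	pthread_create(&b, NULL, destroy, NULL);
+	pthread_join(a, NULL);
+	pthread_join(b, NULL);
+	return 0;
+}
+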
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -2526,10 +2526,16 @@ static void __dm_destroy(struct mapped_d
+ set_bit(DMF_FREEING, &md->flags);
+ spin_unlock(&_minor_lock);
+
++ /*
++ * Take suspend_lock so that presuspend and postsuspend methods
++ * do not race with internal suspend.
++ */
++ mutex_lock(&md->suspend_lock);
+ if (!dm_suspended_md(md)) {
+ dm_table_presuspend_targets(map);
+ dm_table_postsuspend_targets(map);
+ }
++ mutex_unlock(&md->suspend_lock);
+
+ /* dm_put_live_table must be before msleep, otherwise deadlock is possible */
+ dm_put_live_table(md, srcu_idx);
--- /dev/null
+From e5db29806b99ce2b2640d2e4d4fcb983cea115c5 Mon Sep 17 00:00:00 2001
+From: "Darrick J. Wong" <darrick.wong@oracle.com>
+Date: Fri, 27 Feb 2015 10:44:38 -0800
+Subject: dm io: deal with wandering queue limits when handling REQ_DISCARD and REQ_WRITE_SAME
+
+From: "Darrick J. Wong" <darrick.wong@oracle.com>
+
+commit e5db29806b99ce2b2640d2e4d4fcb983cea115c5 upstream.
+
+Since it's possible for the discard and write same queue limits to
+change while the upper level command is being sliced and diced, fix up
+both of them: (a) reject the IO at the start of the function if the
+special command is unsupported, and (b) read the limits once and let the
+commands error out on their own if the status happens to change.
+
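+The idea, sketched as a stand-alone user-space analogue (illustrative names,
+not the kernel code): snapshot the racy limit into a local once, and use
+that single value both for the capability check and for splitting:
+
+#include <stdatomic.h>
+#include <stdio.h>
+
+static _Atomic unsigned int max_discard_sectors = 8;	/* may change at any time */
+
+static void do_region(unsigned int remaining)
+{
+	/* one load; later changes to the shared limit no longer matter */
+	unsigned int special_cmd_max_sectors = atomic_load(&max_discard_sectors);
+
+	if (special_cmd_max_sectors == 0) {
+		printf("rejected: discard unsupported\n");
+		return;
+	}
+
+	while (remaining) {
+		unsigned int num_sectors = remaining < special_cmd_max_sectors ?
+					   remaining : special_cmd_max_sectors;
+
+		printf("issue %u sectors\n", num_sectors);
+		remaining -= num_sectors;
+	}
+}
+
+int main(void)
+{
+	do_region(20);
+	return 0;
+}
+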
+Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-io.c | 15 +++++++++++----
+ 1 file changed, 11 insertions(+), 4 deletions(-)
+
+--- a/drivers/md/dm-io.c
++++ b/drivers/md/dm-io.c
+@@ -289,9 +289,16 @@ static void do_region(int rw, unsigned r
+ struct request_queue *q = bdev_get_queue(where->bdev);
+ unsigned short logical_block_size = queue_logical_block_size(q);
+ sector_t num_sectors;
++ unsigned int uninitialized_var(special_cmd_max_sectors);
+
+- /* Reject unsupported discard requests */
+- if ((rw & REQ_DISCARD) && !blk_queue_discard(q)) {
++ /*
++ * Reject unsupported discard and write same requests.
++ */
++ if (rw & REQ_DISCARD)
++ special_cmd_max_sectors = q->limits.max_discard_sectors;
++ else if (rw & REQ_WRITE_SAME)
++ special_cmd_max_sectors = q->limits.max_write_same_sectors;
++ if ((rw & (REQ_DISCARD | REQ_WRITE_SAME)) && special_cmd_max_sectors == 0) {
+ dec_count(io, region, -EOPNOTSUPP);
+ return;
+ }
+@@ -317,7 +324,7 @@ static void do_region(int rw, unsigned r
+ store_io_and_region_in_bio(bio, io, region);
+
+ if (rw & REQ_DISCARD) {
+- num_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining);
++ num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
+ bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
+ remaining -= num_sectors;
+ } else if (rw & REQ_WRITE_SAME) {
+@@ -326,7 +333,7 @@ static void do_region(int rw, unsigned r
+ */
+ dp->get_page(dp, &page, &len, &offset);
+ bio_add_page(bio, page, logical_block_size, offset);
+- num_sectors = min_t(sector_t, q->limits.max_write_same_sectors, remaining);
++ num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
+ bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
+
+ offset = 0;
--- /dev/null
+From 09ee96b21456883e108c3b00597bb37ec512151b Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Thu, 26 Feb 2015 11:41:28 -0500
+Subject: dm snapshot: suspend merging snapshot when doing exception handover
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit 09ee96b21456883e108c3b00597bb37ec512151b upstream.
+
+The "dm snapshot: suspend origin when doing exception handover" commit
+fixed an exception store handover bug associated with pending exceptions
+to the "snapshot-origin" target.
+
+However, a similar problem exists in snapshot merging. When snapshot
+merging is in progress, we use the target "snapshot-merge" instead of
+"snapshot-origin". Consequently, during exception store handover, we
+must find the snapshot-merge target and suspend its associated
+mapped_device.
+
+To avoid lockdep warnings, the target must be suspended and resumed
+without holding _origins_lock.
+
+Introduce a dm_hold() function that grabs a reference on a
+mapped_device but, unlike dm_get(), doesn't crash if the device has
+the DMF_FREEING flag set; it returns an error in that case instead.
+
+In snapshot_resume() we grab the reference to the origin device using
+dm_hold() while holding _origins_lock (_origins_lock guarantees that the
+device won't disappear). Then we release _origins_lock, suspend the
+device and grab _origins_lock again.
+
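+A minimal user-space sketch of the dm_hold() idea (stand-in types, not the
+kernel implementation): take a reference under the lock that keeps the
+object findable, but refuse if the object is already being torn down, so
+the lock can then be dropped for the blocking suspend/resume:
+
+#include <errno.h>
+#include <stdio.h>
+
+struct mapped_device {
+	int freeing;		/* stands in for DMF_FREEING */
+	int refcount;
+};
+
+static int dm_hold(struct mapped_device *md)
+{
+	/* caller holds the lock protecting 'freeing' */
+	if (md->freeing)
+		return -EBUSY;
+	md->refcount++;
+	return 0;
+}
+
+int main(void)
+{
+	struct mapped_device live = { .freeing = 0, .refcount = 1 };
+	struct mapped_device dying = { .freeing = 1, .refcount = 1 };
+
+	printf("live:  %d\n", dm_hold(&live));		/* 0: pinned */
+	printf("dying: %d\n", dm_hold(&dying));		/* -EBUSY: skip it */
+	return 0;
+}
+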
+NOTE to stable@ people:
+When backporting to kernels 3.18 and older, use dm_internal_suspend and
+dm_internal_resume instead of dm_internal_suspend_fast and
+dm_internal_resume_fast.
+
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-snap.c | 35 +++++++++++++++++++++++++++++------
+ drivers/md/dm.c | 13 +++++++++++++
+ include/linux/device-mapper.h | 1 +
+ 3 files changed, 43 insertions(+), 6 deletions(-)
+
+--- a/drivers/md/dm-snap.c
++++ b/drivers/md/dm-snap.c
+@@ -1888,20 +1888,39 @@ static int snapshot_preresume(struct dm_
+ static void snapshot_resume(struct dm_target *ti)
+ {
+ struct dm_snapshot *s = ti->private;
+- struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
++ struct dm_snapshot *snap_src = NULL, *snap_dest = NULL, *snap_merging = NULL;
+ struct dm_origin *o;
+ struct mapped_device *origin_md = NULL;
++ bool must_restart_merging = false;
+
+ down_read(&_origins_lock);
+
+ o = __lookup_dm_origin(s->origin->bdev);
+ if (o)
+ origin_md = dm_table_get_md(o->ti->table);
++ if (!origin_md) {
++ (void) __find_snapshots_sharing_cow(s, NULL, NULL, &snap_merging);
++ if (snap_merging)
++ origin_md = dm_table_get_md(snap_merging->ti->table);
++ }
+ if (origin_md == dm_table_get_md(ti->table))
+ origin_md = NULL;
++ if (origin_md) {
++ if (dm_hold(origin_md))
++ origin_md = NULL;
++ }
++
++ up_read(&_origins_lock);
+
+- if (origin_md)
++ if (origin_md) {
+ dm_internal_suspend_fast(origin_md);
++ if (snap_merging && test_bit(RUNNING_MERGE, &snap_merging->state_bits)) {
++ must_restart_merging = true;
++ stop_merge(snap_merging);
++ }
++ }
++
++ down_read(&_origins_lock);
+
+ (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
+ if (snap_src && snap_dest) {
+@@ -1912,11 +1931,15 @@ static void snapshot_resume(struct dm_ta
+ up_write(&snap_src->lock);
+ }
+
+- if (origin_md)
+- dm_internal_resume_fast(origin_md);
+-
+ up_read(&_origins_lock);
+
++ if (origin_md) {
++ if (must_restart_merging)
++ start_merge(snap_merging);
++ dm_internal_resume_fast(origin_md);
++ dm_put(origin_md);
++ }
++
+ /* Now we have correct chunk size, reregister */
+ reregister_snapshot(s);
+
+@@ -2360,7 +2383,7 @@ static struct target_type snapshot_targe
+
+ static struct target_type merge_target = {
+ .name = dm_snapshot_merge_target_name,
+- .version = {1, 2, 0},
++ .version = {1, 3, 0},
+ .module = THIS_MODULE,
+ .ctr = snapshot_ctr,
+ .dtr = snapshot_dtr,
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -2507,6 +2507,19 @@ void dm_get(struct mapped_device *md)
+ BUG_ON(test_bit(DMF_FREEING, &md->flags));
+ }
+
++int dm_hold(struct mapped_device *md)
++{
++ spin_lock(&_minor_lock);
++ if (test_bit(DMF_FREEING, &md->flags)) {
++ spin_unlock(&_minor_lock);
++ return -EBUSY;
++ }
++ dm_get(md);
++ spin_unlock(&_minor_lock);
++ return 0;
++}
++EXPORT_SYMBOL_GPL(dm_hold);
++
+ const char *dm_device_name(struct mapped_device *md)
+ {
+ return md->name;
+--- a/include/linux/device-mapper.h
++++ b/include/linux/device-mapper.h
+@@ -368,6 +368,7 @@ int dm_create(int minor, struct mapped_d
+ */
+ struct mapped_device *dm_get_md(dev_t dev);
+ void dm_get(struct mapped_device *md);
++int dm_hold(struct mapped_device *md);
+ void dm_put(struct mapped_device *md);
+
+ /*
--- /dev/null
+From b735fede8d957d9d255e9c5cf3964cfa59799637 Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Thu, 26 Feb 2015 11:40:35 -0500
+Subject: dm snapshot: suspend origin when doing exception handover
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit b735fede8d957d9d255e9c5cf3964cfa59799637 upstream.
+
+In the function snapshot_resume we perform exception store handover. If
+there is another active snapshot target, the exception store is moved
+from this target to the target that is being resumed.
+
+The problem is that if there is some pending exception, it will point to
+an incorrect exception store after that handover, causing a crash due to
+dm-snap-persistent.c:get_exception()'s BUG_ON.
+
+This bug can be triggered by repeatedly changing snapshot permissions
+with "lvchange -p r" and "lvchange -p rw" while there are writes on the
+associated origin device.
+
+To fix this bug, we must suspend the origin device when doing the
+exception store handover to make sure that there are no pending
+exceptions:
+- introduce _origin_hash that keeps track of dm_origin structures.
+- introduce functions __lookup_dm_origin, __insert_dm_origin and
+ __remove_dm_origin that manipulate the origin hash.
+- modify snapshot_resume so that it calls dm_internal_suspend_fast() and
+ dm_internal_resume_fast() on the origin device.
+
+NOTE to stable@ people:
+
+When backporting to kernels 3.12-3.18, use dm_internal_suspend and
+dm_internal_resume instead of dm_internal_suspend_fast and
+dm_internal_resume_fast.
+
+When backporting to kernels older than 3.12, you need to pick functions
+dm_internal_suspend and dm_internal_resume from the commit
+fd2ed4d252701d3bbed4cd3e3d267ad469bb832a.
+
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-snap.c | 93 ++++++++++++++++++++++++++++++++++++++++++++++-----
+ drivers/md/dm.c | 2 +
+ 2 files changed, 86 insertions(+), 9 deletions(-)
+
+--- a/drivers/md/dm-snap.c
++++ b/drivers/md/dm-snap.c
+@@ -20,6 +20,8 @@
+ #include <linux/log2.h>
+ #include <linux/dm-kcopyd.h>
+
++#include "dm.h"
++
+ #include "dm-exception-store.h"
+
+ #define DM_MSG_PREFIX "snapshots"
+@@ -291,12 +293,23 @@ struct origin {
+ };
+
+ /*
++ * This structure is allocated for each origin target
++ */
++struct dm_origin {
++ struct dm_dev *dev;
++ struct dm_target *ti;
++ unsigned split_boundary;
++ struct list_head hash_list;
++};
++
++/*
+ * Size of the hash table for origin volumes. If we make this
+ * the size of the minors list then it should be nearly perfect
+ */
+ #define ORIGIN_HASH_SIZE 256
+ #define ORIGIN_MASK 0xFF
+ static struct list_head *_origins;
++static struct list_head *_dm_origins;
+ static struct rw_semaphore _origins_lock;
+
+ static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done);
+@@ -310,12 +323,22 @@ static int init_origin_hash(void)
+ _origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
+ GFP_KERNEL);
+ if (!_origins) {
+- DMERR("unable to allocate memory");
++ DMERR("unable to allocate memory for _origins");
+ return -ENOMEM;
+ }
+-
+ for (i = 0; i < ORIGIN_HASH_SIZE; i++)
+ INIT_LIST_HEAD(_origins + i);
++
++ _dm_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
++ GFP_KERNEL);
++ if (!_dm_origins) {
++ DMERR("unable to allocate memory for _dm_origins");
++ kfree(_origins);
++ return -ENOMEM;
++ }
++ for (i = 0; i < ORIGIN_HASH_SIZE; i++)
++ INIT_LIST_HEAD(_dm_origins + i);
++
+ init_rwsem(&_origins_lock);
+
+ return 0;
+@@ -324,6 +347,7 @@ static int init_origin_hash(void)
+ static void exit_origin_hash(void)
+ {
+ kfree(_origins);
++ kfree(_dm_origins);
+ }
+
+ static unsigned origin_hash(struct block_device *bdev)
+@@ -350,6 +374,30 @@ static void __insert_origin(struct origi
+ list_add_tail(&o->hash_list, sl);
+ }
+
++static struct dm_origin *__lookup_dm_origin(struct block_device *origin)
++{
++ struct list_head *ol;
++ struct dm_origin *o;
++
++ ol = &_dm_origins[origin_hash(origin)];
++ list_for_each_entry (o, ol, hash_list)
++ if (bdev_equal(o->dev->bdev, origin))
++ return o;
++
++ return NULL;
++}
++
++static void __insert_dm_origin(struct dm_origin *o)
++{
++ struct list_head *sl = &_dm_origins[origin_hash(o->dev->bdev)];
++ list_add_tail(&o->hash_list, sl);
++}
++
++static void __remove_dm_origin(struct dm_origin *o)
++{
++ list_del(&o->hash_list);
++}
++
+ /*
+ * _origins_lock must be held when calling this function.
+ * Returns number of snapshots registered using the supplied cow device, plus:
+@@ -1841,8 +1889,20 @@ static void snapshot_resume(struct dm_ta
+ {
+ struct dm_snapshot *s = ti->private;
+ struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
++ struct dm_origin *o;
++ struct mapped_device *origin_md = NULL;
+
+ down_read(&_origins_lock);
++
++ o = __lookup_dm_origin(s->origin->bdev);
++ if (o)
++ origin_md = dm_table_get_md(o->ti->table);
++ if (origin_md == dm_table_get_md(ti->table))
++ origin_md = NULL;
++
++ if (origin_md)
++ dm_internal_suspend_fast(origin_md);
++
+ (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
+ if (snap_src && snap_dest) {
+ down_write(&snap_src->lock);
+@@ -1851,6 +1911,10 @@ static void snapshot_resume(struct dm_ta
+ up_write(&snap_dest->lock);
+ up_write(&snap_src->lock);
+ }
++
++ if (origin_md)
++ dm_internal_resume_fast(origin_md);
++
+ up_read(&_origins_lock);
+
+ /* Now we have correct chunk size, reregister */
+@@ -2133,11 +2197,6 @@ static int origin_write_extent(struct dm
+ * Origin: maps a linear range of a device, with hooks for snapshotting.
+ */
+
+-struct dm_origin {
+- struct dm_dev *dev;
+- unsigned split_boundary;
+-};
+-
+ /*
+ * Construct an origin mapping: <dev_path>
+ * The context for an origin is merely a 'struct dm_dev *'
+@@ -2166,6 +2225,7 @@ static int origin_ctr(struct dm_target *
+ goto bad_open;
+ }
+
++ o->ti = ti;
+ ti->private = o;
+ ti->num_flush_bios = 1;
+
+@@ -2180,6 +2240,7 @@ bad_alloc:
+ static void origin_dtr(struct dm_target *ti)
+ {
+ struct dm_origin *o = ti->private;
++
+ dm_put_device(ti, o->dev);
+ kfree(o);
+ }
+@@ -2216,6 +2277,19 @@ static void origin_resume(struct dm_targ
+ struct dm_origin *o = ti->private;
+
+ o->split_boundary = get_origin_minimum_chunksize(o->dev->bdev);
++
++ down_write(&_origins_lock);
++ __insert_dm_origin(o);
++ up_write(&_origins_lock);
++}
++
++static void origin_postsuspend(struct dm_target *ti)
++{
++ struct dm_origin *o = ti->private;
++
++ down_write(&_origins_lock);
++ __remove_dm_origin(o);
++ up_write(&_origins_lock);
+ }
+
+ static void origin_status(struct dm_target *ti, status_type_t type,
+@@ -2258,12 +2332,13 @@ static int origin_iterate_devices(struct
+
+ static struct target_type origin_target = {
+ .name = "snapshot-origin",
+- .version = {1, 8, 1},
++ .version = {1, 9, 0},
+ .module = THIS_MODULE,
+ .ctr = origin_ctr,
+ .dtr = origin_dtr,
+ .map = origin_map,
+ .resume = origin_resume,
++ .postsuspend = origin_postsuspend,
+ .status = origin_status,
+ .merge = origin_merge,
+ .iterate_devices = origin_iterate_devices,
+@@ -2271,7 +2346,7 @@ static struct target_type origin_target
+
+ static struct target_type snapshot_target = {
+ .name = "snapshot",
+- .version = {1, 12, 0},
++ .version = {1, 13, 0},
+ .module = THIS_MODULE,
+ .ctr = snapshot_ctr,
+ .dtr = snapshot_dtr,
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -3007,6 +3007,7 @@ void dm_internal_suspend_fast(struct map
+ flush_workqueue(md->wq);
+ dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
+ }
++EXPORT_SYMBOL_GPL(dm_internal_suspend_fast);
+
+ void dm_internal_resume_fast(struct mapped_device *md)
+ {
+@@ -3018,6 +3019,7 @@ void dm_internal_resume_fast(struct mapp
+ done:
+ mutex_unlock(&md->suspend_lock);
+ }
++EXPORT_SYMBOL_GPL(dm_internal_resume_fast);
+
+ /*-----------------------------------------------------------------
+ * Event notification.
--- /dev/null
+From 5f027a3bf184d1d36e68745f7cd3718a8b879cc0 Mon Sep 17 00:00:00 2001
+From: Joe Thornber <ejt@redhat.com>
+Date: Fri, 27 Feb 2015 14:09:12 +0000
+Subject: dm thin: fix to consistently zero-fill reads to unprovisioned blocks
+
+From: Joe Thornber <ejt@redhat.com>
+
+commit 5f027a3bf184d1d36e68745f7cd3718a8b879cc0 upstream.
+
+It was always intended that a read to an unprovisioned block would return
+zeroes regardless of whether the pool is in read-only or read-write mode.
+thin_bio_map() was inconsistent in its handling of such reads when the
+pool is in read-only mode; it now properly zero-fills the bios it returns
+in response to unprovisioned block reads.
+
+Eliminate thin_bio_map()'s special read-only mode handling of -ENODATA
+and just allow the IO to be deferred to the worker which will result in
+pool->process_bio() handling the IO (which already properly zero-fills
+reads to unprovisioned blocks).
+
+Reported-by: Eric Sandeen <sandeen@redhat.com>
+Signed-off-by: Joe Thornber <ejt@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-thin.c | 11 -----------
+ 1 file changed, 11 deletions(-)
+
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -2357,17 +2357,6 @@ static int thin_bio_map(struct dm_target
+ return DM_MAPIO_REMAPPED;
+
+ case -ENODATA:
+- if (get_pool_mode(tc->pool) == PM_READ_ONLY) {
+- /*
+- * This block isn't provisioned, and we have no way
+- * of doing so.
+- */
+- handle_unserviceable_bio(tc->pool, bio);
+- cell_defer_no_holder(tc, virt_cell);
+- return DM_MAPIO_SUBMITTED;
+- }
+- /* fall through */
+-
+ case -EWOULDBLOCK:
+ thin_defer_cell(tc, virt_cell);
+ return DM_MAPIO_SUBMITTED;
--- /dev/null
+From a104a45ba7a51b5b4c5e8437020d9d48edf22f89 Mon Sep 17 00:00:00 2001
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Date: Mon, 9 Mar 2015 12:16:42 +0200
+Subject: dmaengine: dw: append MODULE_ALIAS for platform driver
+
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+
+commit a104a45ba7a51b5b4c5e8437020d9d48edf22f89 upstream.
+
+The commit 9cade1a46c77 (dma: dw: split driver to library part and platform
+code) introduced a separate platform driver but did not add a
+MODULE_ALIAS("platform:dw_dmac"); to that module.
+
+The patch adds the alias so that the driver is loaded automatically when a
+matching platform device is registered.
+
+Reported-by: "Blin, Jerome" <jerome.blin@intel.com>
+Fixes: 9cade1a46c77 (dma: dw: split driver to library part and platform code)
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Signed-off-by: Vinod Koul <vinod.koul@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/dma/dw/platform.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/dma/dw/platform.c
++++ b/drivers/dma/dw/platform.c
+@@ -26,6 +26,8 @@
+
+ #include "internal.h"
+
++#define DRV_NAME "dw_dmac"
++
+ static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+ {
+@@ -284,7 +286,7 @@ static struct platform_driver dw_driver
+ .remove = dw_remove,
+ .shutdown = dw_shutdown,
+ .driver = {
+- .name = "dw_dmac",
++ .name = DRV_NAME,
+ .pm = &dw_dev_pm_ops,
+ .of_match_table = of_match_ptr(dw_dma_of_id_table),
+ .acpi_match_table = ACPI_PTR(dw_dma_acpi_id_table),
+@@ -305,3 +307,4 @@ module_exit(dw_exit);
+
+ MODULE_LICENSE("GPL v2");
+ MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller platform driver");
++MODULE_ALIAS("platform:" DRV_NAME);
--- /dev/null
+From 98cf21c61a7f5419d82f847c4d77bf6e96a76f5f Mon Sep 17 00:00:00 2001
+From: Sergei Antonov <saproj@gmail.com>
+Date: Wed, 25 Mar 2015 15:55:34 -0700
+Subject: hfsplus: fix B-tree corruption after insertion at position 0
+
+From: Sergei Antonov <saproj@gmail.com>
+
+commit 98cf21c61a7f5419d82f847c4d77bf6e96a76f5f upstream.
+
+Fix B-tree corruption when a new record is inserted at position 0 in the
+node in hfs_brec_insert(). In this case hfs_brec_update_parent() is
+called to update the parent index node (if it exists), and it is passed a
+hfs_find_data with a search_key containing the newly inserted key instead
+of the key to be updated. This results in an inconsistent index node.
+The bug reproduces on my machine after an extents overflow record for
+the catalog file (CNID=4) is inserted into the extents overflow B-tree.
+Because of a low (reserved) value of CNID=4, it has to become the first
+record in the first leaf node.
+
+The resulting first leaf node is correct:
+
+ ----------------------------------------------------
+ | key0.CNID=4 | key1.CNID=123 | key2.CNID=456, ... |
+ ----------------------------------------------------
+
+But the parent index key0 still contains the previous key CNID=123:
+
+ -----------------------
+ | key0.CNID=123 | ... |
+ -----------------------
+
+A change in hfs_brec_insert() makes hfs_brec_update_parent() work
+correctly by preventing it from getting an fd->record value of -1 from
+__hfs_brec_find().
+
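+The invariant is easy to see in a toy model (not the hfsplus code): an index
+node stores the first key of each child, so inserting a new smallest key at
+position 0 of a child leaves the parent stale unless the parent entry is
+refreshed from the child's actual first key:
+
+#include <stdio.h>
+
+int main(void)
+{
+	int leaf[4] = { 123, 456, 789 };	/* keys in the first leaf node */
+	int leaf_len = 3;
+	int parent_key0 = leaf[0];		/* parent mirrors leaf[0] */
+	int i;
+
+	/* insert CNID=4 at position 0 */
+	for (i = leaf_len; i > 0; i--)
+		leaf[i] = leaf[i - 1];
+	leaf[0] = 4;
+	leaf_len++;
+
+	printf("leaf[0]=%d parent_key0=%d (stale)\n", leaf[0], parent_key0);
+
+	parent_key0 = leaf[0];			/* the fix: re-read the real key 0 */
+	printf("leaf[0]=%d parent_key0=%d (consistent)\n", leaf[0], parent_key0);
+	return 0;
+}
+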
+Along the way, I removed duplicate code by unifying the if
+condition. The resulting code is equivalent to the original code
+because node is never 0.
+
+Also hfs_brec_update_parent() will now return an error after getting a
+negative fd->record value. However, the return value of
+hfs_brec_update_parent() is not checked anywhere in the file and I'm
+leaving it unchanged by this patch. brec.c lacks error checking after
+some other calls too, but this issue is of less importance than the one
+being fixed by this patch.
+
+Signed-off-by: Sergei Antonov <saproj@gmail.com>
+Cc: Joe Perches <joe@perches.com>
+Reviewed-by: Vyacheslav Dubeyko <slava@dubeyko.com>
+Acked-by: Hin-Tak Leung <htl10@users.sourceforge.net>
+Cc: Anton Altaparmakov <aia21@cam.ac.uk>
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Cc: Christoph Hellwig <hch@infradead.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/hfsplus/brec.c | 20 +++++++++++---------
+ 1 file changed, 11 insertions(+), 9 deletions(-)
+
+--- a/fs/hfsplus/brec.c
++++ b/fs/hfsplus/brec.c
+@@ -131,13 +131,16 @@ skip:
+ hfs_bnode_write(node, entry, data_off + key_len, entry_len);
+ hfs_bnode_dump(node);
+
+- if (new_node) {
+- /* update parent key if we inserted a key
+- * at the start of the first node
+- */
+- if (!rec && new_node != node)
+- hfs_brec_update_parent(fd);
++ /*
++ * update parent key if we inserted a key
++ * at the start of the node and it is not the new node
++ */
++ if (!rec && new_node != node) {
++ hfs_bnode_read_key(node, fd->search_key, data_off + size);
++ hfs_brec_update_parent(fd);
++ }
+
++ if (new_node) {
+ hfs_bnode_put(fd->bnode);
+ if (!new_node->parent) {
+ hfs_btree_inc_height(tree);
+@@ -168,9 +171,6 @@ skip:
+ goto again;
+ }
+
+- if (!rec)
+- hfs_brec_update_parent(fd);
+-
+ return 0;
+ }
+
+@@ -370,6 +370,8 @@ again:
+ if (IS_ERR(parent))
+ return PTR_ERR(parent);
+ __hfs_brec_find(parent, fd, hfs_find_rec_by_key);
++ if (fd->record < 0)
++ return -ENOENT;
+ hfs_bnode_dump(parent);
+ rec = fd->record;
+
staging-vt6656-vnt_rf_setpower-fix-missing-rate-rate_12m.patch
vt6655-rfbsetpower-fix-missing-rate-rate_12m.patch
vt6655-fix-late-setting-of-byrftype.patch
+dmaengine-dw-append-module_alias-for-platform-driver.patch
+dm-hold-suspend_lock-while-suspending-device-during-device-deletion.patch
+dm-io-deal-with-wandering-queue-limits-when-handling-req_discard-and-req_write_same.patch
+dm-thin-fix-to-consistently-zero-fill-reads-to-unprovisioned-blocks.patch
+dm-snapshot-suspend-origin-when-doing-exception-handover.patch
+dm-snapshot-suspend-merging-snapshot-when-doing-exception-handover.patch
+spi-qup-fix-cs-num-dt-property-parsing.patch
+spi-dw-mid-clear-busy-flag-fist-and-test-other-one.patch
+spi-trigger-trace-event-for-message-done-before-mesg-complete.patch
+hfsplus-fix-b-tree-corruption-after-insertion-at-position-0.patch
--- /dev/null
+From 854d2f241d71f6ca08ccde30e6c7c2e403363e52 Mon Sep 17 00:00:00 2001
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Date: Fri, 6 Mar 2015 14:42:01 +0200
+Subject: spi: dw-mid: clear BUSY flag fist and test other one
+
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+
+commit 854d2f241d71f6ca08ccde30e6c7c2e403363e52 upstream.
+
+The DMA completion logic is currently broken: test_and_clear_bit() returns
+only the previous state of the bit being cleared, never whether the other
+bit is set. The condition is therefore always false, and we end up with
+spi_finalize_current_transfer() called on every DMA completion, which is
+wrong.
+
+The patch fixes the logic by clearing the BUSY bit first and then testing
+the other one.
+
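+The arithmetic behind the bug can be shown with a stand-alone snippet
+(user-space stand-ins for the kernel helpers and bit numbers, chosen only
+for illustration): test_and_clear_bit() returns 0 or 1, so ANDing it with
+BIT(RX_BUSY) == 2 can never be non-zero and the early return never fires:
+
+#include <stdio.h>
+
+#define BIT(nr)		(1UL << (nr))
+#define TX_BUSY		0
+#define RX_BUSY		1
+
+static unsigned long dma_chan_busy = BIT(TX_BUSY) | BIT(RX_BUSY);
+
+static int test_and_clear_bit(int nr, unsigned long *addr)
+{
+	int old = (*addr >> nr) & 1;
+
+	*addr &= ~BIT(nr);
+	return old;
+}
+
+int main(void)
+{
+	/* old test in the tx callback: 1 & 2 == 0 although RX is still busy */
+	printf("old condition: %lu\n",
+	       test_and_clear_bit(TX_BUSY, &dma_chan_busy) & BIT(RX_BUSY));
+
+	/* fixed order: clear our own bit, then test the other one */
+	dma_chan_busy = BIT(TX_BUSY) | BIT(RX_BUSY);
+	dma_chan_busy &= ~BIT(TX_BUSY);
+	printf("fixed check, rx still busy: %d\n",
+	       !!(dma_chan_busy & BIT(RX_BUSY)));
+	return 0;
+}
+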
+Fixes: 30c8eb52cc4a (spi: dw-mid: split rx and tx callbacks when DMA)
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/spi/spi-dw-mid.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/drivers/spi/spi-dw-mid.c
++++ b/drivers/spi/spi-dw-mid.c
+@@ -108,7 +108,8 @@ static void dw_spi_dma_tx_done(void *arg
+ {
+ struct dw_spi *dws = arg;
+
+- if (test_and_clear_bit(TX_BUSY, &dws->dma_chan_busy) & BIT(RX_BUSY))
++ clear_bit(TX_BUSY, &dws->dma_chan_busy);
++ if (test_bit(RX_BUSY, &dws->dma_chan_busy))
+ return;
+ dw_spi_xfer_done(dws);
+ }
+@@ -156,7 +157,8 @@ static void dw_spi_dma_rx_done(void *arg
+ {
+ struct dw_spi *dws = arg;
+
+- if (test_and_clear_bit(RX_BUSY, &dws->dma_chan_busy) & BIT(TX_BUSY))
++ clear_bit(RX_BUSY, &dws->dma_chan_busy);
++ if (test_bit(TX_BUSY, &dws->dma_chan_busy))
+ return;
+ dw_spi_xfer_done(dws);
+ }
--- /dev/null
+From 12cb89e37a0c25fae7a0f1d2e4985558db9d0b13 Mon Sep 17 00:00:00 2001
+From: "Ivan T. Ivanov" <iivanov@mm-sol.com>
+Date: Fri, 6 Mar 2015 17:26:17 +0200
+Subject: spi: qup: Fix cs-num DT property parsing
+
+From: "Ivan T. Ivanov" <iivanov@mm-sol.com>
+
+commit 12cb89e37a0c25fae7a0f1d2e4985558db9d0b13 upstream.
+
+num-cs is a 32-bit property; don't read just its upper 16 bits.
+
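+A small stand-alone illustration of why the 16-bit read goes wrong (not the
+kernel DT API): devicetree cells are stored as big-endian 32-bit values, so
+reading only 16 bits of "num-cs = <4>" yields the upper half, 0, instead
+of 4:
+
+#include <stdint.h>
+#include <stdio.h>
+
+int main(void)
+{
+	/* raw property bytes for num-cs = <4>, as stored in the FDT */
+	uint8_t raw[4] = { 0x00, 0x00, 0x00, 0x04 };
+
+	uint16_t as_u16 = (uint16_t)(raw[0] << 8 | raw[1]);	/* upper 16 bits */
+	uint32_t as_u32 = (uint32_t)raw[0] << 24 | raw[1] << 16 |
+			  raw[2] << 8 | raw[3];
+
+	printf("16-bit read: %u (wrong)\n", as_u16);	/* 0 */
+	printf("32-bit read: %u (right)\n", as_u32);	/* 4 */
+	return 0;
+}
+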
+Fixes: 4a8573abe965 (spi: qup: Remove chip select function)
+Signed-off-by: Ivan T. Ivanov <iivanov@mm-sol.com>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/spi/spi-qup.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+--- a/drivers/spi/spi-qup.c
++++ b/drivers/spi/spi-qup.c
+@@ -489,7 +489,7 @@ static int spi_qup_probe(struct platform
+ struct resource *res;
+ struct device *dev;
+ void __iomem *base;
+- u32 max_freq, iomode;
++ u32 max_freq, iomode, num_cs;
+ int ret, irq, size;
+
+ dev = &pdev->dev;
+@@ -541,10 +541,11 @@ static int spi_qup_probe(struct platform
+ }
+
+ /* use num-cs unless not present or out of range */
+- if (of_property_read_u16(dev->of_node, "num-cs",
+- &master->num_chipselect) ||
+- (master->num_chipselect > SPI_NUM_CHIPSELECTS))
++ if (of_property_read_u32(dev->of_node, "num-cs", &num_cs) ||
++ num_cs > SPI_NUM_CHIPSELECTS)
+ master->num_chipselect = SPI_NUM_CHIPSELECTS;
++ else
++ master->num_chipselect = num_cs;
+
+ master->bus_num = pdev->id;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
--- /dev/null
+From 391949b6f02121371e3d7d9082c6d17fd9853034 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= <u.kleine-koenig@pengutronix.de>
+Date: Wed, 18 Mar 2015 11:27:28 +0100
+Subject: spi: trigger trace event for message-done before mesg->complete
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= <u.kleine-koenig@pengutronix.de>
+
+commit 391949b6f02121371e3d7d9082c6d17fd9853034 upstream.
+
+With spidev the mesg->complete callback points to spidev_complete.
+Calling it unblocks spidev_sync and so spidev_sync_write finishes. As
+the struct spi_message just handled is a local variable in
+spidev_sync_write, and recording the trace event accesses this message,
+the recording is better done first. The same can happen for
+spidev_sync_read.
+
+This fixes an oops observed on a 3.14-rt system with spidev activity
+after:
+
+ echo 1 > /sys/kernel/debug/tracing/events/spi/enable
+
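+The ordering constraint can be sketched in plain user-space C (illustrative
+names only, not the spi core): the waiter's message lives on its stack, so
+once the completion is signalled that memory may go away; anything that
+still reads the message, like the trace hook, has to run before that:
+
+#include <pthread.h>
+#include <stdio.h>
+
+struct message {
+	const char *buf;
+	pthread_mutex_t lock;
+	pthread_cond_t done;
+	int completed;
+};
+
+static void trace_message_done(struct message *m)
+{
+	printf("trace: message done, buf=%s\n", m->buf);
+}
+
+static void *finalize(void *arg)
+{
+	struct message *m = arg;
+
+	trace_message_done(m);		/* touch the message first...       */
+
+	pthread_mutex_lock(&m->lock);
+	m->completed = 1;		/* ...then wake the waiter; its     */
+	pthread_cond_signal(&m->done);	/* stack-resident message must not  */
+	pthread_mutex_unlock(&m->lock);	/* be touched after this point      */
+	return NULL;
+}
+
+int main(void)
+{
+	struct message m = { .buf = "hello" };
+	pthread_t t;
+
+	pthread_mutex_init(&m.lock, NULL);
+	pthread_cond_init(&m.done, NULL);
+
+	pthread_create(&t, NULL, finalize, &m);
+
+	pthread_mutex_lock(&m.lock);
+	while (!m.completed)
+		pthread_cond_wait(&m.done, &m.lock);
+	pthread_mutex_unlock(&m.lock);
+
+	pthread_join(t, NULL);
+	return 0;
+}
+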
+Signed-off-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/spi/spi.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -1073,13 +1073,14 @@ void spi_finalize_current_message(struct
+ "failed to unprepare message: %d\n", ret);
+ }
+ }
++
++ trace_spi_message_done(mesg);
++
+ master->cur_msg_prepared = false;
+
+ mesg->state = NULL;
+ if (mesg->complete)
+ mesg->complete(mesg->context);
+-
+- trace_spi_message_done(mesg);
+ }
+ EXPORT_SYMBOL_GPL(spi_finalize_current_message);
+