--- /dev/null
+From 49b2e27ab9f66b0a22c21980ad8118a4038324ae Mon Sep 17 00:00:00 2001
+From: Alexander Sverdlin <alexander.sverdlin@gmail.com>
+Date: Sat, 29 Apr 2017 12:19:33 +0200
+Subject: ASoC: cs4271: configure reset GPIO as output
+
+From: Alexander Sverdlin <alexander.sverdlin@gmail.com>
+
+commit 49b2e27ab9f66b0a22c21980ad8118a4038324ae upstream.
+
+During the reset "refactoring" the output configuration was lost.
+This commit repairs sound on EDB93XX boards.
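+
+For background, a minimal illustration of the two legacy gpiolib calls
+involved (generic gpiolib semantics, not code from this driver):
+
+    gpio_set_value(gpio, 0);        /* drives the level only; the line
+                                     * must already be an output */
+    gpio_direction_output(gpio, 0); /* configures the line as an output
+                                     * and drives it low in one call */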
+
+Fixes: 9a397f4 ("ASoC: cs4271: add regulator consumer support")
+Signed-off-by: Alexander Sverdlin <alexander.sverdlin@gmail.com>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/soc/codecs/cs4271.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/sound/soc/codecs/cs4271.c
++++ b/sound/soc/codecs/cs4271.c
+@@ -498,7 +498,7 @@ static int cs4271_reset(struct snd_soc_c
+ struct cs4271_private *cs4271 = snd_soc_codec_get_drvdata(codec);
+
+ if (gpio_is_valid(cs4271->gpio_nreset)) {
+- gpio_set_value(cs4271->gpio_nreset, 0);
++ gpio_direction_output(cs4271->gpio_nreset, 0);
+ mdelay(1);
+ gpio_set_value(cs4271->gpio_nreset, 1);
+ mdelay(1);
--- /dev/null
+From 7d1fedb6e96a960aa91e4ff70714c3fb09195a5a Mon Sep 17 00:00:00 2001
+From: Vinothkumar Raja <vinraja@cs.stonybrook.edu>
+Date: Thu, 6 Apr 2017 22:09:38 -0400
+Subject: dm btree: fix for dm_btree_find_lowest_key()
+
+From: Vinothkumar Raja <vinraja@cs.stonybrook.edu>
+
+commit 7d1fedb6e96a960aa91e4ff70714c3fb09195a5a upstream.
+
+dm_btree_find_lowest_key() is giving incorrect results. find_key()
+traverses the btree correctly for finding the highest key, but there is
+an error in the way it traverses the btree for retrieving the lowest
+key. dm_btree_find_lowest_key() fetches the first key of the rightmost
+block of the btree instead of fetching the first key from the leftmost
+block.
+
+Fix this by conditionally passing the correct parameter to value64()
+based on the @find_highest flag.
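+
+A simplified sketch of the intended descent step inside find_key()
+(details such as endian conversion and error handling omitted):
+
+    /* At each internal node, descend towards the target key. */
+    if (find_highest)
+        block = value64(node, node->nr_entries - 1); /* rightmost child */
+    else
+        block = value64(node, 0);                    /* leftmost child */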
+
+Signed-off-by: Erez Zadok <ezk@fsl.cs.sunysb.edu>
+Signed-off-by: Vinothkumar Raja <vinraja@cs.stonybrook.edu>
+Signed-off-by: Nidhi Panpalia <npanpalia@cs.stonybrook.edu>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/persistent-data/dm-btree.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+--- a/drivers/md/persistent-data/dm-btree.c
++++ b/drivers/md/persistent-data/dm-btree.c
+@@ -902,8 +902,12 @@ static int find_key(struct ro_spine *s,
+ else
+ *result_key = le64_to_cpu(ro_node(s)->keys[0]);
+
+- if (next_block || flags & INTERNAL_NODE)
+- block = value64(ro_node(s), i);
++ if (next_block || flags & INTERNAL_NODE) {
++ if (find_highest)
++ block = value64(ro_node(s), i);
++ else
++ block = value64(ro_node(s), 0);
++ }
+
+ } while (flags & INTERNAL_NODE);
+
--- /dev/null
+From 1b0fb5a5b2dc0dddcfa575060441a7176ba7ac37 Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Sun, 30 Apr 2017 17:33:26 -0400
+Subject: dm bufio: avoid a possible ABBA deadlock
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit 1b0fb5a5b2dc0dddcfa575060441a7176ba7ac37 upstream.
+
+__get_memory_limit() tests if dm_bufio_cache_size changed and calls
+__cache_size_refresh() if it did. It takes dm_bufio_clients_lock while
+it already holds the client lock. However, lock ordering is violated
+because in cleanup_old_buffers() dm_bufio_clients_lock is taken before
+the client lock.
+
+This results in a possible deadlock and a lockdep warning.
+
+Fix this deadlock by changing mutex_lock() to mutex_trylock(). If the
+lock can't be taken, it will be re-checked next time when a new buffer
+is allocated.
+
+Also add "unlikely" to the if condition, so that the optimizer assumes
+that the condition is false.
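+
+An illustrative sketch of the two conflicting lock orders (not the
+exact call chains):
+
+    /* Thread A: __get_memory_limit(), entered with the client lock held */
+    dm_bufio_lock(c);                       /* client lock first */
+    mutex_lock(&dm_bufio_clients_lock);     /* global lock second */
+
+    /* Thread B: cleanup_old_buffers() */
+    mutex_lock(&dm_bufio_clients_lock);     /* global lock first */
+    dm_bufio_lock(c);                       /* client lock second */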
+
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-bufio.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+--- a/drivers/md/dm-bufio.c
++++ b/drivers/md/dm-bufio.c
+@@ -933,10 +933,11 @@ static void __get_memory_limit(struct dm
+ {
+ unsigned long buffers;
+
+- if (ACCESS_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch) {
+- mutex_lock(&dm_bufio_clients_lock);
+- __cache_size_refresh();
+- mutex_unlock(&dm_bufio_clients_lock);
++ if (unlikely(ACCESS_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch)) {
++ if (mutex_trylock(&dm_bufio_clients_lock)) {
++ __cache_size_refresh();
++ mutex_unlock(&dm_bufio_clients_lock);
++ }
+ }
+
+ buffers = dm_bufio_cache_size_per_client >>
--- /dev/null
+From 390020ad2af9ca04844c4f3b1f299ad8746d84c8 Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Sun, 30 Apr 2017 17:34:53 -0400
+Subject: dm bufio: check new buffer allocation watermark every 30 seconds
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit 390020ad2af9ca04844c4f3b1f299ad8746d84c8 upstream.
+
+dm-bufio checks a watermark when it allocates a new buffer in
+__bufio_new(). However, it doesn't check the watermark when the user
+changes /sys/module/dm_bufio/parameters/max_cache_size_bytes.
+
+This may result in a problem: if the watermark is high enough that all
+possible buffers are allocated, and the user then lowers the value of
+"max_cache_size_bytes", the watermark will never be checked against the
+new value because no new buffer would be allocated.
+
+To fix this, change __evict_old_buffers() so that it checks the
+watermark. __evict_old_buffers() is called every 30 seconds, so if the
+user reduces "max_cache_size_bytes", dm-bufio will react to this change
+within 30 seconds and decrease memory consumption.
+
+Depends-on: 1b0fb5a5b2 ("dm bufio: avoid a possible ABBA deadlock")
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-bufio.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/drivers/md/dm-bufio.c
++++ b/drivers/md/dm-bufio.c
+@@ -1783,9 +1783,17 @@ static void __evict_old_buffers(struct d
+ struct dm_buffer *b, *tmp;
+ unsigned retain_target = get_retain_buffers(c);
+ unsigned count;
++ LIST_HEAD(write_list);
+
+ dm_bufio_lock(c);
+
++ __check_watermark(c, &write_list);
++ if (unlikely(!list_empty(&write_list))) {
++ dm_bufio_unlock(c);
++ __flush_write_list(&write_list);
++ dm_bufio_lock(c);
++ }
++
+ count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
+ list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_CLEAN], lru_list) {
+ if (count <= retain_target)
+@@ -1810,6 +1818,8 @@ static void cleanup_old_buffers(void)
+
+ mutex_lock(&dm_bufio_clients_lock);
+
++ __cache_size_refresh();
++
+ list_for_each_entry(c, &dm_bufio_all_clients, client_list)
+ __evict_old_buffers(c, max_age_hz);
+
--- /dev/null
+From 13840d38016203f0095cd547b90352812d24b787 Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Sun, 30 Apr 2017 17:32:28 -0400
+Subject: dm bufio: make the parameter "retain_bytes" unsigned long
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit 13840d38016203f0095cd547b90352812d24b787 upstream.
+
+Change the type of the parameter "retain_bytes" from unsigned to
+unsigned long, so that on 64-bit machines the user can set more than
+4GiB of data to be retained.
+
+Also, change the type of the variable "count" in the function
+"__evict_old_buffers" to unsigned long. The assignment
+"count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];"
+could truncate when the unsigned long sum is assigned to an unsigned
+int, and that could result in buffers not being freed when they should
+be.
+
+While at it, avoid division in get_retain_buffers(). Division is slow;
+we can change it to a shift because we have precalculated the log2 of
+the block size.
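+
+A worked example of the equivalence, assuming the block size is a power
+of two (which dm-bufio requires):
+
+    /* block_size == 1 << (sectors_per_block_bits + SECTOR_SHIFT)
+     * e.g. 4KiB blocks: sectors_per_block_bits = 3, SECTOR_SHIFT = 9,
+     *   retain_bytes / 4096  ==  retain_bytes >> 12
+     */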
+
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-bufio.c | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+--- a/drivers/md/dm-bufio.c
++++ b/drivers/md/dm-bufio.c
+@@ -216,7 +216,7 @@ static DEFINE_SPINLOCK(param_spinlock);
+ * Buffers are freed after this timeout
+ */
+ static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
+-static unsigned dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;
++static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;
+
+ static unsigned long dm_bufio_peak_allocated;
+ static unsigned long dm_bufio_allocated_kmem_cache;
+@@ -1551,10 +1551,10 @@ static bool __try_evict_buffer(struct dm
+ return true;
+ }
+
+-static unsigned get_retain_buffers(struct dm_bufio_client *c)
++static unsigned long get_retain_buffers(struct dm_bufio_client *c)
+ {
+- unsigned retain_bytes = ACCESS_ONCE(dm_bufio_retain_bytes);
+- return retain_bytes / c->block_size;
++ unsigned long retain_bytes = ACCESS_ONCE(dm_bufio_retain_bytes);
++ return retain_bytes >> (c->sectors_per_block_bits + SECTOR_SHIFT);
+ }
+
+ static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
+@@ -1564,7 +1564,7 @@ static unsigned long __scan(struct dm_bu
+ struct dm_buffer *b, *tmp;
+ unsigned long freed = 0;
+ unsigned long count = nr_to_scan;
+- unsigned retain_target = get_retain_buffers(c);
++ unsigned long retain_target = get_retain_buffers(c);
+
+ for (l = 0; l < LIST_SIZE; l++) {
+ list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) {
+@@ -1781,8 +1781,8 @@ static bool older_than(struct dm_buffer
+ static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
+ {
+ struct dm_buffer *b, *tmp;
+- unsigned retain_target = get_retain_buffers(c);
+- unsigned count;
++ unsigned long retain_target = get_retain_buffers(c);
++ unsigned long count;
+ LIST_HEAD(write_list);
+
+ dm_bufio_lock(c);
+@@ -1942,7 +1942,7 @@ MODULE_PARM_DESC(max_cache_size_bytes, "
+ module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR);
+ MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");
+
+-module_param_named(retain_bytes, dm_bufio_retain_bytes, uint, S_IRUGO | S_IWUSR);
++module_param_named(retain_bytes, dm_bufio_retain_bytes, ulong, S_IRUGO | S_IWUSR);
+ MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory");
+
+ module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR);
--- /dev/null
+From 10add84e276432d9dd8044679a1028dd4084117e Mon Sep 17 00:00:00 2001
+From: Mike Snitzer <snitzer@redhat.com>
+Date: Fri, 5 May 2017 14:40:13 -0400
+Subject: dm cache metadata: fail operations if fail_io mode has been established
+
+From: Mike Snitzer <snitzer@redhat.com>
+
+commit 10add84e276432d9dd8044679a1028dd4084117e upstream.
+
+Otherwise it is possible to trigger crashes when the metadata is
+inaccessible, because without these checks these methods don't safely
+account for that possibility.
+
+Reported-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-cache-metadata.c | 12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+--- a/drivers/md/dm-cache-metadata.c
++++ b/drivers/md/dm-cache-metadata.c
+@@ -1627,17 +1627,19 @@ void dm_cache_metadata_set_stats(struct
+
+ int dm_cache_commit(struct dm_cache_metadata *cmd, bool clean_shutdown)
+ {
+- int r;
++ int r = -EINVAL;
+ flags_mutator mutator = (clean_shutdown ? set_clean_shutdown :
+ clear_clean_shutdown);
+
+ WRITE_LOCK(cmd);
++ if (cmd->fail_io)
++ goto out;
++
+ r = __commit_transaction(cmd, mutator);
+ if (r)
+ goto out;
+
+ r = __begin_transaction(cmd);
+-
+ out:
+ WRITE_UNLOCK(cmd);
+ return r;
+@@ -1649,7 +1651,8 @@ int dm_cache_get_free_metadata_block_cou
+ int r = -EINVAL;
+
+ READ_LOCK(cmd);
+- r = dm_sm_get_nr_free(cmd->metadata_sm, result);
++ if (!cmd->fail_io)
++ r = dm_sm_get_nr_free(cmd->metadata_sm, result);
+ READ_UNLOCK(cmd);
+
+ return r;
+@@ -1661,7 +1664,8 @@ int dm_cache_get_metadata_dev_size(struc
+ int r = -EINVAL;
+
+ READ_LOCK(cmd);
+- r = dm_sm_get_nr_blocks(cmd->metadata_sm, result);
++ if (!cmd->fail_io)
++ r = dm_sm_get_nr_blocks(cmd->metadata_sm, result);
+ READ_UNLOCK(cmd);
+
+ return r;
--- /dev/null
+From 7083abbbfc4fa706ff72d27d33a5214881979336 Mon Sep 17 00:00:00 2001
+From: Bart Van Assche <bart.vanassche@sandisk.com>
+Date: Thu, 27 Apr 2017 10:11:15 -0700
+Subject: dm mpath: avoid that path removal can trigger an infinite loop
+
+From: Bart Van Assche <bart.vanassche@sandisk.com>
+
+commit 7083abbbfc4fa706ff72d27d33a5214881979336 upstream.
+
+If blk_get_request() fails, check whether the failure is due to a path
+being removed. If that is the case, fail the path by triggering a call
+to fail_path(). This prevents the following scenario from being
+encountered while removing paths:
+* CPU usage of a kworker thread jumps to 100%.
+* Removing the DM device becomes impossible.
+
+Delay requeueing if blk_get_request() returns -EBUSY or -EWOULDBLOCK,
+and the queue is not dying, because in these cases immediate requeuing
+is inappropriate.
+
+Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
+Cc: Hannes Reinecke <hare@suse.com>
+Cc: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-mpath.c | 15 +++++++++++----
+ 1 file changed, 11 insertions(+), 4 deletions(-)
+
+--- a/drivers/md/dm-mpath.c
++++ b/drivers/md/dm-mpath.c
+@@ -489,6 +489,7 @@ static int multipath_clone_and_map(struc
+ struct pgpath *pgpath;
+ struct block_device *bdev;
+ struct dm_mpath_io *mpio = get_mpio(map_context);
++ struct request_queue *q;
+ struct request *clone;
+
+ /* Do we need to select a new pgpath? */
+@@ -511,12 +512,18 @@ static int multipath_clone_and_map(struc
+ mpio->nr_bytes = nr_bytes;
+
+ bdev = pgpath->path.dev->bdev;
+-
+- clone = blk_get_request(bdev_get_queue(bdev),
+- rq->cmd_flags | REQ_NOMERGE,
+- GFP_ATOMIC);
++ q = bdev_get_queue(bdev);
++ clone = blk_get_request(q, rq->cmd_flags | REQ_NOMERGE, GFP_ATOMIC);
+ if (IS_ERR(clone)) {
+ /* EBUSY, ENODEV or EWOULDBLOCK: requeue */
++ bool queue_dying = blk_queue_dying(q);
++ DMERR_LIMIT("blk_get_request() returned %ld%s - requeuing",
++ PTR_ERR(clone), queue_dying ? " (path offline)" : "");
++ if (queue_dying) {
++ atomic_inc(&m->pg_init_in_progress);
++ activate_or_offline_path(pgpath);
++ return DM_MAPIO_REQUEUE;
++ }
+ return DM_MAPIO_DELAY_REQUEUE;
+ }
+ clone->bio = clone->biotail = NULL;
--- /dev/null
+From c1d7ecf7ca11d0edd3085262c8597203440d056c Mon Sep 17 00:00:00 2001
+From: Bart Van Assche <bart.vanassche@sandisk.com>
+Date: Thu, 27 Apr 2017 10:11:16 -0700
+Subject: dm mpath: delay requeuing while path initialization is in progress
+
+From: Bart Van Assche <bart.vanassche@sandisk.com>
+
+commit c1d7ecf7ca11d0edd3085262c8597203440d056c upstream.
+
+Requeuing a request immediately while path initialization is ongoing
+causes high CPU usage, something that is undesired. Hence delay
+requeuing while path initialization is in progress.
+
+Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
+Reviewed-by: Hannes Reinecke <hare@suse.com>
+Cc: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-mpath.c | 10 +++++++---
+ 1 file changed, 7 insertions(+), 3 deletions(-)
+
+--- a/drivers/md/dm-mpath.c
++++ b/drivers/md/dm-mpath.c
+@@ -322,13 +322,16 @@ static int __pg_init_all_paths(struct mu
+ return atomic_read(&m->pg_init_in_progress);
+ }
+
+-static void pg_init_all_paths(struct multipath *m)
++static int pg_init_all_paths(struct multipath *m)
+ {
++ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&m->lock, flags);
+- __pg_init_all_paths(m);
++ ret = __pg_init_all_paths(m);
+ spin_unlock_irqrestore(&m->lock, flags);
++
++ return ret;
+ }
+
+ static void __switch_pg(struct multipath *m, struct priority_group *pg)
+@@ -503,7 +506,8 @@ static int multipath_clone_and_map(struc
+ return -EIO; /* Failed */
+ } else if (test_bit(MPATHF_QUEUE_IO, &m->flags) ||
+ test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
+- pg_init_all_paths(m);
++ if (pg_init_all_paths(m))
++ return DM_MAPIO_DELAY_REQUEUE;
+ return DM_MAPIO_REQUEUE;
+ }
+
--- /dev/null
+From 06eb061f48594aa369f6e852b352410298b317a8 Mon Sep 17 00:00:00 2001
+From: Bart Van Assche <bart.vanassche@sandisk.com>
+Date: Fri, 7 Apr 2017 16:50:44 -0700
+Subject: dm mpath: requeue after a small delay if blk_get_request() fails
+
+From: Bart Van Assche <bart.vanassche@sandisk.com>
+
+commit 06eb061f48594aa369f6e852b352410298b317a8 upstream.
+
+If blk_get_request() returns ENODEV then multipath_clone_and_map()
+causes a request to be requeued immediately. This can cause a kworker
+thread to spend 100% of the CPU time of a single core in
+__blk_mq_run_hw_queue() and also can cause device removal to never
+finish.
+
+Avoid this by only requeuing after a delay if blk_get_request() fails.
+Additionally, reduce the requeue delay.
+
+Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-mpath.c | 5 ++---
+ drivers/md/dm-rq.c | 2 +-
+ 2 files changed, 3 insertions(+), 4 deletions(-)
+
+--- a/drivers/md/dm-mpath.c
++++ b/drivers/md/dm-mpath.c
+@@ -484,7 +484,6 @@ static int multipath_clone_and_map(struc
+ struct request **__clone)
+ {
+ struct multipath *m = ti->private;
+- int r = DM_MAPIO_REQUEUE;
+ size_t nr_bytes = blk_rq_bytes(rq);
+ struct pgpath *pgpath;
+ struct block_device *bdev;
+@@ -503,7 +502,7 @@ static int multipath_clone_and_map(struc
+ } else if (test_bit(MPATHF_QUEUE_IO, &m->flags) ||
+ test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
+ pg_init_all_paths(m);
+- return r;
++ return DM_MAPIO_REQUEUE;
+ }
+
+ memset(mpio, 0, sizeof(*mpio));
+@@ -517,7 +516,7 @@ static int multipath_clone_and_map(struc
+ GFP_ATOMIC);
+ if (IS_ERR(clone)) {
+ /* EBUSY, ENODEV or EWOULDBLOCK: requeue */
+- return r;
++ return DM_MAPIO_DELAY_REQUEUE;
+ }
+ clone->bio = clone->biotail = NULL;
+ clone->rq_disk = bdev->bd_disk;
+--- a/drivers/md/dm-rq.c
++++ b/drivers/md/dm-rq.c
+@@ -280,7 +280,7 @@ static void dm_requeue_original_request(
+ if (!rq->q->mq_ops)
+ dm_old_requeue_request(rq);
+ else
+- dm_mq_delay_requeue_request(rq, delay_requeue ? 5000 : 0);
++ dm_mq_delay_requeue_request(rq, delay_requeue ? 100/*ms*/ : 0);
+
+ rq_completed(md, rw, false);
+ }
--- /dev/null
+From 89bfce763e43fa4897e0d3af6b29ed909df64cfd Mon Sep 17 00:00:00 2001
+From: Bart Van Assche <bart.vanassche@sandisk.com>
+Date: Thu, 27 Apr 2017 10:11:14 -0700
+Subject: dm mpath: split and rename activate_path() to prepare for its expanded use
+
+From: Bart Van Assche <bart.vanassche@sandisk.com>
+
+commit 89bfce763e43fa4897e0d3af6b29ed909df64cfd upstream.
+
+activate_path() is renamed to activate_path_work(), which now calls
+activate_or_offline_path(). activate_or_offline_path() will be used
+by the next commit.
+
+Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
+Cc: Hannes Reinecke <hare@suse.com>
+Cc: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-mpath.c | 17 ++++++++++++-----
+ 1 file changed, 12 insertions(+), 5 deletions(-)
+
+--- a/drivers/md/dm-mpath.c
++++ b/drivers/md/dm-mpath.c
+@@ -111,7 +111,8 @@ typedef int (*action_fn) (struct pgpath
+
+ static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
+ static void trigger_event(struct work_struct *work);
+-static void activate_path(struct work_struct *work);
++static void activate_or_offline_path(struct pgpath *pgpath);
++static void activate_path_work(struct work_struct *work);
+ static void process_queued_bios(struct work_struct *work);
+
+ /*-----------------------------------------------
+@@ -136,7 +137,7 @@ static struct pgpath *alloc_pgpath(void)
+
+ if (pgpath) {
+ pgpath->is_active = true;
+- INIT_DELAYED_WORK(&pgpath->activate_path, activate_path);
++ INIT_DELAYED_WORK(&pgpath->activate_path, activate_path_work);
+ }
+
+ return pgpath;
+@@ -1436,10 +1437,8 @@ out:
+ spin_unlock_irqrestore(&m->lock, flags);
+ }
+
+-static void activate_path(struct work_struct *work)
++static void activate_or_offline_path(struct pgpath *pgpath)
+ {
+- struct pgpath *pgpath =
+- container_of(work, struct pgpath, activate_path.work);
+ struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
+
+ if (pgpath->is_active && !blk_queue_dying(q))
+@@ -1448,6 +1447,14 @@ static void activate_path(struct work_st
+ pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED);
+ }
+
++static void activate_path_work(struct work_struct *work)
++{
++ struct pgpath *pgpath =
++ container_of(work, struct pgpath, activate_path.work);
++
++ activate_or_offline_path(pgpath);
++}
++
+ static int noretry_error(int error)
+ {
+ switch (error) {
--- /dev/null
+From 7b81ef8b14f80033e4a4168d199a0f5fd79b9426 Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Tue, 28 Mar 2017 12:53:39 -0400
+Subject: dm raid: select the Kconfig option CONFIG_MD_RAID0
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit 7b81ef8b14f80033e4a4168d199a0f5fd79b9426 upstream.
+
+Since commit 0cf4503174c1 ("dm raid: add support for the MD RAID0
+personality"), the dm-raid subsystem can activate a RAID-0 array.
+Therefore, add MD_RAID0 to the dependencies of DM_RAID, so that MD_RAID0
+will be selected when DM_RAID is selected.
+
+Fixes: 0cf4503174c1 ("dm raid: add support for the MD RAID0 personality")
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/Kconfig | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/md/Kconfig
++++ b/drivers/md/Kconfig
+@@ -365,6 +365,7 @@ config DM_LOG_USERSPACE
+ config DM_RAID
+ tristate "RAID 1/4/5/6/10 target"
+ depends on BLK_DEV_DM
++ select MD_RAID0
+ select MD_RAID1
+ select MD_RAID10
+ select MD_RAID456
--- /dev/null
+From 0377a07c7a035e0d033cd8b29f0cb15244c0916a Mon Sep 17 00:00:00 2001
+From: Joe Thornber <ejt@redhat.com>
+Date: Mon, 15 May 2017 09:45:40 -0400
+Subject: dm space map disk: fix some book keeping in the disk space map
+
+From: Joe Thornber <ejt@redhat.com>
+
+commit 0377a07c7a035e0d033cd8b29f0cb15244c0916a upstream.
+
+When decrementing the reference count for a block, the free count wasn't
+being updated if the reference count went to zero.
+
+Signed-off-by: Joe Thornber <ejt@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/persistent-data/dm-space-map-disk.c | 15 ++++++++++++++-
+ 1 file changed, 14 insertions(+), 1 deletion(-)
+
+--- a/drivers/md/persistent-data/dm-space-map-disk.c
++++ b/drivers/md/persistent-data/dm-space-map-disk.c
+@@ -142,10 +142,23 @@ static int sm_disk_inc_block(struct dm_s
+
+ static int sm_disk_dec_block(struct dm_space_map *sm, dm_block_t b)
+ {
++ int r;
++ uint32_t old_count;
+ enum allocation_event ev;
+ struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
+
+- return sm_ll_dec(&smd->ll, b, &ev);
++ r = sm_ll_dec(&smd->ll, b, &ev);
++ if (!r && (ev == SM_FREE)) {
++ /*
++ * It's only free if it's also free in the last
++ * transaction.
++ */
++ r = sm_ll_lookup(&smd->old_ll, b, &old_count);
++ if (!r && !old_count)
++ smd->nr_allocated_this_transaction--;
++ }
++
++ return r;
+ }
+
+ static int sm_disk_new_block(struct dm_space_map *sm, dm_block_t *b)
--- /dev/null
+From 91bcdb92d39711d1adb40c26b653b7978d93eb98 Mon Sep 17 00:00:00 2001
+From: Joe Thornber <ejt@redhat.com>
+Date: Mon, 15 May 2017 09:43:05 -0400
+Subject: dm thin metadata: call precommit before saving the roots
+
+From: Joe Thornber <ejt@redhat.com>
+
+commit 91bcdb92d39711d1adb40c26b653b7978d93eb98 upstream.
+
+These calls were the wrong way round in __write_initial_superblock.
+
+Signed-off-by: Joe Thornber <ejt@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-thin-metadata.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/md/dm-thin-metadata.c
++++ b/drivers/md/dm-thin-metadata.c
+@@ -485,11 +485,11 @@ static int __write_initial_superblock(st
+ if (r < 0)
+ return r;
+
+- r = save_sm_roots(pmd);
++ r = dm_tm_pre_commit(pmd->tm);
+ if (r < 0)
+ return r;
+
+- r = dm_tm_pre_commit(pmd->tm);
++ r = save_sm_roots(pmd);
+ if (r < 0)
+ return r;
+
--- /dev/null
+From 4ff33aafd32e084f5ee7faa54ba06e95f8b1b8af Mon Sep 17 00:00:00 2001
+From: Amir Goldstein <amir73il@gmail.com>
+Date: Tue, 25 Apr 2017 14:29:35 +0300
+Subject: fanotify: don't expose EOPENSTALE to userspace
+
+From: Amir Goldstein <amir73il@gmail.com>
+
+commit 4ff33aafd32e084f5ee7faa54ba06e95f8b1b8af upstream.
+
+When delivering an event to userspace for a file on an NFS share,
+if the file is deleted on the server side before the user reads the
+event, the user will not get the event.
+
+If the event queue contained several events, the stale event is
+quietly dropped and read() returns to user with events read so far
+in the buffer.
+
+If the event queue contains a single stale event or if the stale
+event is a permission event, read() returns to user with the kernel
+internal error code 518 (EOPENSTALE), which is not a POSIX error code.
+
+Check for the internal return value -EOPENSTALE in fanotify_read(),
+just as it is checked in path_openat(), and drop the event in the
+cases where it is not already dropped.
+
+This is a reproducer from Marko Rauhamaa:
+
+Just take the example program listed under "man fanotify" ("fantest")
+and follow these steps:
+
+ ==============================================================
+ NFS Server NFS Client(1) NFS Client(2)
+ ==============================================================
+ # echo foo >/nfsshare/bar.txt
+ # cat /nfsshare/bar.txt
+ foo
+ # ./fantest /nfsshare
+ Press enter key to terminate.
+ Listening for events.
+ # rm -f /nfsshare/bar.txt
+ # cat /nfsshare/bar.txt
+ read: Unknown error 518
+ cat: /nfsshare/bar.txt: Operation not permitted
+ ==============================================================
+
+where NFS Client (1) and (2) are two terminal sessions on a single NFS
+Client machine.
+
+Reported-by: Marko Rauhamaa <marko.rauhamaa@f-secure.com>
+Tested-by: Marko Rauhamaa <marko.rauhamaa@f-secure.com>
+Cc: <linux-api@vger.kernel.org>
+Signed-off-by: Amir Goldstein <amir73il@gmail.com>
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/notify/fanotify/fanotify_user.c | 26 ++++++++++++++++++--------
+ 1 file changed, 18 insertions(+), 8 deletions(-)
+
+--- a/fs/notify/fanotify/fanotify_user.c
++++ b/fs/notify/fanotify/fanotify_user.c
+@@ -295,27 +295,37 @@ static ssize_t fanotify_read(struct file
+ }
+
+ ret = copy_event_to_user(group, kevent, buf);
++ if (unlikely(ret == -EOPENSTALE)) {
++ /*
++ * We cannot report events with stale fd so drop it.
++ * Setting ret to 0 will continue the event loop and
++ * do the right thing if there are no more events to
++ * read (i.e. return bytes read, -EAGAIN or wait).
++ */
++ ret = 0;
++ }
++
+ /*
+ * Permission events get queued to wait for response. Other
+ * events can be destroyed now.
+ */
+ if (!(kevent->mask & FAN_ALL_PERM_EVENTS)) {
+ fsnotify_destroy_event(group, kevent);
+- if (ret < 0)
+- break;
+ } else {
+ #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
+- if (ret < 0) {
++ if (ret <= 0) {
+ FANOTIFY_PE(kevent)->response = FAN_DENY;
+ wake_up(&group->fanotify_data.access_waitq);
+- break;
++ } else {
++ spin_lock(&group->notification_lock);
++ list_add_tail(&kevent->list,
++ &group->fanotify_data.access_list);
++ spin_unlock(&group->notification_lock);
+ }
+- spin_lock(&group->notification_lock);
+- list_add_tail(&kevent->list,
+- &group->fanotify_data.access_list);
+- spin_unlock(&group->notification_lock);
+ #endif
+ }
++ if (ret < 0)
++ break;
+ buf += ret;
+ count -= ret;
+ }
--- /dev/null
+From eea40b8f624f25cbc02d55f2d93203f60cee9341 Mon Sep 17 00:00:00 2001
+From: Paolo Abeni <pabeni@redhat.com>
+Date: Fri, 28 Apr 2017 11:20:01 +0200
+Subject: infiniband: call ipv6 route lookup via the stub interface
+
+From: Paolo Abeni <pabeni@redhat.com>
+
+commit eea40b8f624f25cbc02d55f2d93203f60cee9341 upstream.
+
+The infiniband address handle can be triggered to resolve an ipv6
+address in response to MAD packets, even if the ipv6 module has been
+disabled via a kernel command line argument.
+
+That will cause a call into the ipv6 routing code, which is not
+initialized, and a consequent oops.
+
+This commit addresses the above issue by replacing the direct lookup
+call with an indirect one via the ipv6 stub, which is properly
+initialized according to the ipv6 status (e.g. if ipv6 is
+disabled, the routing lookup fails gracefully).
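+
+Conceptually, the stub is a table of function pointers that always has
+a safe implementation behind it (a simplified sketch, not the exact
+kernel definitions):
+
+    struct dst_entry *dst;
+    int ret;
+
+    /* net/ipv6 installs the real ops when it initializes; otherwise
+     * ipv6_stub points at defaults that fail gracefully (returning
+     * an error such as -EAFNOSUPPORT) instead of oopsing. */
+    ret = ipv6_stub->ipv6_dst_lookup(net, NULL, &dst, &fl6);
+    if (ret < 0)
+        goto put;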
+
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/core/addr.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/infiniband/core/addr.c
++++ b/drivers/infiniband/core/addr.c
+@@ -444,8 +444,8 @@ static int addr6_resolve(struct sockaddr
+ fl6.saddr = src_in->sin6_addr;
+ fl6.flowi6_oif = addr->bound_dev_if;
+
+- dst = ip6_route_output(addr->net, NULL, &fl6);
+- if ((ret = dst->error))
++ ret = ipv6_stub->ipv6_dst_lookup(addr->net, NULL, &dst, &fl6);
++ if (ret < 0)
+ goto put;
+
+ rt = (struct rt6_info *)dst;
--- /dev/null
+From 065e519e71b2c1f41936cce75b46b5ab34adb588 Mon Sep 17 00:00:00 2001
+From: NeilBrown <neilb@suse.com>
+Date: Thu, 6 Apr 2017 11:16:33 +0800
+Subject: md: MD_CLOSING needs to be cleared after called md_set_readonly or do_md_stop
+
+From: NeilBrown <neilb@suse.com>
+
+commit 065e519e71b2c1f41936cce75b46b5ab34adb588 upstream.
+
+If md_set_readonly() is called and the MD_CLOSING bit is set, the mddev
+cannot be opened any more because the MD_CLOSING bit was never cleared.
+Thus it needs to be cleared in md_ioctl() after any call to
+md_set_readonly() or do_md_stop().
+
+Signed-off-by: NeilBrown <neilb@suse.com>
+Fixes: af8d8e6f0315 ("md: changes for MD_STILL_CLOSED flag")
+Signed-off-by: Zhilong Liu <zlliu@suse.com>
+Signed-off-by: Shaohua Li <shli@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/md.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -6776,6 +6776,7 @@ static int md_ioctl(struct block_device
+ void __user *argp = (void __user *)arg;
+ struct mddev *mddev = NULL;
+ int ro;
++ bool did_set_md_closing = false;
+
+ if (!md_ioctl_valid(cmd))
+ return -ENOTTY;
+@@ -6865,7 +6866,9 @@ static int md_ioctl(struct block_device
+ err = -EBUSY;
+ goto out;
+ }
++ WARN_ON_ONCE(test_bit(MD_CLOSING, &mddev->flags));
+ set_bit(MD_CLOSING, &mddev->flags);
++ did_set_md_closing = true;
+ mutex_unlock(&mddev->open_mutex);
+ sync_blockdev(bdev);
+ }
+@@ -7058,6 +7061,8 @@ unlock:
+ mddev->hold_active = 0;
+ mddev_unlock(mddev);
+ out:
++ if(did_set_md_closing)
++ clear_bit(MD_CLOSING, &mddev->flags);
+ return err;
+ }
+ #ifdef CONFIG_COMPAT
--- /dev/null
+From 583da48e388f472e8818d9bb60ef6a1d40ee9f9d Mon Sep 17 00:00:00 2001
+From: Dennis Yang <dennisyang@qnap.com>
+Date: Wed, 29 Mar 2017 15:46:13 +0800
+Subject: md: update slab_cache before releasing new stripes when stripes resizing
+
+From: Dennis Yang <dennisyang@qnap.com>
+
+commit 583da48e388f472e8818d9bb60ef6a1d40ee9f9d upstream.
+
+When growing a raid5 device on a machine with little memory, there is a
+chance that mdadm will be killed and the following bug report can be
+observed. The same bug could also be reproduced in linux-4.10.6.
+
+[57600.075774] BUG: unable to handle kernel NULL pointer dereference at (null)
+[57600.083796] IP: [<ffffffff81a6aa87>] _raw_spin_lock+0x7/0x20
+[57600.110378] PGD 421cf067 PUD 4442d067 PMD 0
+[57600.114678] Oops: 0002 [#1] SMP
+[57600.180799] CPU: 1 PID: 25990 Comm: mdadm Tainted: P O 4.2.8 #1
+[57600.187849] Hardware name: To be filled by O.E.M. To be filled by O.E.M./MAHOBAY, BIOS QV05AR66 03/06/2013
+[57600.197490] task: ffff880044e47240 ti: ffff880043070000 task.ti: ffff880043070000
+[57600.204963] RIP: 0010:[<ffffffff81a6aa87>] [<ffffffff81a6aa87>] _raw_spin_lock+0x7/0x20
+[57600.213057] RSP: 0018:ffff880043073810 EFLAGS: 00010046
+[57600.218359] RAX: 0000000000000000 RBX: 000000000000000c RCX: ffff88011e296dd0
+[57600.225486] RDX: 0000000000000001 RSI: ffffe8ffffcb46c0 RDI: 0000000000000000
+[57600.232613] RBP: ffff880043073878 R08: ffff88011e5f8170 R09: 0000000000000282
+[57600.239739] R10: 0000000000000005 R11: 28f5c28f5c28f5c3 R12: ffff880043073838
+[57600.246872] R13: ffffe8ffffcb46c0 R14: 0000000000000000 R15: ffff8800b9706a00
+[57600.253999] FS: 00007f576106c700(0000) GS:ffff88011e280000(0000) knlGS:0000000000000000
+[57600.262078] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[57600.267817] CR2: 0000000000000000 CR3: 00000000428fe000 CR4: 00000000001406e0
+[57600.274942] Stack:
+[57600.276949] ffffffff8114ee35 ffff880043073868 0000000000000282 000000000000eb3f
+[57600.284383] ffffffff81119043 ffff880043073838 ffff880043073838 ffff88003e197b98
+[57600.291820] ffffe8ffffcb46c0 ffff88003e197360 0000000000000286 ffff880043073968
+[57600.299254] Call Trace:
+[57600.301698] [<ffffffff8114ee35>] ? cache_flusharray+0x35/0xe0
+[57600.307523] [<ffffffff81119043>] ? __page_cache_release+0x23/0x110
+[57600.313779] [<ffffffff8114eb53>] kmem_cache_free+0x63/0xc0
+[57600.319344] [<ffffffff81579942>] drop_one_stripe+0x62/0x90
+[57600.324915] [<ffffffff81579b5b>] raid5_cache_scan+0x8b/0xb0
+[57600.330563] [<ffffffff8111b98a>] shrink_slab.part.36+0x19a/0x250
+[57600.336650] [<ffffffff8111e38c>] shrink_zone+0x23c/0x250
+[57600.342039] [<ffffffff8111e4f3>] do_try_to_free_pages+0x153/0x420
+[57600.348210] [<ffffffff8111e851>] try_to_free_pages+0x91/0xa0
+[57600.353959] [<ffffffff811145b1>] __alloc_pages_nodemask+0x4d1/0x8b0
+[57600.360303] [<ffffffff8157a30b>] check_reshape+0x62b/0x770
+[57600.365866] [<ffffffff8157a4a5>] raid5_check_reshape+0x55/0xa0
+[57600.371778] [<ffffffff81583df7>] update_raid_disks+0xc7/0x110
+[57600.377604] [<ffffffff81592b73>] md_ioctl+0xd83/0x1b10
+[57600.382827] [<ffffffff81385380>] blkdev_ioctl+0x170/0x690
+[57600.388307] [<ffffffff81195238>] block_ioctl+0x38/0x40
+[57600.393525] [<ffffffff811731c5>] do_vfs_ioctl+0x2b5/0x480
+[57600.399010] [<ffffffff8115e07b>] ? vfs_write+0x14b/0x1f0
+[57600.404400] [<ffffffff811733cc>] SyS_ioctl+0x3c/0x70
+[57600.409447] [<ffffffff81a6ad97>] entry_SYSCALL_64_fastpath+0x12/0x6a
+[57600.415875] Code: 00 00 00 00 55 48 89 e5 8b 07 85 c0 74 04 31 c0 5d c3 ba 01 00 00 00 f0 0f b1 17 85 c0 75 ef b0 01 5d c3 90 31 c0 ba 01 00 00 00 <f0> 0f b1 17 85 c0 75 01 c3 55 89 c6 48 89 e5 e8 85 d1 63 ff 5d
+[57600.435460] RIP [<ffffffff81a6aa87>] _raw_spin_lock+0x7/0x20
+[57600.441208] RSP <ffff880043073810>
+[57600.444690] CR2: 0000000000000000
+[57600.448000] ---[ end trace cbc6b5cc4bf9831d ]---
+
+The problem is that resize_stripes() releases the new stripe_heads before
+assigning the new slab cache to conf->slab_cache. If the shrinker function
+raid5_cache_scan() gets called after resize_stripes() starts releasing the
+new stripes but right before the new slab cache is assigned, it is possible
+that these new stripe_heads will be freed with the old slab_cache, which
+has already been destroyed, and that triggers this bug.
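+
+An illustrative timeline of the race (simplified):
+
+    resize_stripes()                  raid5_cache_scan() (shrinker)
+    ------------------------------    -----------------------------
+    allocate new stripes from sc
+    release them for service
+                                      drop_one_stripe() frees a new
+                                      stripe via conf->slab_cache,
+                                      which is still the OLD cache
+    conf->slab_cache = sc;  /* too late */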
+
+Signed-off-by: Dennis Yang <dennisyang@qnap.com>
+Fixes: edbe83ab4c27 ("md/raid5: allow the stripe_cache to grow and shrink.")
+Reviewed-by: NeilBrown <neilb@suse.com>
+Signed-off-by: Shaohua Li <shli@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/raid5.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -2323,6 +2323,10 @@ static int resize_stripes(struct r5conf
+ err = -ENOMEM;
+
+ mutex_unlock(&conf->cache_size_mutex);
++
++ conf->slab_cache = sc;
++ conf->active_name = 1-conf->active_name;
++
+ /* Step 4, return new stripes to service */
+ while(!list_empty(&newstripes)) {
+ nsh = list_entry(newstripes.next, struct stripe_head, lru);
+@@ -2340,8 +2344,6 @@ static int resize_stripes(struct r5conf
+ }
+ /* critical section pass, GFP_NOIO no longer needed */
+
+- conf->slab_cache = sc;
+- conf->active_name = 1-conf->active_name;
+ if (!err)
+ conf->pool_size = newsize;
+ return err;
--- /dev/null
+From 0a49f2c31c3efbeb0de3e4b5598764887f629be2 Mon Sep 17 00:00:00 2001
+From: Sagi Grimberg <sagi@grimberg.me>
+Date: Sun, 23 Apr 2017 14:31:42 +0300
+Subject: mlx5: Fix mlx5_ib_map_mr_sg mr length
+
+From: Sagi Grimberg <sagi@grimberg.me>
+
+commit 0a49f2c31c3efbeb0de3e4b5598764887f629be2 upstream.
+
+In case we got an initial sg_offset, we need to
+account for it in the mr length.
+
+Fixes: ff2ba9936591 ("IB/core: Add passing an offset into the SG to ib_map_mr_sg")
+Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
+Tested-by: Israel Rukshin <israelr@mellanox.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/hw/mlx5/mr.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/infiniband/hw/mlx5/mr.c
++++ b/drivers/infiniband/hw/mlx5/mr.c
+@@ -1782,7 +1782,7 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr
+ klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset);
+ klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset);
+ klms[i].key = cpu_to_be32(lkey);
+- mr->ibmr.length += sg_dma_len(sg);
++ mr->ibmr.length += sg_dma_len(sg) - sg_offset;
+
+ sg_offset = 0;
+ }
--- /dev/null
+From 7e2f18f06408ff56d7f75e68de8064777137b319 Mon Sep 17 00:00:00 2001
+From: Brian Norris <briannorris@chromium.org>
+Date: Wed, 5 Apr 2017 15:26:40 -0700
+Subject: mwifiex: MAC randomization should not be persistent
+
+From: Brian Norris <briannorris@chromium.org>
+
+commit 7e2f18f06408ff56d7f75e68de8064777137b319 upstream.
+
+nl80211 provides the NL80211_SCAN_FLAG_RANDOM_ADDR for every scan
+request that should be randomized; the absence of such a flag means we
+should not randomize. However, mwifiex was stashing the latest
+randomization request and *always* using it for future scans, even those
+that didn't set the flag.
+
+Let's zero out the randomization info whenever we get a scan request
+without NL80211_SCAN_FLAG_RANDOM_ADDR. I'd prefer to remove
+priv->random_mac entirely (and plumb the randomization MAC properly
+through the call sequence), but the spaghetti is a little difficult to
+unravel here for me.
+
+Fixes: c2a8f0ff9c6c ("mwifiex: support random MAC address for scanning")
+Signed-off-by: Brian Norris <briannorris@chromium.org>
+Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/wireless/marvell/mwifiex/cfg80211.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
++++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+@@ -2528,9 +2528,11 @@ mwifiex_cfg80211_scan(struct wiphy *wiph
+ priv->random_mac[i] |= get_random_int() &
+ ~(request->mac_addr_mask[i]);
+ }
++ ether_addr_copy(user_scan_cfg->random_mac, priv->random_mac);
++ } else {
++ eth_zero_addr(priv->random_mac);
+ }
+
+- ether_addr_copy(user_scan_cfg->random_mac, priv->random_mac);
+ user_scan_cfg->num_ssids = request->n_ssids;
+ user_scan_cfg->ssid_list = request->ssids;
+
--- /dev/null
+From 3c8cb9ad032d737b874e402c59eb51e3c991a144 Mon Sep 17 00:00:00 2001
+From: Brian Norris <briannorris@chromium.org>
+Date: Fri, 14 Apr 2017 14:51:17 -0700
+Subject: mwifiex: pcie: fix cmd_buf use-after-free in remove/reset
+
+From: Brian Norris <briannorris@chromium.org>
+
+commit 3c8cb9ad032d737b874e402c59eb51e3c991a144 upstream.
+
+Command buffers (skb's) are allocated by the main driver, and freed upon
+the last use. That last use is often in mwifiex_free_cmd_buffer(). In
+the meantime, if the command buffer gets used by the PCI driver, we map
+it as DMA-able, and store the mapping information in the 'cb' memory.
+
+However, if a command was in-flight when resetting the device (and
+therefore was still mapped), we don't get a chance to unmap this memory
+until after the core has cleaned up its command handling.
+
+Let's keep a refcount within the PCI driver, so we ensure the memory
+only gets freed after we've finished unmapping it.
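+
+A minimal sketch of the sk_buff refcounting pattern used here:
+
+    card->cmd_buf = skb;
+    skb_get(skb);               /* PCIe driver holds its own reference */
+    ...
+    /* the core may dev_kfree_skb_any(skb) at any point after this */
+    ...
+    mwifiex_unmap_pci_memory(adapter, card->cmd_buf, PCI_DMA_TODEVICE);
+    dev_kfree_skb_any(card->cmd_buf);   /* drop the driver's reference */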
+
+Noticed by KASAN when forcing a reset via:
+
+ echo 1 > /sys/bus/pci/.../reset
+
+The same code path can presumably be exercised in remove() and
+shutdown().
+
+[ 205.390377] mwifiex_pcie 0000:01:00.0: info: shutdown mwifiex...
+[ 205.400393] ==================================================================
+[ 205.407719] BUG: KASAN: use-after-free in mwifiex_unmap_pci_memory.isra.14+0x4c/0x100 [mwifiex_pcie] at addr ffffffc0ad471b28
+[ 205.419040] Read of size 16 by task bash/1913
+[ 205.423421] =============================================================================
+[ 205.431625] BUG skbuff_head_cache (Tainted: G B ): kasan: bad access detected
+[ 205.439815] -----------------------------------------------------------------------------
+[ 205.439815]
+[ 205.449534] INFO: Allocated in __build_skb+0x48/0x114 age=1311 cpu=4 pid=1913
+[ 205.456709] alloc_debug_processing+0x124/0x178
+[ 205.461282] ___slab_alloc.constprop.58+0x528/0x608
+[ 205.466196] __slab_alloc.isra.54.constprop.57+0x44/0x54
+[ 205.471542] kmem_cache_alloc+0xcc/0x278
+[ 205.475497] __build_skb+0x48/0x114
+[ 205.479019] __netdev_alloc_skb+0xe0/0x170
+[ 205.483244] mwifiex_alloc_cmd_buffer+0x68/0xdc [mwifiex]
+[ 205.488759] mwifiex_init_fw+0x40/0x6cc [mwifiex]
+[ 205.493584] _mwifiex_fw_dpc+0x158/0x520 [mwifiex]
+[ 205.498491] mwifiex_reinit_sw+0x2c4/0x398 [mwifiex]
+[ 205.503510] mwifiex_pcie_reset_notify+0x114/0x15c [mwifiex_pcie]
+[ 205.509643] pci_reset_notify+0x5c/0x6c
+[ 205.513519] pci_reset_function+0x6c/0x7c
+[ 205.517567] reset_store+0x68/0x98
+[ 205.521003] dev_attr_store+0x54/0x60
+[ 205.524705] sysfs_kf_write+0x9c/0xb0
+[ 205.528413] INFO: Freed in __kfree_skb+0xb0/0xbc age=131 cpu=4 pid=1913
+[ 205.535064] free_debug_processing+0x264/0x370
+[ 205.539550] __slab_free+0x84/0x40c
+[ 205.543075] kmem_cache_free+0x1c8/0x2a0
+[ 205.547030] __kfree_skb+0xb0/0xbc
+[ 205.550465] consume_skb+0x164/0x178
+[ 205.554079] __dev_kfree_skb_any+0x58/0x64
+[ 205.558304] mwifiex_free_cmd_buffer+0xa0/0x158 [mwifiex]
+[ 205.563817] mwifiex_shutdown_drv+0x578/0x5c4 [mwifiex]
+[ 205.569164] mwifiex_shutdown_sw+0x178/0x310 [mwifiex]
+[ 205.574353] mwifiex_pcie_reset_notify+0xd4/0x15c [mwifiex_pcie]
+[ 205.580398] pci_reset_notify+0x5c/0x6c
+[ 205.584274] pci_dev_save_and_disable+0x24/0x6c
+[ 205.588837] pci_reset_function+0x30/0x7c
+[ 205.592885] reset_store+0x68/0x98
+[ 205.596324] dev_attr_store+0x54/0x60
+[ 205.600017] sysfs_kf_write+0x9c/0xb0
+...
+[ 205.800488] Call trace:
+[ 205.802980] [<ffffffc00020a69c>] dump_backtrace+0x0/0x190
+[ 205.808415] [<ffffffc00020a96c>] show_stack+0x20/0x28
+[ 205.813506] [<ffffffc0005d020c>] dump_stack+0xa4/0xcc
+[ 205.818598] [<ffffffc0003be44c>] print_trailer+0x158/0x168
+[ 205.824120] [<ffffffc0003be5f0>] object_err+0x4c/0x5c
+[ 205.829210] [<ffffffc0003c45bc>] kasan_report+0x334/0x500
+[ 205.834641] [<ffffffc0003c3994>] check_memory_region+0x20/0x14c
+[ 205.840593] [<ffffffc0003c3b14>] __asan_loadN+0x14/0x1c
+[ 205.845879] [<ffffffbffc46171c>] mwifiex_unmap_pci_memory.isra.14+0x4c/0x100 [mwifiex_pcie]
+[ 205.854282] [<ffffffbffc461864>] mwifiex_pcie_delete_cmdrsp_buf+0x94/0xa8 [mwifiex_pcie]
+[ 205.862421] [<ffffffbffc462028>] mwifiex_pcie_free_buffers+0x11c/0x158 [mwifiex_pcie]
+[ 205.870302] [<ffffffbffc4620d4>] mwifiex_pcie_down_dev+0x70/0x80 [mwifiex_pcie]
+[ 205.877736] [<ffffffbffc1397a8>] mwifiex_shutdown_sw+0x190/0x310 [mwifiex]
+[ 205.884658] [<ffffffbffc4606b4>] mwifiex_pcie_reset_notify+0xd4/0x15c [mwifiex_pcie]
+[ 205.892446] [<ffffffc000635f54>] pci_reset_notify+0x5c/0x6c
+[ 205.898048] [<ffffffc00063a044>] pci_dev_save_and_disable+0x24/0x6c
+[ 205.904350] [<ffffffc00063cf0c>] pci_reset_function+0x30/0x7c
+[ 205.910134] [<ffffffc000641118>] reset_store+0x68/0x98
+[ 205.915312] [<ffffffc000771588>] dev_attr_store+0x54/0x60
+[ 205.920750] [<ffffffc00046f53c>] sysfs_kf_write+0x9c/0xb0
+[ 205.926182] [<ffffffc00046dfb0>] kernfs_fop_write+0x184/0x1f8
+[ 205.931963] [<ffffffc0003d64f4>] __vfs_write+0x6c/0x17c
+[ 205.937221] [<ffffffc0003d7164>] vfs_write+0xf0/0x1c4
+[ 205.942310] [<ffffffc0003d7da0>] SyS_write+0x78/0xd8
+[ 205.947312] [<ffffffc000204634>] el0_svc_naked+0x24/0x28
+...
+[ 205.998268] ==================================================================
+
+This bug has been around in different forms for a while. It was sort of
+noticed in commit 955ab095c51a ("mwifiex: Do not kfree cmd buf while
+unregistering PCIe"), but it just fixed the double-free, without
+acknowledging the potential for use-after-free.
+
+Fixes: fc3314609047 ("mwifiex: use pci_alloc/free_consistent APIs for PCIe")
+Signed-off-by: Brian Norris <briannorris@chromium.org>
+Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/wireless/marvell/mwifiex/pcie.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
++++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
+@@ -1039,6 +1039,7 @@ static int mwifiex_pcie_delete_cmdrsp_bu
+ if (card && card->cmd_buf) {
+ mwifiex_unmap_pci_memory(adapter, card->cmd_buf,
+ PCI_DMA_TODEVICE);
++ dev_kfree_skb_any(card->cmd_buf);
+ }
+ return 0;
+ }
+@@ -1608,6 +1609,11 @@ mwifiex_pcie_send_cmd(struct mwifiex_ada
+ return -1;
+
+ card->cmd_buf = skb;
++ /*
++ * Need to keep a reference, since core driver might free up this
++ * buffer before we've unmapped it.
++ */
++ skb_get(skb);
+
+ /* To send a command, the driver will:
+ 1. Write the 64bit physical address of the data buffer to
+@@ -1711,6 +1717,7 @@ static int mwifiex_pcie_process_cmd_comp
+ if (card->cmd_buf) {
+ mwifiex_unmap_pci_memory(adapter, card->cmd_buf,
+ PCI_DMA_TODEVICE);
++ dev_kfree_skb_any(card->cmd_buf);
+ card->cmd_buf = NULL;
+ }
+
--- /dev/null
+From 46cfa2148e7371c537efff1a1c693e58f523089d Mon Sep 17 00:00:00 2001
+From: Larry Finger <Larry.Finger@lwfinger.net>
+Date: Sun, 16 Apr 2017 19:32:07 -0500
+Subject: rtlwifi: rtl8821ae: setup 8812ae RFE according to device type
+
+From: Larry Finger <Larry.Finger@lwfinger.net>
+
+commit 46cfa2148e7371c537efff1a1c693e58f523089d upstream.
+
+The current channel switch implementation sets the 8812ae RFE reg value
+assuming that the device always has type 2.
+
+Extend the set of possible RFE types and write the corresponding reg
+values.
+
+Source for new code is
+http://dlcdnet.asus.com/pub/ASUS/wireless/PCE-AC51/DR_PCE_AC51_20232801152016.zip
+
+Signed-off-by: Maxim Samoylov <max7255@gmail.com>
+Signed-off-by: Larry Finger <Larry.Finger@lwfinger.net>
+Cc: Yan-Hsuan Chuang <yhchuang@realtek.com>
+Cc: Pkshih <pkshih@realtek.com>
+Cc: Birming Chiu <birming@realtek.com>
+Cc: Shaofu <shaofu@realtek.com>
+Cc: Steven Ting <steventing@realtek.com>
+Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c | 122 ++++++++++++++++---
+ drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h | 1
+ 2 files changed, 107 insertions(+), 16 deletions(-)
+
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
+@@ -358,6 +358,107 @@ bool rtl8821ae_phy_rf_config(struct ieee
+ return rtl8821ae_phy_rf6052_config(hw);
+ }
+
++static void _rtl8812ae_phy_set_rfe_reg_24g(struct ieee80211_hw *hw)
++{
++ struct rtl_priv *rtlpriv = rtl_priv(hw);
++ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
++ u8 tmp;
++
++ switch (rtlhal->rfe_type) {
++ case 3:
++ rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x54337770);
++ rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x54337770);
++ rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x010);
++ rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x010);
++ rtl_set_bbreg(hw, 0x900, 0x00000303, 0x1);
++ break;
++ case 4:
++ rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x77777777);
++ rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77777777);
++ rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x001);
++ rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x001);
++ break;
++ case 5:
++ rtl_write_byte(rtlpriv, RA_RFE_PINMUX + 2, 0x77);
++ rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77777777);
++ tmp = rtl_read_byte(rtlpriv, RA_RFE_INV + 3);
++ rtl_write_byte(rtlpriv, RA_RFE_INV + 3, tmp & ~0x1);
++ rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000);
++ break;
++ case 1:
++ if (rtlpriv->btcoexist.bt_coexistence) {
++ rtl_set_bbreg(hw, RA_RFE_PINMUX, 0xffffff, 0x777777);
++ rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD,
++ 0x77777777);
++ rtl_set_bbreg(hw, RA_RFE_INV, 0x33f00000, 0x000);
++ rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000);
++ break;
++ }
++ case 0:
++ case 2:
++ default:
++ rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x77777777);
++ rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77777777);
++ rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x000);
++ rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000);
++ break;
++ }
++}
++
++static void _rtl8812ae_phy_set_rfe_reg_5g(struct ieee80211_hw *hw)
++{
++ struct rtl_priv *rtlpriv = rtl_priv(hw);
++ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
++ u8 tmp;
++
++ switch (rtlhal->rfe_type) {
++ case 0:
++ rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x77337717);
++ rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77337717);
++ rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x010);
++ rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x010);
++ break;
++ case 1:
++ if (rtlpriv->btcoexist.bt_coexistence) {
++ rtl_set_bbreg(hw, RA_RFE_PINMUX, 0xffffff, 0x337717);
++ rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD,
++ 0x77337717);
++ rtl_set_bbreg(hw, RA_RFE_INV, 0x33f00000, 0x000);
++ rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000);
++ } else {
++ rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD,
++ 0x77337717);
++ rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD,
++ 0x77337717);
++ rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x000);
++ rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000);
++ }
++ break;
++ case 3:
++ rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x54337717);
++ rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x54337717);
++ rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x010);
++ rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x010);
++ rtl_set_bbreg(hw, 0x900, 0x00000303, 0x1);
++ break;
++ case 5:
++ rtl_write_byte(rtlpriv, RA_RFE_PINMUX + 2, 0x33);
++ rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77337777);
++ tmp = rtl_read_byte(rtlpriv, RA_RFE_INV + 3);
++ rtl_write_byte(rtlpriv, RA_RFE_INV + 3, tmp | 0x1);
++ rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x010);
++ break;
++ case 2:
++ case 4:
++ default:
++ rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x77337777);
++ rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77337777);
++ rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x010);
++ rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x010);
++ break;
++ }
++}
++
+ u32 phy_get_tx_swing_8812A(struct ieee80211_hw *hw, u8 band,
+ u8 rf_path)
+ {
+@@ -552,14 +653,9 @@ void rtl8821ae_phy_switch_wirelessband(s
+ /* 0x82C[1:0] = 2b'00 */
+ rtl_set_bbreg(hw, 0x82c, 0x3, 0);
+ }
+- if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
+- rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD,
+- 0x77777777);
+- rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD,
+- 0x77777777);
+- rtl_set_bbreg(hw, RA_RFE_INV, 0x3ff00000, 0x000);
+- rtl_set_bbreg(hw, RB_RFE_INV, 0x3ff00000, 0x000);
+- }
++
++ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
++ _rtl8812ae_phy_set_rfe_reg_24g(hw);
+
+ rtl_set_bbreg(hw, RTXPATH, 0xf0, 0x1);
+ rtl_set_bbreg(hw, RCCK_RX, 0x0f000000, 0x1);
+@@ -614,14 +710,8 @@ void rtl8821ae_phy_switch_wirelessband(s
+ /* 0x82C[1:0] = 2'b00 */
+ rtl_set_bbreg(hw, 0x82c, 0x3, 1);
+
+- if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
+- rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD,
+- 0x77337777);
+- rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD,
+- 0x77337777);
+- rtl_set_bbreg(hw, RA_RFE_INV, 0x3ff00000, 0x010);
+- rtl_set_bbreg(hw, RB_RFE_INV, 0x3ff00000, 0x010);
+- }
++ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
++ _rtl8812ae_phy_set_rfe_reg_5g(hw);
+
+ rtl_set_bbreg(hw, RTXPATH, 0xf0, 0);
+ rtl_set_bbreg(hw, RCCK_RX, 0x0f000000, 0xf);
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h
+@@ -2424,6 +2424,7 @@
+ #define BMASKH4BITS 0xf0000000
+ #define BMASKOFDM_D 0xffc00000
+ #define BMASKCCK 0x3f3f3f3f
++#define BMASKRFEINV 0x3ff00000
+
+ #define BRFREGOFFSETMASK 0xfffff
+
usb-core-replace-p-with-pk.patch
tpm_tis_core-choose-appropriate-timeout-for-reading-burstcount.patch
alsa-hda-fix-cpu-lockup-when-stopping-the-cmd-dmas.patch
+fanotify-don-t-expose-eopenstale-to-userspace.patch
+tpm_tis_spi-use-single-function-to-transfer-data.patch
+tpm_tis_spi-abort-transfer-when-too-many-wait-states-are-signaled.patch
+tpm_tis_spi-check-correct-byte-for-wait-state-indicator.patch
+tpm_tis_spi-remove-limitation-of-transfers-to-max_spi_framesize-bytes.patch
+tpm_tis_spi-add-small-delay-after-last-transfer.patch
+tpm-msleep-delays-replace-with-usleep_range-in-i2c-nuvoton-driver.patch
+tpm-add-sleep-only-for-retry-in-i2c_nuvoton_write_status.patch
+tpm_crb-check-for-bad-response-size.patch
+vtpm-fix-missing-null-check.patch
+tpm-fix-handling-of-the-tpm-2.0-event-logs.patch
+asoc-cs4271-configure-reset-gpio-as-output.patch
+mlx5-fix-mlx5_ib_map_mr_sg-mr-length.patch
+infiniband-call-ipv6-route-lookup-via-the-stub-interface.patch
+dm-btree-fix-for-dm_btree_find_lowest_key.patch
+dm-raid-select-the-kconfig-option-config_md_raid0.patch
+dm-bufio-avoid-a-possible-abba-deadlock.patch
+dm-bufio-check-new-buffer-allocation-watermark-every-30-seconds.patch
+dm-mpath-requeue-after-a-small-delay-if-blk_get_request-fails.patch
+dm-mpath-split-and-rename-activate_path-to-prepare-for-its-expanded-use.patch
+dm-mpath-avoid-that-path-removal-can-trigger-an-infinite-loop.patch
+dm-mpath-delay-requeuing-while-path-initialization-is-in-progress.patch
+dm-cache-metadata-fail-operations-if-fail_io-mode-has-been-established.patch
+dm-bufio-make-the-parameter-retain_bytes-unsigned-long.patch
+dm-thin-metadata-call-precommit-before-saving-the-roots.patch
+dm-space-map-disk-fix-some-book-keeping-in-the-disk-space-map.patch
+md-update-slab_cache-before-releasing-new-stripes-when-stripes-resizing.patch
+md-md_closing-needs-to-be-cleared-after-called-md_set_readonly-or-do_md_stop.patch
+rtlwifi-rtl8821ae-setup-8812ae-rfe-according-to-device-type.patch
+mwifiex-mac-randomization-should-not-be-persistent.patch
+mwifiex-pcie-fix-cmd_buf-use-after-free-in-remove-reset.patch
--- /dev/null
+From 0afb7118ae021e80ecf70f5a3336e0935505518a Mon Sep 17 00:00:00 2001
+From: Nayna Jain <nayna@linux.vnet.ibm.com>
+Date: Fri, 10 Mar 2017 13:45:54 -0500
+Subject: tpm: add sleep only for retry in i2c_nuvoton_write_status()
+
+From: Nayna Jain <nayna@linux.vnet.ibm.com>
+
+commit 0afb7118ae021e80ecf70f5a3336e0935505518a upstream.
+
+Currently, there is an unnecessary 1 msec delay added in
+i2c_nuvoton_write_status() for the successful case. This
+function is called multiple times during send() and recv(),
+which implies adding multiple extra delays for every TPM
+operation.
+
+This patch calls usleep_range() only if retry is to be done.
+
+Signed-off-by: Nayna Jain <nayna@linux.vnet.ibm.com>
+Reviewed-by: Mimi Zohar <zohar@linux.vnet.ibm.com>
+Reviewed-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+Signed-off-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/char/tpm/tpm_i2c_nuvoton.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/drivers/char/tpm/tpm_i2c_nuvoton.c
++++ b/drivers/char/tpm/tpm_i2c_nuvoton.c
+@@ -124,8 +124,9 @@ static s32 i2c_nuvoton_write_status(stru
+ /* this causes the current command to be aborted */
+ for (i = 0, status = -1; i < TPM_I2C_RETRY_COUNT && status < 0; i++) {
+ status = i2c_nuvoton_write_buf(client, TPM_STS, 1, &data);
+- usleep_range(TPM_I2C_BUS_DELAY, TPM_I2C_BUS_DELAY
+- + TPM_I2C_DELAY_RANGE);
++ if (status < 0)
++ usleep_range(TPM_I2C_BUS_DELAY, TPM_I2C_BUS_DELAY
++ + TPM_I2C_DELAY_RANGE);
+ }
+ return status;
+ }
--- /dev/null
+From fd5c78694f3f1c875e293de7a641ba8a3d60d00d Mon Sep 17 00:00:00 2001
+From: Petr Vandrovec <petr@vmware.com>
+Date: Wed, 29 Mar 2017 00:43:30 -0700
+Subject: tpm: fix handling of the TPM 2.0 event logs
+
+From: Petr Vandrovec <petr@vmware.com>
+
+commit fd5c78694f3f1c875e293de7a641ba8a3d60d00d upstream.
+
+When the TPM2 log has entries with more than 3 digests, or with
+digests not listed in the log header, the log gets misparsed,
+eventually leading to a kernel complaint that the code tried to
+vmalloc 512MB of memory (I have no idea what would happen on a bigger
+system).
+
+So the code should not parse only the first 3 digests: both the event
+header and the event itself are already in memory, so we can parse
+any number of digests, as long as we do not try to parse all of
+memory when given a count of 0xFFFFFFFF.
+
+So this change:
+
+* Rejects an event entry with more digests than the log header
+ describes. Digest types should be unique, and all should be
+ described in the log header, so there cannot be more digests in the
+ event than in the header.
+
+* Rejects an event entry with a digest that is not described in the
+ log header. In theory the code could hardcode information about
+ digest IDs already assigned by the TCG, but if firmware authors
+ cannot get the event log format right, why should anyone believe
+ they got the event log content right?
+
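+For reference, a minimal sketch of the lookup this depends on (types
+as in <linux/types.h>; the field names follow the kernel's TCG 2.0
+log structures, but treat the layout as an assumption rather than the
+exact definition): every digest size must come from the header's
+algorithm table, so an unknown alg_id makes the rest of the log
+unparseable.
+
+  struct specid_alg { u16 alg_id; u16 digest_size; };
+
+  /* Returns 0 when alg_id is absent from the header's table. */
+  static u16 digest_size_for(const struct specid_alg *algs,
+                             u32 num_algs, u16 alg_id)
+  {
+          u32 j;
+
+          for (j = 0; j < num_algs; j++)
+                  if (algs[j].alg_id == alg_id)
+                          return algs[j].digest_size;
+          return 0;
+  }
+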
+Fixes: 4d23cc323cdb ("tpm: add securityfs support for TPM 2.0 firmware event log")
+Signed-off-by: Petr Vandrovec <petr@vmware.com>
+Reviewed-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+Signed-off-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/char/tpm/tpm2_eventlog.c | 14 ++++++++++----
+ 1 file changed, 10 insertions(+), 4 deletions(-)
+
+--- a/drivers/char/tpm/tpm2_eventlog.c
++++ b/drivers/char/tpm/tpm2_eventlog.c
+@@ -56,18 +56,24 @@ static int calc_tpm2_event_size(struct t
+
+ efispecid = (struct tcg_efi_specid_event *)event_header->event;
+
+- for (i = 0; (i < event->count) && (i < TPM2_ACTIVE_PCR_BANKS);
+- i++) {
++ /* Check if event is malformed. */
++ if (event->count > efispecid->num_algs)
++ return 0;
++
++ for (i = 0; i < event->count; i++) {
+ halg_size = sizeof(event->digests[i].alg_id);
+ memcpy(&halg, marker, halg_size);
+ marker = marker + halg_size;
+- for (j = 0; (j < efispecid->num_algs); j++) {
++ for (j = 0; j < efispecid->num_algs; j++) {
+ if (halg == efispecid->digest_sizes[j].alg_id) {
+- marker = marker +
++ marker +=
+ efispecid->digest_sizes[j].digest_size;
+ break;
+ }
+ }
++ /* Algorithm without known length. Such event is unparseable. */
++ if (j == efispecid->num_algs)
++ return 0;
+ }
+
+ event_field = (struct tcg_event_field *)marker;
--- /dev/null
+From a233a0289cf9a96ef9b42c730a7621ccbf9a6f98 Mon Sep 17 00:00:00 2001
+From: Nayna Jain <nayna@linux.vnet.ibm.com>
+Date: Fri, 10 Mar 2017 13:45:53 -0500
+Subject: tpm: msleep() delays - replace with usleep_range() in i2c nuvoton driver
+
+From: Nayna Jain <nayna@linux.vnet.ibm.com>
+
+commit a233a0289cf9a96ef9b42c730a7621ccbf9a6f98 upstream.
+
+Commit 500462a9de65 ("timers: Switch to a non-cascading wheel")
+replaced the 'classic' timer wheel, which aimed for near-'exact'
+expiry of the timers. The analysis behind it was that the vast
+majority of timeout timers are used as safeguards, not as real
+timers, and are cancelled or rearmed before expiration. The only
+exception noted was networking timers with a small expiry time.
+
+Not included in the analysis was the TPM polling timer, which resulted
+in a longer normal delay and, every so often, a very long delay. The
+non-cascading wheel delay is based on CONFIG_HZ. For a description of
+the different levels and their delays, refer to the comments in
+kernel/time/timer.c.
+
+Below are the delays given for levels 0 - 2, which explain the longer
+"normal" delays and the very long delays seen on systems with
+CONFIG_HZ=250.
+
+* HZ 1000 steps
+ * Level Offset Granularity Range
+ * 0 0 1 ms 0 ms - 63 ms
+ * 1 64 8 ms 64 ms - 511 ms
+ * 2 128 64 ms 512 ms - 4095 ms (512ms - ~4s)
+
+* HZ 250
+ * Level Offset Granularity Range
+ * 0 0 4 ms 0 ms - 255 ms
+ * 1 64 32 ms 256 ms - 2047 ms (256ms - ~2s)
+ * 2 128 256 ms 2048 ms - 16383 ms (~2s - ~16s)
+
+Below is a comparison of extending the TPM with 1000 measurements,
+using msleep() vs. usleep_range(), when configured for 1000 HZ vs.
+250 HZ, before and after commit 500462a9de65.
+
+linux-4.7 | msleep() usleep_range()
+1000 hz: 0m44.628s | 1m34.497s 29.243s
+250 hz: 1m28.510s | 4m49.269s 32.386s
+
+linux-4.7 | min-max (msleep) min-max (usleep_range)
+1000 hz: 0:017 - 2:760s | 0:015 - 3:967s 0:014 - 0:418s
+250 hz: 0:028 - 1:954s | 0:040 - 4:096s 0:016 - 0:816s
+
+This patch replaces the msleep() calls in the i2c nuvoton driver with
+usleep_range() calls using a consistent max range value.
+
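+As a worked example of why the floor matters: with CONFIG_HZ=250 one
+jiffy is 4 ms, so msleep(1) already sleeps at least 4 ms before the
+wheel's level granularity is added, whereas usleep_range() is backed
+by hrtimers and honors its bounds regardless of HZ. A sketch of the
+replacement pattern (constants here are illustrative):
+
+  #include <linux/delay.h>
+
+  #define BUS_DELAY_US   1000
+  #define DELAY_SLACK_US 300  /* slack lets the kernel batch wakeups */
+
+  static void bus_delay(void)
+  {
+          usleep_range(BUS_DELAY_US, BUS_DELAY_US + DELAY_SLACK_US);
+  }
+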
+Signed-off-by: Mimi Zohar <zohar@linux.vnet.ibm.com>
+Signed-off-by: Nayna Jain <nayna@linux.vnet.ibm.com>
+Reviewed-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+Signed-off-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/char/tpm/tpm_i2c_nuvoton.c | 23 +++++++++++++++--------
+ 1 file changed, 15 insertions(+), 8 deletions(-)
+
+--- a/drivers/char/tpm/tpm_i2c_nuvoton.c
++++ b/drivers/char/tpm/tpm_i2c_nuvoton.c
+@@ -49,9 +49,10 @@
+ */
+ #define TPM_I2C_MAX_BUF_SIZE 32
+ #define TPM_I2C_RETRY_COUNT 32
+-#define TPM_I2C_BUS_DELAY 1 /* msec */
+-#define TPM_I2C_RETRY_DELAY_SHORT 2 /* msec */
+-#define TPM_I2C_RETRY_DELAY_LONG 10 /* msec */
++#define TPM_I2C_BUS_DELAY 1000 /* usec */
++#define TPM_I2C_RETRY_DELAY_SHORT (2 * 1000) /* usec */
++#define TPM_I2C_RETRY_DELAY_LONG (10 * 1000) /* usec */
++#define TPM_I2C_DELAY_RANGE 300 /* usec */
+
+ #define OF_IS_TPM2 ((void *)1)
+ #define I2C_IS_TPM2 1
+@@ -123,7 +124,8 @@ static s32 i2c_nuvoton_write_status(stru
+ /* this causes the current command to be aborted */
+ for (i = 0, status = -1; i < TPM_I2C_RETRY_COUNT && status < 0; i++) {
+ status = i2c_nuvoton_write_buf(client, TPM_STS, 1, &data);
+- msleep(TPM_I2C_BUS_DELAY);
++ usleep_range(TPM_I2C_BUS_DELAY, TPM_I2C_BUS_DELAY
++ + TPM_I2C_DELAY_RANGE);
+ }
+ return status;
+ }
+@@ -160,7 +162,8 @@ static int i2c_nuvoton_get_burstcount(st
+ burst_count = min_t(u8, TPM_I2C_MAX_BUF_SIZE, data);
+ break;
+ }
+- msleep(TPM_I2C_BUS_DELAY);
++ usleep_range(TPM_I2C_BUS_DELAY, TPM_I2C_BUS_DELAY
++ + TPM_I2C_DELAY_RANGE);
+ } while (time_before(jiffies, stop));
+
+ return burst_count;
+@@ -203,13 +206,17 @@ static int i2c_nuvoton_wait_for_stat(str
+ return 0;
+
+ /* use polling to wait for the event */
+- ten_msec = jiffies + msecs_to_jiffies(TPM_I2C_RETRY_DELAY_LONG);
++ ten_msec = jiffies + usecs_to_jiffies(TPM_I2C_RETRY_DELAY_LONG);
+ stop = jiffies + timeout;
+ do {
+ if (time_before(jiffies, ten_msec))
+- msleep(TPM_I2C_RETRY_DELAY_SHORT);
++ usleep_range(TPM_I2C_RETRY_DELAY_SHORT,
++ TPM_I2C_RETRY_DELAY_SHORT
++ + TPM_I2C_DELAY_RANGE);
+ else
+- msleep(TPM_I2C_RETRY_DELAY_LONG);
++ usleep_range(TPM_I2C_RETRY_DELAY_LONG,
++ TPM_I2C_RETRY_DELAY_LONG
++ + TPM_I2C_DELAY_RANGE);
+ status_valid = i2c_nuvoton_check_status(chip, mask,
+ value);
+ if (status_valid)
--- /dev/null
+From 8569defde8057258835c51ce01a33de82e14b148 Mon Sep 17 00:00:00 2001
+From: Jerry Snitselaar <jsnitsel@redhat.com>
+Date: Fri, 10 Mar 2017 17:46:04 -0700
+Subject: tpm_crb: check for bad response size
+
+From: Jerry Snitselaar <jsnitsel@redhat.com>
+
+commit 8569defde8057258835c51ce01a33de82e14b148 upstream.
+
+Make sure the size of the response buffer is at least 6 bytes, or we
+will underflow and pass a large size_t to memcpy_fromio(). This was
+encountered while testing an earlier version of the locality patchset.
+
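+A small standalone sketch of the failure mode, where 'expected' stands
+in for the length field read from the response header:
+
+  #include <stdint.h>
+  #include <stdio.h>
+
+  int main(void)
+  {
+          uint32_t expected = 4;  /* bogus: shorter than the 6-byte header */
+          size_t n = (size_t)expected - 6;  /* wraps to near SIZE_MAX */
+
+          printf("memcpy_fromio() would be asked for %zu bytes\n", n);
+          return 0;
+  }
+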
+Fixes: 30fc8d138e912 ("tpm: TPM 2.0 CRB Interface")
+Signed-off-by: Jerry Snitselaar <jsnitsel@redhat.com>
+Reviewed-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+Signed-off-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/char/tpm/tpm_crb.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/drivers/char/tpm/tpm_crb.c
++++ b/drivers/char/tpm/tpm_crb.c
+@@ -176,8 +176,7 @@ static int crb_recv(struct tpm_chip *chi
+
+ memcpy_fromio(buf, priv->rsp, 6);
+ expected = be32_to_cpup((__be32 *) &buf[2]);
+-
+- if (expected > count)
++ if (expected > count || expected < 6)
+ return -EIO;
+
+ memcpy_fromio(&buf[6], &priv->rsp[6], expected - 6);
--- /dev/null
+From 975094ddc369a32f27210248bdd9bbd153061b00 Mon Sep 17 00:00:00 2001
+From: Peter Huewe <peter.huewe@infineon.com>
+Date: Thu, 2 Mar 2017 13:03:12 +0000
+Subject: tpm_tis_spi: Abort transfer when too many wait states are signaled
+
+From: Peter Huewe <peter.huewe@infineon.com>
+
+commit 975094ddc369a32f27210248bdd9bbd153061b00 upstream.
+
+Abort the transfer with ETIMEDOUT when the TPM signals more than
+TPM_RETRY wait states. Continuing with the transfer in this state
+will only lead to arbitrary failures in other parts of the code.
+
+Fixes: 0edbfea537d1 ("tpm/tpm_tis_spi: Add support for spi phy")
+Signed-off-by: Alexander Steffen <Alexander.Steffen@infineon.com>
+Signed-off-by: Peter Huewe <peter.huewe@infineon.com>
+Reviewed-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+Tested-by: Benoit Houyere <benoit.houyere@st.com>
+Signed-off-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/char/tpm/tpm_tis_spi.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/drivers/char/tpm/tpm_tis_spi.c
++++ b/drivers/char/tpm/tpm_tis_spi.c
+@@ -101,6 +101,11 @@ static int tpm_tis_spi_transfer(struct t
+ goto exit;
+ }
+
++ if (i == TPM_RETRY) {
++ ret = -ETIMEDOUT;
++ goto exit;
++ }
++
+ spi_xfer.cs_change = 0;
+ spi_xfer.len = len;
+
--- /dev/null
+From 5cc0101d1f88500f8901d01b035af743215d4c3a Mon Sep 17 00:00:00 2001
+From: Peter Huewe <peter.huewe@infineon.com>
+Date: Thu, 2 Mar 2017 13:03:15 +0000
+Subject: tpm_tis_spi: Add small delay after last transfer
+
+From: Peter Huewe <peter.huewe@infineon.com>
+
+commit 5cc0101d1f88500f8901d01b035af743215d4c3a upstream.
+
+Testing the implementation with a Raspberry Pi 2 showed that under some
+circumstances its SPI master erroneously releases the CS line before the
+transfer is complete, i.e. before the end of the last clock. In this case
+the TPM ignores the transfer and misses for example the GO command. The
+driver is unable to detect this communication problem and will wait for a
+command response that is never going to arrive, timing out eventually.
+
+As a workaround, the small delay ensures that the CS line is held long
+enough, even with a faulty SPI master. Other SPI masters are not affected,
+except for a negligible performance penalty.
+
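+In sketch form (5 usec is the value the patch picks; delay_usecs is
+the spi_transfer field for post-transfer delays in kernels of this
+vintage), the pause is applied after the last clock and before chip
+select is deasserted:
+
+  struct spi_transfer xfer = {
+          .tx_buf      = buf,  /* placeholder buffer */
+          .len         = len,  /* placeholder length */
+          .cs_change   = 0,
+          .delay_usecs = 5,    /* hold CS past the last clock */
+  };
+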
+Fixes: 0edbfea537d1 ("tpm/tpm_tis_spi: Add support for spi phy")
+Signed-off-by: Alexander Steffen <Alexander.Steffen@infineon.com>
+Signed-off-by: Peter Huewe <peter.huewe@infineon.com>
+Reviewed-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+Tested-by: Benoit Houyere <benoit.houyere@st.com>
+Signed-off-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/char/tpm/tpm_tis_spi.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/char/tpm/tpm_tis_spi.c
++++ b/drivers/char/tpm/tpm_tis_spi.c
+@@ -111,6 +111,7 @@ static int tpm_tis_spi_transfer(struct t
+
+ spi_xfer.cs_change = 0;
+ spi_xfer.len = transfer_len;
++ spi_xfer.delay_usecs = 5;
+
+ if (direction) {
+ spi_xfer.tx_buf = NULL;
--- /dev/null
+From e110cc69dc2ad679d6d478df636b99b14e6fbbc9 Mon Sep 17 00:00:00 2001
+From: Peter Huewe <peter.huewe@infineon.com>
+Date: Thu, 2 Mar 2017 13:03:13 +0000
+Subject: tpm_tis_spi: Check correct byte for wait state indicator
+
+From: Peter Huewe <peter.huewe@infineon.com>
+
+commit e110cc69dc2ad679d6d478df636b99b14e6fbbc9 upstream.
+
+Wait states are signaled in the last byte received from the TPM in
+response to the header, not the first byte. Check rx_buf[3] instead of
+rx_buf[0].
+
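+For orientation, a sketch of the distinction (byte meanings assumed
+from the TCG PTP specification):
+
+  /*
+   * rx_buf[0..3] answer the 4-byte header; bit 0 of rx_buf[3], the
+   * byte clocked in with the *last* header byte, is the ready flag.
+   * Only the later 1-byte polling transfers answer into rx_buf[0].
+   */
+  if ((phy->rx_buf[3] & 0x01) == 0)
+          poll_wait_states();  /* placeholder for the retry loop */
+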
+Fixes: 0edbfea537d1 ("tpm/tpm_tis_spi: Add support for spi phy")
+Signed-off-by: Alexander Steffen <Alexander.Steffen@infineon.com>
+Signed-off-by: Peter Huewe <peter.huewe@infineon.com>
+Reviewed-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+Tested-by: Benoit Houyere <benoit.houyere@st.com>
+Signed-off-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/char/tpm/tpm_tis_spi.c | 34 +++++++++++++++++-----------------
+ 1 file changed, 17 insertions(+), 17 deletions(-)
+
+--- a/drivers/char/tpm/tpm_tis_spi.c
++++ b/drivers/char/tpm/tpm_tis_spi.c
+@@ -85,25 +85,25 @@ static int tpm_tis_spi_transfer(struct t
+ if (ret < 0)
+ goto exit;
+
+- phy->tx_buf[0] = 0;
++ if ((phy->rx_buf[3] & 0x01) == 0) {
++ // handle SPI wait states
++ phy->tx_buf[0] = 0;
+
+- /* According to TCG PTP specification, if there is no TPM present at
+- * all, then the design has a weak pull-up on MISO. If a TPM is not
+- * present, a pull-up on MISO means that the SB controller sees a 1,
+- * and will latch in 0xFF on the read.
+- */
+- for (i = 0; (phy->rx_buf[0] & 0x01) == 0 && i < TPM_RETRY; i++) {
+- spi_xfer.len = 1;
+- spi_message_init(&m);
+- spi_message_add_tail(&spi_xfer, &m);
+- ret = spi_sync_locked(phy->spi_device, &m);
+- if (ret < 0)
+- goto exit;
+- }
++ for (i = 0; i < TPM_RETRY; i++) {
++ spi_xfer.len = 1;
++ spi_message_init(&m);
++ spi_message_add_tail(&spi_xfer, &m);
++ ret = spi_sync_locked(phy->spi_device, &m);
++ if (ret < 0)
++ goto exit;
++ if (phy->rx_buf[0] & 0x01)
++ break;
++ }
+
+- if (i == TPM_RETRY) {
+- ret = -ETIMEDOUT;
+- goto exit;
++ if (i == TPM_RETRY) {
++ ret = -ETIMEDOUT;
++ goto exit;
++ }
+ }
+
+ spi_xfer.cs_change = 0;
--- /dev/null
+From 591e48c26ced7c455751eef27fb5963e902c2137 Mon Sep 17 00:00:00 2001
+From: Peter Huewe <peter.huewe@infineon.com>
+Date: Thu, 2 Mar 2017 13:03:14 +0000
+Subject: tpm_tis_spi: Remove limitation of transfers to MAX_SPI_FRAMESIZE bytes
+
+From: Peter Huewe <peter.huewe@infineon.com>
+
+commit 591e48c26ced7c455751eef27fb5963e902c2137 upstream.
+
+Limiting transfers to MAX_SPI_FRAMESIZE was not expected by the upper
+layers, as tpm_tis has no such limitation. Add a loop to hide that
+limitation.
+
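+The shape of the added loop, in sketch form (transfer_chunk() is a
+placeholder for the per-frame work the patch moves inside the loop):
+
+  while (len) {
+          u8 n = min_t(u16, len, MAX_SPI_FRAMESIZE);
+
+          transfer_chunk(addr, n, buffer);
+          len -= n;
+          buffer += n;
+  }
+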
+v2: Moved scope of spi_message to the top as requested by Jarkko
+Fixes: 0edbfea537d1 ("tpm/tpm_tis_spi: Add support for spi phy")
+Signed-off-by: Alexander Steffen <Alexander.Steffen@infineon.com>
+Signed-off-by: Peter Huewe <peter.huewe@infineon.com>
+Reviewed-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+Tested-by: Benoit Houyere <benoit.houyere@st.com>
+Signed-off-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/char/tpm/tpm_tis_spi.c | 109 ++++++++++++++++++++++-------------------
+ 1 file changed, 59 insertions(+), 50 deletions(-)
+
+--- a/drivers/char/tpm/tpm_tis_spi.c
++++ b/drivers/char/tpm/tpm_tis_spi.c
+@@ -60,67 +60,76 @@ static int tpm_tis_spi_transfer(struct t
+ u8 *buffer, u8 direction)
+ {
+ struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
+- int ret, i;
++ int ret = 0;
++ int i;
+ struct spi_message m;
+- struct spi_transfer spi_xfer = {
+- .tx_buf = phy->tx_buf,
+- .rx_buf = phy->rx_buf,
+- .len = 4,
+- .cs_change = 1,
+- };
+-
+- if (len > MAX_SPI_FRAMESIZE)
+- return -ENOMEM;
+-
+- phy->tx_buf[0] = direction | (len - 1);
+- phy->tx_buf[1] = 0xd4;
+- phy->tx_buf[2] = addr >> 8;
+- phy->tx_buf[3] = addr;
+-
+- spi_message_init(&m);
+- spi_message_add_tail(&spi_xfer, &m);
++ struct spi_transfer spi_xfer;
++ u8 transfer_len;
+
+ spi_bus_lock(phy->spi_device->master);
+- ret = spi_sync_locked(phy->spi_device, &m);
+- if (ret < 0)
+- goto exit;
+-
+- if ((phy->rx_buf[3] & 0x01) == 0) {
+- // handle SPI wait states
+- phy->tx_buf[0] = 0;
+-
+- for (i = 0; i < TPM_RETRY; i++) {
+- spi_xfer.len = 1;
+- spi_message_init(&m);
+- spi_message_add_tail(&spi_xfer, &m);
+- ret = spi_sync_locked(phy->spi_device, &m);
+- if (ret < 0)
++
++ while (len) {
++ transfer_len = min_t(u16, len, MAX_SPI_FRAMESIZE);
++
++ phy->tx_buf[0] = direction | (transfer_len - 1);
++ phy->tx_buf[1] = 0xd4;
++ phy->tx_buf[2] = addr >> 8;
++ phy->tx_buf[3] = addr;
++
++ memset(&spi_xfer, 0, sizeof(spi_xfer));
++ spi_xfer.tx_buf = phy->tx_buf;
++ spi_xfer.rx_buf = phy->rx_buf;
++ spi_xfer.len = 4;
++ spi_xfer.cs_change = 1;
++
++ spi_message_init(&m);
++ spi_message_add_tail(&spi_xfer, &m);
++ ret = spi_sync_locked(phy->spi_device, &m);
++ if (ret < 0)
++ goto exit;
++
++ if ((phy->rx_buf[3] & 0x01) == 0) {
++ // handle SPI wait states
++ phy->tx_buf[0] = 0;
++
++ for (i = 0; i < TPM_RETRY; i++) {
++ spi_xfer.len = 1;
++ spi_message_init(&m);
++ spi_message_add_tail(&spi_xfer, &m);
++ ret = spi_sync_locked(phy->spi_device, &m);
++ if (ret < 0)
++ goto exit;
++ if (phy->rx_buf[0] & 0x01)
++ break;
++ }
++
++ if (i == TPM_RETRY) {
++ ret = -ETIMEDOUT;
+ goto exit;
+- if (phy->rx_buf[0] & 0x01)
+- break;
++ }
+ }
+
+- if (i == TPM_RETRY) {
+- ret = -ETIMEDOUT;
+- goto exit;
++ spi_xfer.cs_change = 0;
++ spi_xfer.len = transfer_len;
++
++ if (direction) {
++ spi_xfer.tx_buf = NULL;
++ spi_xfer.rx_buf = buffer;
++ } else {
++ spi_xfer.tx_buf = buffer;
++ spi_xfer.rx_buf = NULL;
+ }
+- }
+
+- spi_xfer.cs_change = 0;
+- spi_xfer.len = len;
++ spi_message_init(&m);
++ spi_message_add_tail(&spi_xfer, &m);
++ ret = spi_sync_locked(phy->spi_device, &m);
++ if (ret < 0)
++ goto exit;
+
+- if (direction) {
+- spi_xfer.tx_buf = NULL;
+- spi_xfer.rx_buf = buffer;
+- } else {
+- spi_xfer.tx_buf = buffer;
+- spi_xfer.rx_buf = NULL;
++ len -= transfer_len;
++ buffer += transfer_len;
+ }
+
+- spi_message_init(&m);
+- spi_message_add_tail(&spi_xfer, &m);
+- ret = spi_sync_locked(phy->spi_device, &m);
+-
+ exit:
+ spi_bus_unlock(phy->spi_device->master);
+ return ret;
--- /dev/null
+From f848f2143ae42dc0918400039257a893835254d1 Mon Sep 17 00:00:00 2001
+From: Peter Huewe <peter.huewe@infineon.com>
+Date: Thu, 2 Mar 2017 13:03:11 +0000
+Subject: tpm_tis_spi: Use single function to transfer data
+
+From: Peter Huewe <peter.huewe@infineon.com>
+
+commit f848f2143ae42dc0918400039257a893835254d1 upstream.
+
+The algorithm for sending data to the TPM is mostly identical to the
+algorithm for receiving data from the TPM, so a single function is
+sufficient to handle both cases.
+
+This is a prerequisite for all the other fixes, so we don't have to
+fix everything twice (send/receive).
+
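+The unification hinges on the first header byte, where bit 7 encodes
+the direction and the low bits carry size - 1 (sketch):
+
+  /* 0x80 = read from the TPM, 0x00 = write to it. */
+  phy->tx_buf[0] = direction | (len - 1);
+
+  /* Each wrapper then reduces to a single call: */
+  tpm_tis_spi_transfer(data, addr, len, result, 0x80);  /* read */
+  tpm_tis_spi_transfer(data, addr, len, value, 0x00);   /* write */
+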
+v2: u16 instead of u8 for the length.
+Fixes: 0edbfea537d1 ("tpm/tpm_tis_spi: Add support for spi phy")
+Signed-off-by: Alexander Steffen <Alexander.Steffen@infineon.com>
+Signed-off-by: Peter Huewe <peter.huewe@infineon.com>
+Reviewed-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+Tested-by: Benoit Houyere <benoit.houyere@st.com>
+Signed-off-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/char/tpm/tpm_tis_spi.c | 87 +++++++++++------------------------------
+ 1 file changed, 24 insertions(+), 63 deletions(-)
+
+--- a/drivers/char/tpm/tpm_tis_spi.c
++++ b/drivers/char/tpm/tpm_tis_spi.c
+@@ -47,8 +47,8 @@ struct tpm_tis_spi_phy {
+ struct tpm_tis_data priv;
+ struct spi_device *spi_device;
+
+- u8 tx_buf[MAX_SPI_FRAMESIZE + 4];
+- u8 rx_buf[MAX_SPI_FRAMESIZE + 4];
++ u8 tx_buf[4];
++ u8 rx_buf[4];
+ };
+
+ static inline struct tpm_tis_spi_phy *to_tpm_tis_spi_phy(struct tpm_tis_data *data)
+@@ -56,8 +56,8 @@ static inline struct tpm_tis_spi_phy *to
+ return container_of(data, struct tpm_tis_spi_phy, priv);
+ }
+
+-static int tpm_tis_spi_read_bytes(struct tpm_tis_data *data, u32 addr,
+- u16 len, u8 *result)
++static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
++ u8 *buffer, u8 direction)
+ {
+ struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
+ int ret, i;
+@@ -66,17 +66,17 @@ static int tpm_tis_spi_read_bytes(struct
+ .tx_buf = phy->tx_buf,
+ .rx_buf = phy->rx_buf,
+ .len = 4,
++ .cs_change = 1,
+ };
+
+ if (len > MAX_SPI_FRAMESIZE)
+ return -ENOMEM;
+
+- phy->tx_buf[0] = 0x80 | (len - 1);
++ phy->tx_buf[0] = direction | (len - 1);
+ phy->tx_buf[1] = 0xd4;
+- phy->tx_buf[2] = (addr >> 8) & 0xFF;
+- phy->tx_buf[3] = addr & 0xFF;
++ phy->tx_buf[2] = addr >> 8;
++ phy->tx_buf[3] = addr;
+
+- spi_xfer.cs_change = 1;
+ spi_message_init(&m);
+ spi_message_add_tail(&spi_xfer, &m);
+
+@@ -85,7 +85,7 @@ static int tpm_tis_spi_read_bytes(struct
+ if (ret < 0)
+ goto exit;
+
+- memset(phy->tx_buf, 0, len);
++ phy->tx_buf[0] = 0;
+
+ /* According to TCG PTP specification, if there is no TPM present at
+ * all, then the design has a weak pull-up on MISO. If a TPM is not
+@@ -103,7 +103,14 @@ static int tpm_tis_spi_read_bytes(struct
+
+ spi_xfer.cs_change = 0;
+ spi_xfer.len = len;
+- spi_xfer.rx_buf = result;
++
++ if (direction) {
++ spi_xfer.tx_buf = NULL;
++ spi_xfer.rx_buf = buffer;
++ } else {
++ spi_xfer.tx_buf = buffer;
++ spi_xfer.rx_buf = NULL;
++ }
+
+ spi_message_init(&m);
+ spi_message_add_tail(&spi_xfer, &m);
+@@ -114,62 +121,16 @@ exit:
+ return ret;
+ }
+
++static int tpm_tis_spi_read_bytes(struct tpm_tis_data *data, u32 addr,
++ u16 len, u8 *result)
++{
++ return tpm_tis_spi_transfer(data, addr, len, result, 0x80);
++}
++
+ static int tpm_tis_spi_write_bytes(struct tpm_tis_data *data, u32 addr,
+ u16 len, u8 *value)
+ {
+- struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
+- int ret, i;
+- struct spi_message m;
+- struct spi_transfer spi_xfer = {
+- .tx_buf = phy->tx_buf,
+- .rx_buf = phy->rx_buf,
+- .len = 4,
+- };
+-
+- if (len > MAX_SPI_FRAMESIZE)
+- return -ENOMEM;
+-
+- phy->tx_buf[0] = len - 1;
+- phy->tx_buf[1] = 0xd4;
+- phy->tx_buf[2] = (addr >> 8) & 0xFF;
+- phy->tx_buf[3] = addr & 0xFF;
+-
+- spi_xfer.cs_change = 1;
+- spi_message_init(&m);
+- spi_message_add_tail(&spi_xfer, &m);
+-
+- spi_bus_lock(phy->spi_device->master);
+- ret = spi_sync_locked(phy->spi_device, &m);
+- if (ret < 0)
+- goto exit;
+-
+- memset(phy->tx_buf, 0, len);
+-
+- /* According to TCG PTP specification, if there is no TPM present at
+- * all, then the design has a weak pull-up on MISO. If a TPM is not
+- * present, a pull-up on MISO means that the SB controller sees a 1,
+- * and will latch in 0xFF on the read.
+- */
+- for (i = 0; (phy->rx_buf[0] & 0x01) == 0 && i < TPM_RETRY; i++) {
+- spi_xfer.len = 1;
+- spi_message_init(&m);
+- spi_message_add_tail(&spi_xfer, &m);
+- ret = spi_sync_locked(phy->spi_device, &m);
+- if (ret < 0)
+- goto exit;
+- }
+-
+- spi_xfer.len = len;
+- spi_xfer.tx_buf = value;
+- spi_xfer.cs_change = 0;
+- spi_xfer.tx_buf = value;
+- spi_message_init(&m);
+- spi_message_add_tail(&spi_xfer, &m);
+- ret = spi_sync_locked(phy->spi_device, &m);
+-
+-exit:
+- spi_bus_unlock(phy->spi_device->master);
+- return ret;
++ return tpm_tis_spi_transfer(data, addr, len, value, 0);
+ }
+
+ static int tpm_tis_spi_read16(struct tpm_tis_data *data, u32 addr, u16 *result)
--- /dev/null
+From 31574d321c70f6d3b40fe98f9b2eafd9a903fef9 Mon Sep 17 00:00:00 2001
+From: "Hon Ching \\(Vicky\\) Lo" <honclo@linux.vnet.ibm.com>
+Date: Wed, 15 Mar 2017 01:28:07 -0400
+Subject: vTPM: Fix missing NULL check
+
+From: Hon Ching (Vicky) Lo <honclo@linux.vnet.ibm.com>
+
+commit 31574d321c70f6d3b40fe98f9b2eafd9a903fef9 upstream.
+
+The current code passes the address of tpm_chip as the argument to
+dev_get_drvdata() without a prior NULL check in
+tpm_ibmvtpm_get_desired_dma(). This resulted in an oops during kernel
+boot when vTPM is enabled in a Power partition configured in active
+memory sharing mode.
+
+The vio_driver's get_desired_dma() is called before probe(), which
+for vtpm is tpm_ibmvtpm_probe(), and it is this latter function that
+initializes the driver and sets the data. Attempting to get the data
+before probe() caused the problem.
+
+This patch adds a NULL check to tpm_ibmvtpm_get_desired_dma().
+
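+The guard, in sketch form (equivalent to the hunk below):
+
+  struct tpm_chip *chip = dev_get_drvdata(&vdev->dev);
+
+  if (!chip)  /* called before probe(): no drvdata yet */
+          return CRQ_RES_BUF_SIZE + PAGE_SIZE;
+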
+Fixes: 9e0d39d8a6a0 ("tpm: Remove useless priv field in struct tpm_vendor_specific")
+Signed-off-by: Hon Ching (Vicky) Lo <honclo@linux.vnet.ibm.com>
+Reviewed-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+Signed-off-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/char/tpm/tpm_ibmvtpm.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+--- a/drivers/char/tpm/tpm_ibmvtpm.c
++++ b/drivers/char/tpm/tpm_ibmvtpm.c
+@@ -299,6 +299,8 @@ static int tpm_ibmvtpm_remove(struct vio
+ }
+
+ kfree(ibmvtpm);
++ /* For tpm_ibmvtpm_get_desired_dma */
++ dev_set_drvdata(&vdev->dev, NULL);
+
+ return 0;
+ }
+@@ -313,14 +315,16 @@ static int tpm_ibmvtpm_remove(struct vio
+ static unsigned long tpm_ibmvtpm_get_desired_dma(struct vio_dev *vdev)
+ {
+ struct tpm_chip *chip = dev_get_drvdata(&vdev->dev);
+- struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
++ struct ibmvtpm_dev *ibmvtpm;
+
+ /*
+ * ibmvtpm initializes at probe time, so the data we are
+ * asking for may not be set yet. Estimate that 4K required
+ * for TCE-mapped buffer in addition to CRQ.
+ */
+- if (!ibmvtpm)
++ if (chip)
++ ibmvtpm = dev_get_drvdata(&chip->dev);
++ else
+ return CRQ_RES_BUF_SIZE + PAGE_SIZE;
+
+ return CRQ_RES_BUF_SIZE + ibmvtpm->rtce_size;