--- /dev/null
+From 10aa14527f458e9867cf3d2cc6b8cb0f6704448b Mon Sep 17 00:00:00 2001
+From: Tomas Bortoli <tomasbortoli@gmail.com>
+Date: Fri, 27 Jul 2018 13:05:58 +0200
+Subject: 9p: fix multiple NULL-pointer-dereferences
+
+From: Tomas Bortoli <tomasbortoli@gmail.com>
+
+commit 10aa14527f458e9867cf3d2cc6b8cb0f6704448b upstream.
+
+Add checks to prevent GPFs from being raised.
+
+Link: http://lkml.kernel.org/r/20180727110558.5479-1-tomasbortoli@gmail.com
+Signed-off-by: Tomas Bortoli <tomasbortoli@gmail.com>
+Reported-by: syzbot+1a262da37d3bead15c39@syzkaller.appspotmail.com
+Cc: stable@vger.kernel.org
+Signed-off-by: Dominique Martinet <dominique.martinet@cea.fr>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/9p/trans_fd.c | 5 ++++-
+ net/9p/trans_rdma.c | 3 +++
+ net/9p/trans_virtio.c | 3 +++
+ net/9p/trans_xen.c | 3 +++
+ 4 files changed, 13 insertions(+), 1 deletion(-)
+
+--- a/net/9p/trans_fd.c
++++ b/net/9p/trans_fd.c
+@@ -951,7 +951,7 @@ p9_fd_create_tcp(struct p9_client *clien
+ if (err < 0)
+ return err;
+
+- if (valid_ipaddr4(addr) < 0)
++ if (addr == NULL || valid_ipaddr4(addr) < 0)
+ return -EINVAL;
+
+ csocket = NULL;
+@@ -1001,6 +1001,9 @@ p9_fd_create_unix(struct p9_client *clie
+
+ csocket = NULL;
+
++ if (addr == NULL)
++ return -EINVAL;
++
+ if (strlen(addr) >= UNIX_PATH_MAX) {
+ pr_err("%s (%d): address too long: %s\n",
+ __func__, task_pid_nr(current), addr);
+--- a/net/9p/trans_rdma.c
++++ b/net/9p/trans_rdma.c
+@@ -646,6 +646,9 @@ rdma_create_trans(struct p9_client *clie
+ struct rdma_conn_param conn_param;
+ struct ib_qp_init_attr qp_attr;
+
++ if (addr == NULL)
++ return -EINVAL;
++
+ /* Parse the transport specific mount options */
+ err = parse_opts(args, &opts);
+ if (err < 0)
+--- a/net/9p/trans_virtio.c
++++ b/net/9p/trans_virtio.c
+@@ -650,6 +650,9 @@ p9_virtio_create(struct p9_client *clien
+ int ret = -ENOENT;
+ int found = 0;
+
++ if (devname == NULL)
++ return -EINVAL;
++
+ mutex_lock(&virtio_9p_lock);
+ list_for_each_entry(chan, &virtio_chan_list, chan_list) {
+ if (!strncmp(devname, chan->tag, chan->tag_len) &&
+--- a/net/9p/trans_xen.c
++++ b/net/9p/trans_xen.c
+@@ -95,6 +95,9 @@ static int p9_xen_create(struct p9_clien
+ {
+ struct xen_9pfs_front_priv *priv;
+
++ if (addr == NULL)
++ return -EINVAL;
++
+ read_lock(&xen_9pfs_lock);
+ list_for_each_entry(priv, &xen_9pfs_devs, list) {
+ if (!strcmp(priv->tag, addr)) {
--- /dev/null
+From d28c756caee6e414d9ba367d0b92da24145af2a8 Mon Sep 17 00:00:00 2001
+From: Chirantan Ekbote <chirantan@chromium.org>
+Date: Mon, 16 Jul 2018 17:35:29 -0700
+Subject: 9p/net: Fix zero-copy path in the 9p virtio transport
+
+From: Chirantan Ekbote <chirantan@chromium.org>
+
+commit d28c756caee6e414d9ba367d0b92da24145af2a8 upstream.
+
+The zero-copy optimization when reading or writing large chunks of data
+is quite useful. However, the 9p messages created through the zero-copy
+write path have an incorrect message size: it should be the size of the
+header + size of the data being written but instead it's just the size
+of the header.
+
+This only works if the server ignores the size field of the message and
+otherwise breaks the framing of the protocol. Fix this by re-writing the
+message size field with the correct value.
+
+Tested by running `dd if=/dev/zero of=out bs=4k count=1` inside a
+virtio-9p mount.
+
+Link: http://lkml.kernel.org/r/20180717003529.114368-1-chirantan@chromium.org
+Signed-off-by: Chirantan Ekbote <chirantan@chromium.org>
+Reviewed-by: Greg Kurz <groug@kaod.org>
+Tested-by: Greg Kurz <groug@kaod.org>
+Cc: Dylan Reid <dgreid@chromium.org>
+Cc: Guenter Roeck <groeck@chromium.org>
+Cc: stable@vger.kernel.org
+Signed-off-by: Dominique Martinet <dominique.martinet@cea.fr>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/9p/trans_virtio.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/net/9p/trans_virtio.c
++++ b/net/9p/trans_virtio.c
+@@ -406,6 +406,7 @@ p9_virtio_zc_request(struct p9_client *c
+ p9_debug(P9_DEBUG_TRANS, "virtio request\n");
+
+ if (uodata) {
++ __le32 sz;
+ int n = p9_get_mapped_pages(chan, &out_pages, uodata,
+ outlen, &offs, &need_drop);
+ if (n < 0)
+@@ -416,6 +417,12 @@ p9_virtio_zc_request(struct p9_client *c
+ memcpy(&req->tc->sdata[req->tc->size - 4], &v, 4);
+ outlen = n;
+ }
++ /* The size field of the message must include the length of the
++ * header and the length of the data. We didn't actually know
++ * the length of the data until this point so add it in now.
++ */
++ sz = cpu_to_le32(req->tc->size + outlen);
++ memcpy(&req->tc->sdata[0], &sz, sizeof(sz));
+ } else if (uidata) {
+ int n = p9_get_mapped_pages(chan, &in_pages, uidata,
+ inlen, &offs, &need_drop);
--- /dev/null
+From 23cba9cbde0bba05d772b335fe5f66aa82b9ad19 Mon Sep 17 00:00:00 2001
+From: jiangyiwen <jiangyiwen@huawei.com>
+Date: Fri, 3 Aug 2018 12:11:34 +0800
+Subject: 9p/virtio: fix off-by-one error in sg list bounds check
+
+From: jiangyiwen <jiangyiwen@huawei.com>
+
+commit 23cba9cbde0bba05d772b335fe5f66aa82b9ad19 upstream.
+
+Because the value of limit is VIRTQUEUE_NUM, an index equal to limit
+already lies outside the bounds of the sg array, so correct the
+BUG_ON() check accordingly.
+
+Link: http://lkml.kernel.org/r/5B63D5F6.6080109@huawei.com
+Signed-off-by: Yiwen Jiang <jiangyiwen@huawei.com>
+Reported-By: Dan Carpenter <dan.carpenter@oracle.com>
+Acked-by: Jun Piao <piaojun@huawei.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Dominique Martinet <dominique.martinet@cea.fr>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/9p/trans_virtio.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/net/9p/trans_virtio.c
++++ b/net/9p/trans_virtio.c
+@@ -189,7 +189,7 @@ static int pack_sg_list(struct scatterli
+ s = rest_of_page(data);
+ if (s > count)
+ s = count;
+- BUG_ON(index > limit);
++ BUG_ON(index >= limit);
+ /* Make sure we don't terminate early. */
+ sg_unmark_end(&sg[index]);
+ sg_set_buf(&sg[index++], data, s);
+@@ -234,6 +234,7 @@ pack_sg_list_p(struct scatterlist *sg, i
+ s = PAGE_SIZE - data_off;
+ if (s > count)
+ s = count;
++ BUG_ON(index >= limit);
+ /* Make sure we don't terminate early. */
+ sg_unmark_end(&sg[index]);
+ sg_set_page(&sg[index++], pdata[i++], s, data_off);
--- /dev/null
+From 54648cf1ec2d7f4b6a71767799c45676a138ca24 Mon Sep 17 00:00:00 2001
+From: xiao jin <jin.xiao@intel.com>
+Date: Mon, 30 Jul 2018 14:11:12 +0800
+Subject: block: blk_init_allocated_queue() set q->fq as NULL in the fail case
+
+From: xiao jin <jin.xiao@intel.com>
+
+commit 54648cf1ec2d7f4b6a71767799c45676a138ca24 upstream.
+
+We found a memory use-after-free issue in __blk_drain_queue()
+on kernel 4.14. After reading the latest kernel 4.18-rc6 we
+think it has the same problem.
+
+Memory is allocated for q->fq in blk_init_allocated_queue().
+If the elevator init function returns an error, the code runs
+into the fail path and frees q->fq.
+
+__blk_drain_queue() then uses the same memory after q->fq has
+been freed, which leads to unpredictable behaviour.
+
+The patch sets q->fq to NULL in the fail path of
+blk_init_allocated_queue().
+
+Fixes: commit 7c94e1c157a2 ("block: introduce blk_flush_queue to drive flush machinery")
+Cc: <stable@vger.kernel.org>
+Reviewed-by: Ming Lei <ming.lei@redhat.com>
+Reviewed-by: Bart Van Assche <bart.vanassche@wdc.com>
+Signed-off-by: xiao jin <jin.xiao@intel.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ block/blk-core.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -1025,6 +1025,7 @@ out_exit_flush_rq:
+ q->exit_rq_fn(q, q->fq->flush_rq);
+ out_free_flush_queue:
+ blk_free_flush_queue(q->fq);
++ q->fq = NULL;
+ return -ENOMEM;
+ }
+ EXPORT_SYMBOL(blk_init_allocated_queue);
--- /dev/null
+From b233f127042dba991229e3882c6217c80492f6ef Mon Sep 17 00:00:00 2001
+From: Ming Lei <ming.lei@redhat.com>
+Date: Mon, 30 Jul 2018 20:02:19 +0800
+Subject: block: really disable runtime-pm for blk-mq
+
+From: Ming Lei <ming.lei@redhat.com>
+
+commit b233f127042dba991229e3882c6217c80492f6ef upstream.
+
+Runtime PM isn't ready for blk-mq yet, and commit 765e40b675a9 ("block:
+disable runtime-pm for blk-mq") tried to disable it. Unfortunately,
+it can't take effect that way since user space can still switch
+it on via 'echo auto > /sys/block/sdN/device/power/control'.
+
+This patch really disables runtime PM for blk-mq by calling
+pm_runtime_disable() and fixes all kinds of PM-related kernel crashes.
+
+Cc: Tomas Janousek <tomi@nomi.cz>
+Cc: Przemek Socha <soprwa@gmail.com>
+Cc: Alan Stern <stern@rowland.harvard.edu>
+Cc: <stable@vger.kernel.org>
+Reviewed-by: Bart Van Assche <bart.vanassche@wdc.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Tested-by: Patrick Steinhardt <ps@pks.im>
+Signed-off-by: Ming Lei <ming.lei@redhat.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ block/blk-core.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -3459,9 +3459,11 @@ EXPORT_SYMBOL(blk_finish_plug);
+ */
+ void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
+ {
+- /* not support for RQF_PM and ->rpm_status in blk-mq yet */
+- if (q->mq_ops)
++ /* Don't enable runtime PM for blk-mq until it is ready */
++ if (q->mq_ops) {
++ pm_runtime_disable(dev);
+ return;
++ }
+
+ q->dev = dev;
+ q->rpm_status = RPM_ACTIVE;
--- /dev/null
+From ef6cb5f1a048fdf91ccee6d63d2bfa293338502d Mon Sep 17 00:00:00 2001
+From: Vaibhav Jain <vaibhav@linux.ibm.com>
+Date: Wed, 4 Jul 2018 20:58:33 +0530
+Subject: cxl: Fix wrong comparison in cxl_adapter_context_get()
+
+From: Vaibhav Jain <vaibhav@linux.ibm.com>
+
+commit ef6cb5f1a048fdf91ccee6d63d2bfa293338502d upstream.
+
+Function atomic_inc_unless_negative() returns a bool to indicate
+success/failure. However cxl_adapter_context_get() wrongly compares
+the return value against '>=0' which will always be true. The patch
+fixes this comparison to '==0', thereby also fixing this compile-time
+warning:
+
+ drivers/misc/cxl/main.c:290 cxl_adapter_context_get()
+ warn: 'atomic_inc_unless_negative(&adapter->contexts_num)' is unsigned
+
+Fixes: 70b565bbdb91 ("cxl: Prevent adapter reset if an active context exists")
+Cc: stable@vger.kernel.org # v4.9+
+Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: Vaibhav Jain <vaibhav@linux.ibm.com>
+Acked-by: Andrew Donnellan <andrew.donnellan@au1.ibm.com>
+Acked-by: Frederic Barrat <fbarrat@linux.vnet.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/misc/cxl/main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/misc/cxl/main.c
++++ b/drivers/misc/cxl/main.c
+@@ -287,7 +287,7 @@ int cxl_adapter_context_get(struct cxl *
+ int rc;
+
+ rc = atomic_inc_unless_negative(&adapter->contexts_num);
+- return rc >= 0 ? 0 : -EBUSY;
++ return rc ? 0 : -EBUSY;
+ }
+
+ void cxl_adapter_context_put(struct cxl *adapter)
--- /dev/null
+From fd2fa95416188a767a63979296fa3e169a9ef5ec Mon Sep 17 00:00:00 2001
+From: Mike Snitzer <snitzer@redhat.com>
+Date: Thu, 2 Aug 2018 16:08:52 -0400
+Subject: dm cache metadata: save in-core policy_hint_size to on-disk superblock
+
+From: Mike Snitzer <snitzer@redhat.com>
+
+commit fd2fa95416188a767a63979296fa3e169a9ef5ec upstream.
+
+policy_hint_size starts as 0 during __write_initial_superblock(). It
+isn't until the policy is loaded that policy_hint_size is set in-core
+(cmd->policy_hint_size). But it never got recorded in the on-disk
+superblock because __commit_transaction() didn't deal with transferring
+the in-core cmd->policy_hint_size to the on-disk superblock.
+
+The in-core cmd->policy_hint_size gets initialized by metadata_open()'s
+__begin_transaction_flags() which re-reads all superblock fields.
+Because the superblock's policy_hint_size was never properly stored, when
+the cache was created, hints_array_available() would always return false
+when re-activating a previously created cache. This means
+__load_mappings() always considered the hints invalid and never made use
+of the hints (these hints served to optimize).
+
+Another detrimental side-effect of this oversight is that the cache_check
+utility would fail with: "invalid hint width: 0"
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-cache-metadata.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/md/dm-cache-metadata.c
++++ b/drivers/md/dm-cache-metadata.c
+@@ -362,7 +362,7 @@ static int __write_initial_superblock(st
+ disk_super->version = cpu_to_le32(cmd->version);
+ memset(disk_super->policy_name, 0, sizeof(disk_super->policy_name));
+ memset(disk_super->policy_version, 0, sizeof(disk_super->policy_version));
+- disk_super->policy_hint_size = 0;
++ disk_super->policy_hint_size = cpu_to_le32(0);
+
+ __copy_sm_root(cmd, disk_super);
+
+@@ -700,6 +700,7 @@ static int __commit_transaction(struct d
+ disk_super->policy_version[0] = cpu_to_le32(cmd->policy_version[0]);
+ disk_super->policy_version[1] = cpu_to_le32(cmd->policy_version[1]);
+ disk_super->policy_version[2] = cpu_to_le32(cmd->policy_version[2]);
++ disk_super->policy_hint_size = cpu_to_le32(cmd->policy_hint_size);
+
+ disk_super->read_hits = cpu_to_le32(cmd->stats.read_hits);
+ disk_super->read_misses = cpu_to_le32(cmd->stats.read_misses);
--- /dev/null
+From 5b1fe7bec8a8d0cc547a22e7ddc2bd59acd67de4 Mon Sep 17 00:00:00 2001
+From: Ilya Dryomov <idryomov@gmail.com>
+Date: Thu, 9 Aug 2018 12:38:28 +0200
+Subject: dm cache metadata: set dirty on all cache blocks after a crash
+
+From: Ilya Dryomov <idryomov@gmail.com>
+
+commit 5b1fe7bec8a8d0cc547a22e7ddc2bd59acd67de4 upstream.
+
+Quoting Documentation/device-mapper/cache.txt:
+
+ The 'dirty' state for a cache block changes far too frequently for us
+ to keep updating it on the fly. So we treat it as a hint. In normal
+ operation it will be written when the dm device is suspended. If the
+ system crashes all cache blocks will be assumed dirty when restarted.
+
+This got broken in commit f177940a8091 ("dm cache metadata: switch to
+using the new cursor api for loading metadata") in 4.9, which removed
+the code that consulted cmd->clean_when_opened (CLEAN_SHUTDOWN on-disk
+flag) when loading cache blocks. This results in data corruption on an
+unclean shutdown with dirty cache blocks on the fast device. After the
+crash those blocks are considered clean and may get evicted from the
+cache at any time. This can be demonstrated by doing a lot of reads
+to trigger individual evictions, but uncache is more predictable:
+
+ ### Disable auto-activation in lvm.conf to be able to do uncache in
+ ### time (i.e. see uncache doing flushing) when the fix is applied.
+
+ # xfs_io -d -c 'pwrite -b 4M -S 0xaa 0 1G' /dev/vdb
+ # vgcreate vg_cache /dev/vdb /dev/vdc
+ # lvcreate -L 1G -n lv_slowdev vg_cache /dev/vdb
+ # lvcreate -L 512M -n lv_cachedev vg_cache /dev/vdc
+ # lvcreate -L 256M -n lv_metadev vg_cache /dev/vdc
+ # lvconvert --type cache-pool --cachemode writeback vg_cache/lv_cachedev --poolmetadata vg_cache/lv_metadev
+ # lvconvert --type cache vg_cache/lv_slowdev --cachepool vg_cache/lv_cachedev
+ # xfs_io -d -c 'pwrite -b 4M -S 0xbb 0 512M' /dev/mapper/vg_cache-lv_slowdev
+ # xfs_io -d -c 'pread -v 254M 512' /dev/mapper/vg_cache-lv_slowdev | head -n 2
+ 0fe00000: bb bb bb bb bb bb bb bb bb bb bb bb bb bb bb bb ................
+ 0fe00010: bb bb bb bb bb bb bb bb bb bb bb bb bb bb bb bb ................
+ # dmsetup status vg_cache-lv_slowdev
+ 0 2097152 cache 8 27/65536 128 8192/8192 1 100 0 0 0 8192 7065 2 metadata2 writeback 2 migration_threshold 2048 smq 0 rw -
+ ^^^^
+ 7065 * 64k = 441M yet to be written to the slow device
+ # echo b >/proc/sysrq-trigger
+
+ # vgchange -ay vg_cache
+ # xfs_io -d -c 'pread -v 254M 512' /dev/mapper/vg_cache-lv_slowdev | head -n 2
+ 0fe00000: bb bb bb bb bb bb bb bb bb bb bb bb bb bb bb bb ................
+ 0fe00010: bb bb bb bb bb bb bb bb bb bb bb bb bb bb bb bb ................
+ # lvconvert --uncache vg_cache/lv_slowdev
+ Flushing 0 blocks for cache vg_cache/lv_slowdev.
+ Logical volume "lv_cachedev" successfully removed
+ Logical volume vg_cache/lv_slowdev is not cached.
+ # xfs_io -d -c 'pread -v 254M 512' /dev/mapper/vg_cache-lv_slowdev | head -n 2
+ 0fe00000: aa aa aa aa aa aa aa aa aa aa aa aa aa aa aa aa ................
+ 0fe00010: aa aa aa aa aa aa aa aa aa aa aa aa aa aa aa aa ................
+
+This is the case with both v1 and v2 cache pool metadata formats.
+
+After applying this patch:
+
+ # vgchange -ay vg_cache
+ # xfs_io -d -c 'pread -v 254M 512' /dev/mapper/vg_cache-lv_slowdev | head -n 2
+ 0fe00000: bb bb bb bb bb bb bb bb bb bb bb bb bb bb bb bb ................
+ 0fe00010: bb bb bb bb bb bb bb bb bb bb bb bb bb bb bb bb ................
+ # lvconvert --uncache vg_cache/lv_slowdev
+ Flushing 3724 blocks for cache vg_cache/lv_slowdev.
+ ...
+ Flushing 71 blocks for cache vg_cache/lv_slowdev.
+ Logical volume "lv_cachedev" successfully removed
+ Logical volume vg_cache/lv_slowdev is not cached.
+ # xfs_io -d -c 'pread -v 254M 512' /dev/mapper/vg_cache-lv_slowdev | head -n 2
+ 0fe00000: bb bb bb bb bb bb bb bb bb bb bb bb bb bb bb bb ................
+ 0fe00010: bb bb bb bb bb bb bb bb bb bb bb bb bb bb bb bb ................
+
+Cc: stable@vger.kernel.org
+Fixes: f177940a8091 ("dm cache metadata: switch to using the new cursor api for loading metadata")
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-cache-metadata.c | 10 +++++++---
+ 1 file changed, 7 insertions(+), 3 deletions(-)
+
+--- a/drivers/md/dm-cache-metadata.c
++++ b/drivers/md/dm-cache-metadata.c
+@@ -1322,6 +1322,7 @@ static int __load_mapping_v1(struct dm_c
+
+ dm_oblock_t oblock;
+ unsigned flags;
++ bool dirty = true;
+
+ dm_array_cursor_get_value(mapping_cursor, (void **) &mapping_value_le);
+ memcpy(&mapping, mapping_value_le, sizeof(mapping));
+@@ -1332,8 +1333,10 @@ static int __load_mapping_v1(struct dm_c
+ dm_array_cursor_get_value(hint_cursor, (void **) &hint_value_le);
+ memcpy(&hint, hint_value_le, sizeof(hint));
+ }
++ if (cmd->clean_when_opened)
++ dirty = flags & M_DIRTY;
+
+- r = fn(context, oblock, to_cblock(cb), flags & M_DIRTY,
++ r = fn(context, oblock, to_cblock(cb), dirty,
+ le32_to_cpu(hint), hints_valid);
+ if (r) {
+ DMERR("policy couldn't load cache block %llu",
+@@ -1361,7 +1364,7 @@ static int __load_mapping_v2(struct dm_c
+
+ dm_oblock_t oblock;
+ unsigned flags;
+- bool dirty;
++ bool dirty = true;
+
+ dm_array_cursor_get_value(mapping_cursor, (void **) &mapping_value_le);
+ memcpy(&mapping, mapping_value_le, sizeof(mapping));
+@@ -1372,8 +1375,9 @@ static int __load_mapping_v2(struct dm_c
+ dm_array_cursor_get_value(hint_cursor, (void **) &hint_value_le);
+ memcpy(&hint, hint_value_le, sizeof(hint));
+ }
++ if (cmd->clean_when_opened)
++ dirty = dm_bitset_cursor_get_value(dirty_cursor);
+
+- dirty = dm_bitset_cursor_get_value(dirty_cursor);
+ r = fn(context, oblock, to_cblock(cb), dirty,
+ le32_to_cpu(hint), hints_valid);
+ if (r) {
--- /dev/null
+From bc9e9cf0401f18e33b78d4c8a518661b8346baf7 Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Fri, 10 Aug 2018 11:23:56 -0400
+Subject: dm crypt: don't decrease device limits
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit bc9e9cf0401f18e33b78d4c8a518661b8346baf7 upstream.
+
+dm-crypt should only increase device limits, it should not decrease them.
+
+This fixes a bug where the user could create a crypt device with a 1024
+sector size on top of a SCSI device that had a 4096 logical block size.
+The 4096 limit would be lost and the user could incorrectly send
+1024-byte I/Os to the crypt device.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-crypt.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -3072,11 +3072,11 @@ static void crypt_io_hints(struct dm_tar
+ */
+ limits->max_segment_size = PAGE_SIZE;
+
+- if (cc->sector_size != (1 << SECTOR_SHIFT)) {
+- limits->logical_block_size = cc->sector_size;
+- limits->physical_block_size = cc->sector_size;
+- blk_limits_io_min(limits, cc->sector_size);
+- }
++ limits->logical_block_size =
++ max_t(unsigned short, limits->logical_block_size, cc->sector_size);
++ limits->physical_block_size =
++ max_t(unsigned, limits->physical_block_size, cc->sector_size);
++ limits->io_min = max_t(unsigned, limits->io_min, cc->sector_size);
+ }
+
+ static struct target_type crypt_target = {
--- /dev/null
+From c21b16392701543d61e366dca84e15fe7f0cf0cf Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Tue, 3 Jul 2018 20:13:25 +0200
+Subject: dm integrity: change 'suspending' variable from bool to int
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit c21b16392701543d61e366dca84e15fe7f0cf0cf upstream.
+
+Early alpha processors can't write a byte or short atomically - they
+read 8 bytes, modify the byte or two bytes in registers and write back
+8 bytes.
+
+The modification of the variable "suspending" may race with
+modification of the variable "failed". Fix this by changing
+"suspending" to an int.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-integrity.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -177,7 +177,7 @@ struct dm_integrity_c {
+ __u8 sectors_per_block;
+
+ unsigned char mode;
+- bool suspending;
++ int suspending;
+
+ int failed;
+
+@@ -2209,7 +2209,7 @@ static void dm_integrity_postsuspend(str
+
+ del_timer_sync(&ic->autocommit_timer);
+
+- ic->suspending = true;
++ WRITE_ONCE(ic->suspending, 1);
+
+ queue_work(ic->commit_wq, &ic->commit_work);
+ drain_workqueue(ic->commit_wq);
+@@ -2219,7 +2219,7 @@ static void dm_integrity_postsuspend(str
+ dm_integrity_flush_buffers(ic);
+ }
+
+- ic->suspending = false;
++ WRITE_ONCE(ic->suspending, 0);
+
+ BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
+
--- /dev/null
+From 75294442d896f2767be34f75aca7cc2b0d01301f Mon Sep 17 00:00:00 2001
+From: Hou Tao <houtao1@huawei.com>
+Date: Thu, 2 Aug 2018 16:18:24 +0800
+Subject: dm thin: stop no_space_timeout worker when switching to write-mode
+
+From: Hou Tao <houtao1@huawei.com>
+
+commit 75294442d896f2767be34f75aca7cc2b0d01301f upstream.
+
+Now both check_for_space() and do_no_space_timeout() will read & write
+pool->pf.error_if_no_space. If these functions run concurrently, as
+shown in the following case, the default setting of "queue_if_no_space"
+can get lost.
+
+precondition:
+ * error_if_no_space = false (aka "queue_if_no_space")
+ * pool is in Out-of-Data-Space (OODS) mode
+ * no_space_timeout worker has been queued
+
+CPU 0: CPU 1:
+// delete a thin device
+process_delete_mesg()
+// check_for_space() invoked by commit()
+set_pool_mode(pool, PM_WRITE)
+ pool->pf.error_if_no_space = \
+ pt->requested_pf.error_if_no_space
+
+ // timeout, pool is still in OODS mode
+ do_no_space_timeout
+ // "queue_if_no_space" config is lost
+ pool->pf.error_if_no_space = true
+ pool->pf.mode = new_mode
+
+Fix it by stopping no_space_timeout worker when switching to write mode.
+
+Fixes: bcc696fac11f ("dm thin: stay in out-of-data-space mode once no_space_timeout expires")
+Cc: stable@vger.kernel.org
+Signed-off-by: Hou Tao <houtao1@huawei.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-thin.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -2514,6 +2514,8 @@ static void set_pool_mode(struct pool *p
+ case PM_WRITE:
+ if (old_mode != new_mode)
+ notify_of_pool_mode_change(pool, "write");
++ if (old_mode == PM_OUT_OF_DATA_SPACE)
++ cancel_delayed_work_sync(&pool->no_space_timeout);
+ pool->out_of_data_space = false;
+ pool->pf.error_if_no_space = pt->requested_pf.error_if_no_space;
+ dm_pool_metadata_read_write(pool->pmd);
--- /dev/null
+From d3b26dd7cb0e3433bfd3c1d4dcf74c6039bb49fb Mon Sep 17 00:00:00 2001
+From: Dexuan Cui <decui@microsoft.com>
+Date: Thu, 2 Aug 2018 03:08:23 +0000
+Subject: Drivers: hv: vmbus: Reset the channel callback in vmbus_onoffer_rescind()
+
+From: Dexuan Cui <decui@microsoft.com>
+
+commit d3b26dd7cb0e3433bfd3c1d4dcf74c6039bb49fb upstream.
+
+Before setting channel->rescind in vmbus_rescind_cleanup(), we should make
+sure the channel callback won't run any more; otherwise a high-level
+driver like pci_hyperv, which may be waiting indefinitely for the host
+VSP's response and notices the channel has been rescinded, can't safely
+give up: e.g., in hv_pci_protocol_negotiation() -> wait_for_response(),
+it's unsafe to exit from wait_for_response() and proceed with the
+on-stack variable "comp_pkt" popped. The issue was originally spotted by
+Michael Kelley <mikelley@microsoft.com>.
+
+In vmbus_close_internal(), the patch also minimizes the range protected by
+disabling/enabling channel->callback_event: we don't really need that for
+the whole function.
+
+Signed-off-by: Dexuan Cui <decui@microsoft.com>
+Reviewed-by: Michael Kelley <mikelley@microsoft.com>
+Cc: stable@vger.kernel.org
+Cc: K. Y. Srinivasan <kys@microsoft.com>
+Cc: Stephen Hemminger <sthemmin@microsoft.com>
+Cc: Michael Kelley <mikelley@microsoft.com>
+Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/hv/channel.c | 40 ++++++++++++++++++++++++----------------
+ drivers/hv/channel_mgmt.c | 6 ++++++
+ include/linux/hyperv.h | 2 ++
+ 3 files changed, 32 insertions(+), 16 deletions(-)
+
+--- a/drivers/hv/channel.c
++++ b/drivers/hv/channel.c
+@@ -541,11 +541,8 @@ static void reset_channel_cb(void *arg)
+ channel->onchannel_callback = NULL;
+ }
+
+-static int vmbus_close_internal(struct vmbus_channel *channel)
++void vmbus_reset_channel_cb(struct vmbus_channel *channel)
+ {
+- struct vmbus_channel_close_channel *msg;
+- int ret;
+-
+ /*
+ * vmbus_on_event(), running in the per-channel tasklet, can race
+ * with vmbus_close_internal() in the case of SMP guest, e.g., when
+@@ -555,6 +552,29 @@ static int vmbus_close_internal(struct v
+ */
+ tasklet_disable(&channel->callback_event);
+
++ channel->sc_creation_callback = NULL;
++
++ /* Stop the callback asap */
++ if (channel->target_cpu != get_cpu()) {
++ put_cpu();
++ smp_call_function_single(channel->target_cpu, reset_channel_cb,
++ channel, true);
++ } else {
++ reset_channel_cb(channel);
++ put_cpu();
++ }
++
++ /* Re-enable tasklet for use on re-open */
++ tasklet_enable(&channel->callback_event);
++}
++
++static int vmbus_close_internal(struct vmbus_channel *channel)
++{
++ struct vmbus_channel_close_channel *msg;
++ int ret;
++
++ vmbus_reset_channel_cb(channel);
++
+ /*
+ * In case a device driver's probe() fails (e.g.,
+ * util_probe() -> vmbus_open() returns -ENOMEM) and the device is
+@@ -568,16 +588,6 @@ static int vmbus_close_internal(struct v
+ }
+
+ channel->state = CHANNEL_OPEN_STATE;
+- channel->sc_creation_callback = NULL;
+- /* Stop callback and cancel the timer asap */
+- if (channel->target_cpu != get_cpu()) {
+- put_cpu();
+- smp_call_function_single(channel->target_cpu, reset_channel_cb,
+- channel, true);
+- } else {
+- reset_channel_cb(channel);
+- put_cpu();
+- }
+
+ /* Send a closing message */
+
+@@ -620,8 +630,6 @@ static int vmbus_close_internal(struct v
+ get_order(channel->ringbuffer_pagecount * PAGE_SIZE));
+
+ out:
+- /* re-enable tasklet for use on re-open */
+- tasklet_enable(&channel->callback_event);
+ return ret;
+ }
+
+--- a/drivers/hv/channel_mgmt.c
++++ b/drivers/hv/channel_mgmt.c
+@@ -882,6 +882,12 @@ static void vmbus_onoffer_rescind(struct
+ }
+
+ /*
++ * Before setting channel->rescind in vmbus_rescind_cleanup(), we
++ * should make sure the channel callback is not running any more.
++ */
++ vmbus_reset_channel_cb(channel);
++
++ /*
+ * Now wait for offer handling to complete.
+ */
+ vmbus_rescind_cleanup(channel);
+--- a/include/linux/hyperv.h
++++ b/include/linux/hyperv.h
+@@ -1026,6 +1026,8 @@ extern int vmbus_establish_gpadl(struct
+ extern int vmbus_teardown_gpadl(struct vmbus_channel *channel,
+ u32 gpadl_handle);
+
++void vmbus_reset_channel_cb(struct vmbus_channel *channel);
++
+ extern int vmbus_recvpacket(struct vmbus_channel *channel,
+ void *buffer,
+ u32 bufferlen,
--- /dev/null
+From c11c7bfd213495784b22ef82a69b6489f8d0092f Mon Sep 17 00:00:00 2001
+From: Matthew Auld <matthew.auld@intel.com>
+Date: Wed, 2 May 2018 20:50:21 +0100
+Subject: drm/i915/userptr: reject zero user_size
+
+From: Matthew Auld <matthew.auld@intel.com>
+
+commit c11c7bfd213495784b22ef82a69b6489f8d0092f upstream.
+
+Operating on a zero sized GEM userptr object will lead to explosions.
+
+Fixes: 5cc9ed4b9a7a ("drm/i915: Introduce mapping of user pages into video memory (userptr) ioctl")
+Testcase: igt/gem_userptr_blits/input-checking
+Signed-off-by: Matthew Auld <matthew.auld@intel.com>
+Cc: Chris Wilson <chris@chris-wilson.co.uk>
+Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
+Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
+Link: https://patchwork.freedesktop.org/patch/msgid/20180502195021.30900-1-matthew.auld@intel.com
+Cc: Loic <hackurx@opensec.fr>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/i915_gem_userptr.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
++++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
+@@ -782,6 +782,9 @@ i915_gem_userptr_ioctl(struct drm_device
+ I915_USERPTR_UNSYNCHRONIZED))
+ return -EINVAL;
+
++ if (!args->user_size)
++ return -EINVAL;
++
+ if (offset_in_page(args->user_ptr | args->user_size))
+ return -EINVAL;
+
--- /dev/null
+From 8a9dbb779fe882325b9a0238494a7afaff2eb444 Mon Sep 17 00:00:00 2001
+From: Chanwoo Choi <cw00.choi@samsung.com>
+Date: Thu, 14 Jun 2018 11:16:29 +0900
+Subject: extcon: Release locking when sending the notification of connector state
+
+From: Chanwoo Choi <cw00.choi@samsung.com>
+
+commit 8a9dbb779fe882325b9a0238494a7afaff2eb444 upstream.
+
+Previously, extcon held the spinlock before calling notifier_call_chain
+to prevent the task from being scheduled out and to avoid delaying the
+notification. But with the spinlock held while sending the notification,
+a deadlock could occur on the extcon consumer device side. Working around
+that would require the extcon consumer device to always defer its handling
+to a work item, which is not always reasonable.
+
+To fix this issue on the extcon consumer device side, release the lock
+when sending the notification of the connector state.
+
+Fixes: ab11af049f88 ("extcon: Add the synchronization extcon APIs to support the notification")
+Cc: stable@vger.kernel.org
+Cc: Roger Quadros <rogerq@ti.com>
+Cc: Kishon Vijay Abraham I <kishon@ti.com>
+Signed-off-by: Chanwoo Choi <cw00.choi@samsung.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/extcon/extcon.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/extcon/extcon.c
++++ b/drivers/extcon/extcon.c
+@@ -433,8 +433,8 @@ int extcon_sync(struct extcon_dev *edev,
+ return index;
+
+ spin_lock_irqsave(&edev->lock, flags);
+-
+ state = !!(edev->state & BIT(index));
++ spin_unlock_irqrestore(&edev->lock, flags);
+
+ /*
+ * Call functions in a raw notifier chain for the specific one
+@@ -448,6 +448,7 @@ int extcon_sync(struct extcon_dev *edev,
+ */
+ raw_notifier_call_chain(&edev->nh_all, state, edev);
+
++ spin_lock_irqsave(&edev->lock, flags);
+ /* This could be in interrupt handler */
+ prop_buf = (char *)get_zeroed_page(GFP_ATOMIC);
+ if (!prop_buf) {
--- /dev/null
+From ea93102f32244e3f45c8b26260be77ed0cc1d16c Mon Sep 17 00:00:00 2001
+From: Yannik Sembritzki <yannik@sembritzki.me>
+Date: Thu, 16 Aug 2018 14:05:23 +0100
+Subject: Fix kexec forbidding kernels signed with keys in the secondary keyring to boot
+
+From: Yannik Sembritzki <yannik@sembritzki.me>
+
+commit ea93102f32244e3f45c8b26260be77ed0cc1d16c upstream.
+
+The split of .system_keyring into .builtin_trusted_keys and
+.secondary_trusted_keys broke kexec, thereby preventing kernels signed by
+keys which are now in the secondary keyring from being kexec'd.
+
+Fix this by passing VERIFY_USE_SECONDARY_KEYRING to
+verify_pefile_signature().
+
+Fixes: d3bfe84129f6 ("certs: Add a secondary system keyring that can be added to dynamically")
+Signed-off-by: Yannik Sembritzki <yannik@sembritzki.me>
+Signed-off-by: David Howells <dhowells@redhat.com>
+Cc: kexec@lists.infradead.org
+Cc: keyrings@vger.kernel.org
+Cc: linux-security-module@vger.kernel.org
+Cc: stable@kernel.org
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/kexec-bzimage64.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/kexec-bzimage64.c
++++ b/arch/x86/kernel/kexec-bzimage64.c
+@@ -532,7 +532,7 @@ static int bzImage64_cleanup(void *loade
+ static int bzImage64_verify_sig(const char *kernel, unsigned long kernel_len)
+ {
+ return verify_pefile_signature(kernel, kernel_len,
+- NULL,
++ VERIFY_USE_SECONDARY_KEYRING,
+ VERIFYING_KEXEC_PE_SIGNATURE);
+ }
+ #endif
--- /dev/null
+From 3111784bee81591ea2815011688d28b65df03627 Mon Sep 17 00:00:00 2001
+From: piaojun <piaojun@huawei.com>
+Date: Wed, 25 Jul 2018 11:13:16 +0800
+Subject: fs/9p/xattr.c: catch the error of p9_client_clunk when setting xattr failed
+
+From: piaojun <piaojun@huawei.com>
+
+commit 3111784bee81591ea2815011688d28b65df03627 upstream.
+
+In my testing, v9fs_fid_xattr_set will return successfully even if the
+backend ext4 filesystem has no space to store the xattr key-value. That
+will cause inconsistent behavior between the front end and the back end.
+The reason is that lsetxattr is triggered by p9_client_clunk, and
+unfortunately we did not catch the error. This patch catches the error
+and notifies the upper caller.
+
+p9_client_clunk (in 9p)
+ p9_client_rpc(clnt, P9_TCLUNK, "d", fid->fid);
+ v9fs_clunk (in qemu)
+ put_fid
+ free_fid
+ v9fs_xattr_fid_clunk
+ v9fs_co_lsetxattr
+ s->ops->lsetxattr
+ ext4_xattr_user_set (in host ext4 filesystem)
+
+Link: http://lkml.kernel.org/r/5B57EACC.2060900@huawei.com
+Signed-off-by: Jun Piao <piaojun@huawei.com>
+Cc: Eric Van Hensbergen <ericvh@gmail.com>
+Cc: Ron Minnich <rminnich@sandia.gov>
+Cc: Latchesar Ionkov <lucho@ionkov.net>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: stable@vger.kernel.org
+Signed-off-by: Dominique Martinet <dominique.martinet@cea.fr>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/9p/xattr.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/fs/9p/xattr.c
++++ b/fs/9p/xattr.c
+@@ -105,7 +105,7 @@ int v9fs_fid_xattr_set(struct p9_fid *fi
+ {
+ struct kvec kvec = {.iov_base = (void *)value, .iov_len = value_len};
+ struct iov_iter from;
+- int retval;
++ int retval, err;
+
+ iov_iter_kvec(&from, WRITE | ITER_KVEC, &kvec, 1, value_len);
+
+@@ -126,7 +126,9 @@ int v9fs_fid_xattr_set(struct p9_fid *fi
+ retval);
+ else
+ p9_client_write(fid, 0, &from, &retval);
+- p9_client_clunk(fid);
++ err = p9_client_clunk(fid);
++ if (!retval && err)
++ retval = err;
+ return retval;
+ }
+
--- /dev/null
+From 995250959d22fc341b5424e3343b0ce5df672461 Mon Sep 17 00:00:00 2001
+From: Bart Van Assche <bart.vanassche@wdc.com>
+Date: Mon, 2 Jul 2018 14:08:18 -0700
+Subject: ib_srpt: Fix a use-after-free in srpt_close_ch()
+
+From: Bart Van Assche <bart.vanassche@wdc.com>
+
+commit 995250959d22fc341b5424e3343b0ce5df672461 upstream.
+
+Avoid that KASAN reports the following:
+
+BUG: KASAN: use-after-free in srpt_close_ch+0x4f/0x1b0 [ib_srpt]
+Read of size 4 at addr ffff880151180cb8 by task check/4681
+
+CPU: 15 PID: 4681 Comm: check Not tainted 4.18.0-rc2-dbg+ #4
+Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.0.0-prebuilt.qemu-project.org 04/01/2014
+Call Trace:
+ dump_stack+0xa4/0xf5
+ print_address_description+0x6f/0x270
+ kasan_report+0x241/0x360
+ __asan_load4+0x78/0x80
+ srpt_close_ch+0x4f/0x1b0 [ib_srpt]
+ srpt_set_enabled+0xf7/0x1e0 [ib_srpt]
+ srpt_tpg_enable_store+0xb8/0x120 [ib_srpt]
+ configfs_write_file+0x14e/0x1d0 [configfs]
+ __vfs_write+0xd2/0x3b0
+ vfs_write+0x101/0x270
+ ksys_write+0xab/0x120
+ __x64_sys_write+0x43/0x50
+ do_syscall_64+0x77/0x230
+ entry_SYSCALL_64_after_hwframe+0x49/0xbe
+
+Fixes: aaf45bd83eba ("IB/srpt: Detect session shutdown reliably")
+Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/ulp/srpt/ib_srpt.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
++++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
+@@ -1713,8 +1713,7 @@ static bool srpt_close_ch(struct srpt_rd
+ int ret;
+
+ if (!srpt_set_ch_state(ch, CH_DRAINING)) {
+- pr_debug("%s-%d: already closed\n", ch->sess_name,
+- ch->qp->qp_num);
++ pr_debug("%s: already closed\n", ch->sess_name);
+ return false;
+ }
+
--- /dev/null
+From 5a4e33c1c53ae7d4425f7d94e60e4458a37b349e Mon Sep 17 00:00:00 2001
+From: Lars-Peter Clausen <lars@metafoo.de>
+Date: Mon, 25 Jun 2018 11:03:07 +0300
+Subject: iio: ad9523: Fix displayed phase
+
+From: Lars-Peter Clausen <lars@metafoo.de>
+
+commit 5a4e33c1c53ae7d4425f7d94e60e4458a37b349e upstream.
+
+Fix the displayed phase for the ad9523 driver. Currently the most
+significant decimal place is dropped and all other digits are shifted one
+to the left. This is due to a multiplication by 10, which is not necessary,
+so remove it.
+
+Signed-off-by: Lars-Peter Clausen <lars@metafoo.de>
+Signed-off-by: Alexandru Ardelean <alexandru.ardelean@analog.com>
+Fixes: cd1678f9632 ("iio: frequency: New driver for AD9523 SPI Low Jitter Clock Generator")
+Cc: <Stable@vger.kernel.org>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/iio/frequency/ad9523.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/iio/frequency/ad9523.c
++++ b/drivers/iio/frequency/ad9523.c
+@@ -642,7 +642,7 @@ static int ad9523_read_raw(struct iio_de
+ code = (AD9523_CLK_DIST_DIV_PHASE_REV(ret) * 3141592) /
+ AD9523_CLK_DIST_DIV_REV(ret);
+ *val = code / 1000000;
+- *val2 = (code % 1000000) * 10;
++ *val2 = code % 1000000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
--- /dev/null
+From 9a5094ca29ea9b1da301b31fd377c0c0c4c23034 Mon Sep 17 00:00:00 2001
+From: Lars-Peter Clausen <lars@metafoo.de>
+Date: Fri, 27 Jul 2018 09:42:45 +0300
+Subject: iio: ad9523: Fix return value for ad952x_store()
+
+From: Lars-Peter Clausen <lars@metafoo.de>
+
+commit 9a5094ca29ea9b1da301b31fd377c0c0c4c23034 upstream.
+
+A sysfs write callback function needs to either return the number of
+consumed characters or an error.
+
+The ad952x_store() function currently returns 0 if the input value was "0",
+this will signal that no characters have been consumed and the function
+will be called repeatedly in a loop indefinitely. Fix this by returning
+number of supplied characters to indicate that the whole input string has
+been consumed.
+
+Signed-off-by: Lars-Peter Clausen <lars@metafoo.de>
+Signed-off-by: Alexandru Ardelean <alexandru.ardelean@analog.com>
+Fixes: cd1678f96329 ("iio: frequency: New driver for AD9523 SPI Low Jitter Clock Generator")
+Cc: <Stable@vger.kernel.org>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/iio/frequency/ad9523.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/iio/frequency/ad9523.c
++++ b/drivers/iio/frequency/ad9523.c
+@@ -508,7 +508,7 @@ static ssize_t ad9523_store(struct devic
+ return ret;
+
+ if (!state)
+- return 0;
++ return len;
+
+ mutex_lock(&indio_dev->mlock);
+ switch ((u32)this_attr->address) {
--- /dev/null
+From c5b974bee9d2ceae4c441ae5a01e498c2674e100 Mon Sep 17 00:00:00 2001
+From: "Gustavo A. R. Silva" <gustavo@embeddedor.com>
+Date: Sat, 7 Jul 2018 12:44:01 -0500
+Subject: iio: sca3000: Fix missing return in switch
+
+From: Gustavo A. R. Silva <gustavo@embeddedor.com>
+
+commit c5b974bee9d2ceae4c441ae5a01e498c2674e100 upstream.
+
+The IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY case is missing a
+return and will fall through to the default case and erroneously
+return -EINVAL.
+
+Fix this by adding the missing *return ret*.
+
+Fixes: 626f971b5b07 ("staging:iio:accel:sca3000 Add write support to the low pass filter control")
+Reported-by: Jonathan Cameron <jic23@kernel.org>
+Signed-off-by: Gustavo A. R. Silva <gustavo@embeddedor.com>
+Cc: <Stable@vger.kernel.org>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/iio/accel/sca3000.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/iio/accel/sca3000.c
++++ b/drivers/iio/accel/sca3000.c
+@@ -797,6 +797,7 @@ static int sca3000_write_raw(struct iio_
+ mutex_lock(&st->lock);
+ ret = sca3000_write_3db_freq(st, val);
+ mutex_unlock(&st->lock);
++ return ret;
+ default:
+ return -EINVAL;
+ }
--- /dev/null
+From 7444a8092906ed44c09459780c56ba57043e39b1 Mon Sep 17 00:00:00 2001
+From: Daniel Mack <daniel@zonque.org>
+Date: Wed, 27 Jun 2018 20:58:45 +0200
+Subject: libertas: fix suspend and resume for SDIO connected cards
+
+From: Daniel Mack <daniel@zonque.org>
+
+commit 7444a8092906ed44c09459780c56ba57043e39b1 upstream.
+
+Prior to commit 573185cc7e64 ("mmc: core: Invoke sdio func driver's PM
+callbacks from the sdio bus"), the MMC core used to call into the power
+management functions of SDIO clients itself and removed the card if the
+return code was non-zero. IOW, the MMC core handled errors gracefully and
+didn't pass them up to the PM core.
+
+Since this change, the mmc core relies on generic power management
+functions which treat all errors as a reason to cancel the suspend
+immediately. This causes suspend attempts to fail when the libertas
+driver is loaded.
+
+To fix this, power down the card explicitly in if_sdio_suspend() when we
+know we're about to lose power and return success. Also set a flag in these
+cases, and power up the card again in if_sdio_resume().
+
+Fixes: 573185cc7e64 ("mmc: core: Invoke sdio func driver's PM callbacks from the sdio bus")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Daniel Mack <daniel@zonque.org>
+Reviewed-by: Chris Ball <chris@printf.net>
+Reviewed-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/wireless/marvell/libertas/dev.h | 1
+ drivers/net/wireless/marvell/libertas/if_sdio.c | 30 +++++++++++++++++++-----
+ 2 files changed, 25 insertions(+), 6 deletions(-)
+
+--- a/drivers/net/wireless/marvell/libertas/dev.h
++++ b/drivers/net/wireless/marvell/libertas/dev.h
+@@ -104,6 +104,7 @@ struct lbs_private {
+ u8 fw_ready;
+ u8 surpriseremoved;
+ u8 setup_fw_on_resume;
++ u8 power_up_on_resume;
+ int (*hw_host_to_card) (struct lbs_private *priv, u8 type, u8 *payload, u16 nb);
+ void (*reset_card) (struct lbs_private *priv);
+ int (*power_save) (struct lbs_private *priv);
+--- a/drivers/net/wireless/marvell/libertas/if_sdio.c
++++ b/drivers/net/wireless/marvell/libertas/if_sdio.c
+@@ -1290,15 +1290,23 @@ static void if_sdio_remove(struct sdio_f
+ static int if_sdio_suspend(struct device *dev)
+ {
+ struct sdio_func *func = dev_to_sdio_func(dev);
+- int ret;
+ struct if_sdio_card *card = sdio_get_drvdata(func);
++ struct lbs_private *priv = card->priv;
++ int ret;
+
+ mmc_pm_flag_t flags = sdio_get_host_pm_caps(func);
++ priv->power_up_on_resume = false;
+
+ /* If we're powered off anyway, just let the mmc layer remove the
+ * card. */
+- if (!lbs_iface_active(card->priv))
+- return -ENOSYS;
++ if (!lbs_iface_active(priv)) {
++ if (priv->fw_ready) {
++ priv->power_up_on_resume = true;
++ if_sdio_power_off(card);
++ }
++
++ return 0;
++ }
+
+ dev_info(dev, "%s: suspend: PM flags = 0x%x\n",
+ sdio_func_id(func), flags);
+@@ -1306,9 +1314,14 @@ static int if_sdio_suspend(struct device
+ /* If we aren't being asked to wake on anything, we should bail out
+ * and let the SD stack power down the card.
+ */
+- if (card->priv->wol_criteria == EHS_REMOVE_WAKEUP) {
++ if (priv->wol_criteria == EHS_REMOVE_WAKEUP) {
+ dev_info(dev, "Suspend without wake params -- powering down card\n");
+- return -ENOSYS;
++ if (priv->fw_ready) {
++ priv->power_up_on_resume = true;
++ if_sdio_power_off(card);
++ }
++
++ return 0;
+ }
+
+ if (!(flags & MMC_PM_KEEP_POWER)) {
+@@ -1321,7 +1334,7 @@ static int if_sdio_suspend(struct device
+ if (ret)
+ return ret;
+
+- ret = lbs_suspend(card->priv);
++ ret = lbs_suspend(priv);
+ if (ret)
+ return ret;
+
+@@ -1336,6 +1349,11 @@ static int if_sdio_resume(struct device
+
+ dev_info(dev, "%s: resume: we're back\n", sdio_func_id(func));
+
++ if (card->priv->power_up_on_resume) {
++ if_sdio_power_on(card);
++ wait_event(card->pwron_waitq, card->priv->fw_ready);
++ }
++
+ ret = lbs_resume(card->priv);
+
+ return ret;
--- /dev/null
+From 6e9df95b76cad18f7b217bdad7bb8a26d63b8c47 Mon Sep 17 00:00:00 2001
+From: Kamalesh Babulal <kamalesh@linux.vnet.ibm.com>
+Date: Fri, 20 Jul 2018 15:16:42 +0530
+Subject: livepatch: Validate module/old func name length
+
+From: Kamalesh Babulal <kamalesh@linux.vnet.ibm.com>
+
+commit 6e9df95b76cad18f7b217bdad7bb8a26d63b8c47 upstream.
+
+A livepatch module author can pass a module name/old function name longer
+than the defined character limit. With an obj->name length greater than
+MODULE_NAME_LEN, the livepatch module gets loaded but waits forever for
+the module specified by obj->name to be loaded. It also populates a /sys
+directory with an untruncated object name.
+
+In the case of a funcs->old_name length greater than KSYM_NAME_LEN, it
+would not match any of the symbol table entries; instead it would loop
+through the symbol table comparing entries against a nonexistent function,
+which can be avoided.
+
+The same issues apply to misspelled/incorrect names. At least gatekeep
+modules whose string lengths are over the limit by checking their
+length during livepatch module registration.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Kamalesh Babulal <kamalesh@linux.vnet.ibm.com>
+Acked-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/livepatch/core.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/kernel/livepatch/core.c
++++ b/kernel/livepatch/core.c
+@@ -605,6 +605,9 @@ static int klp_init_func(struct klp_obje
+ if (!func->old_name || !func->new_func)
+ return -EINVAL;
+
++ if (strlen(func->old_name) >= KSYM_NAME_LEN)
++ return -EINVAL;
++
+ INIT_LIST_HEAD(&func->stack_node);
+ func->patched = false;
+ func->transition = false;
+@@ -678,6 +681,9 @@ static int klp_init_object(struct klp_pa
+ if (!obj->funcs)
+ return -EINVAL;
+
++ if (klp_is_module(obj) && strlen(obj->name) >= MODULE_NAME_LEN)
++ return -EINVAL;
++
+ obj->patched = false;
+ obj->mod = NULL;
+
--- /dev/null
+From 3512a18cbd8d09e22a790540cb9624c3c49827ba Mon Sep 17 00:00:00 2001
+From: "Gustavo A. R. Silva" <gustavo@embeddedor.com>
+Date: Thu, 26 Jul 2018 12:11:39 -0500
+Subject: mailbox: xgene-slimpro: Fix potential NULL pointer dereference
+
+From: Gustavo A. R. Silva <gustavo@embeddedor.com>
+
+commit 3512a18cbd8d09e22a790540cb9624c3c49827ba upstream.
+
+There is a potential execution path in which function
+platform_get_resource() returns NULL. If this happens,
+we will end up having a NULL pointer dereference.
+
+Fix this by replacing devm_ioremap with devm_ioremap_resource,
+which has the NULL check and the memory region request.
+
+This code was detected with the help of Coccinelle.
+
+Cc: stable@vger.kernel.org
+Fixes: f700e84f417b ("mailbox: Add support for APM X-Gene platform mailbox driver")
+Signed-off-by: Gustavo A. R. Silva <gustavo@embeddedor.com>
+Signed-off-by: Jassi Brar <jaswinder.singh@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mailbox/mailbox-xgene-slimpro.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/mailbox/mailbox-xgene-slimpro.c
++++ b/drivers/mailbox/mailbox-xgene-slimpro.c
+@@ -195,9 +195,9 @@ static int slimpro_mbox_probe(struct pla
+ platform_set_drvdata(pdev, ctx);
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+- mb_base = devm_ioremap(&pdev->dev, regs->start, resource_size(regs));
+- if (!mb_base)
+- return -ENOMEM;
++ mb_base = devm_ioremap_resource(&pdev->dev, regs);
++ if (IS_ERR(mb_base))
++ return PTR_ERR(mb_base);
+
+ /* Setup mailbox links */
+ for (i = 0; i < MBOX_CNT; i++) {
--- /dev/null
+From 1831af092308aa5a59ae61e47494e441c8be6b93 Mon Sep 17 00:00:00 2001
+From: Javier Martinez Canillas <javierm@redhat.com>
+Date: Sun, 10 Jun 2018 16:43:02 -0400
+Subject: media: Revert "[media] tvp5150: fix pad format frame height"
+
+From: Javier Martinez Canillas <javierm@redhat.com>
+
+commit 1831af092308aa5a59ae61e47494e441c8be6b93 upstream.
+
+This reverts commit 0866df8dffd514185bfab0d205db76e4c02cf1e4.
+
+The v4l uAPI documentation [0] makes clear that in the case of interlaced
+video (i.e: field is V4L2_FIELD_ALTERNATE) the height refers to the number
+of lines in the field and not the number of lines in the full frame (which
+is twice the field height for interlaced formats).
+
+So the original height calculation was correct, and it shouldn't have been
+changed by the mentioned commit.
+
+[0]:https://linuxtv.org/downloads/v4l-dvb-apis/uapi/v4l/subdev-formats.html
+
+Fixes: 0866df8dffd5 ("[media] tvp5150: fix pad format frame height")
+
+Signed-off-by: Javier Martinez Canillas <javierm@redhat.com>
+Cc: <stable@vger.kernel.org> # for v4.12 and up
+Signed-off-by: Hans Verkuil <hans.verkuil@cisco.com>
+Signed-off-by: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/media/i2c/tvp5150.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/media/i2c/tvp5150.c
++++ b/drivers/media/i2c/tvp5150.c
+@@ -871,7 +871,7 @@ static int tvp5150_fill_fmt(struct v4l2_
+ f = &format->format;
+
+ f->width = decoder->rect.width;
+- f->height = decoder->rect.height;
++ f->height = decoder->rect.height / 2;
+
+ f->code = MEDIA_BUS_FMT_UYVY8_2X8;
+ f->field = V4L2_FIELD_ALTERNATE;
--- /dev/null
+From 6afebb70ee7a4bde106dc1a875e7ac7997248f84 Mon Sep 17 00:00:00 2001
+From: Rafael David Tinoco <rafael.tinoco@linaro.org>
+Date: Fri, 6 Jul 2018 14:28:33 -0300
+Subject: mfd: hi655x: Fix regmap area declared size for hi655x
+
+From: Rafael David Tinoco <rafael.tinoco@linaro.org>
+
+commit 6afebb70ee7a4bde106dc1a875e7ac7997248f84 upstream.
+
+Fixes https://bugs.linaro.org/show_bug.cgi?id=3903
+
+LTP Functional tests have caused a bad paging request when triggering
+the regmap_read_debugfs() logic of the device PMIC Hi6553 (reading
+regmap/f8000000.pmic/registers file during read_all test):
+
+Unable to handle kernel paging request at virtual address ffff0
+[ffff00000984e000] pgd=0000000077ffe803, pud=0000000077ffd803,0
+Internal error: Oops: 96000007 [#1] SMP
+...
+Hardware name: HiKey Development Board (DT)
+...
+Call trace:
+ regmap_mmio_read8+0x24/0x40
+ regmap_mmio_read+0x48/0x70
+ _regmap_bus_reg_read+0x38/0x48
+ _regmap_read+0x68/0x170
+ regmap_read+0x50/0x78
+ regmap_read_debugfs+0x1a0/0x308
+ regmap_map_read_file+0x48/0x58
+ full_proxy_read+0x68/0x98
+ __vfs_read+0x48/0x80
+ vfs_read+0x94/0x150
+ SyS_read+0x6c/0xd8
+ el0_svc_naked+0x30/0x34
+Code: aa1e03e0 d503201f f9400280 8b334000 (39400000)
+
+Investigations have shown that, when triggered by the debugfs read()
+handler, the mmio regmap logic was reading a bigger (16k) register area
+than the one mapped by devm_ioremap_resource() during hi655x-pmic probe
+time (4k).
+
+This commit changes hi655x's max register, according to HW specs, to be
+the same as the one declared in the pmic device in hi6220's dts, fixing
+the issue.
+
+Cc: <stable@vger.kernel.org> #v4.9 #v4.14 #v4.16 #v4.17
+Signed-off-by: Rafael David Tinoco <rafael.tinoco@linaro.org>
+Signed-off-by: Lee Jones <lee.jones@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mfd/hi655x-pmic.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/mfd/hi655x-pmic.c
++++ b/drivers/mfd/hi655x-pmic.c
+@@ -49,7 +49,7 @@ static struct regmap_config hi655x_regma
+ .reg_bits = 32,
+ .reg_stride = HI655X_STRIDE,
+ .val_bits = 8,
+- .max_register = HI655X_BUS_ADDR(0xFFF),
++ .max_register = HI655X_BUS_ADDR(0x400) - HI655X_STRIDE,
+ };
+
+ static struct resource pwrkey_resources[] = {
--- /dev/null
+From 9faf870e559a710c44e747ba20383ea82d8ac5d2 Mon Sep 17 00:00:00 2001
+From: Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>
+Date: Wed, 22 Aug 2018 21:28:01 +0300
+Subject: mmc: renesas_sdhi_internal_dmac: fix #define RST_RESERVED_BITS
+
+From: Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>
+
+commit 9faf870e559a710c44e747ba20383ea82d8ac5d2 upstream.
+
+The DM_CM_RST register actually has bits 0-31 defaulting to 1s and bits
+32-63 defaulting to 0s -- fix off-by-one in #define RST_RESERVED_BITS.
+
+Signed-off-by: Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>
+Reviewed-by: Wolfram Sang <wsa+renesas@sang-engineering.com>
+Fixes: 2a68ea7896e3 ("mmc: renesas-sdhi: add support for R-Car Gen3 SDHI DMAC")
+Cc: stable@vger.kernel.org # v4.14+
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mmc/host/renesas_sdhi_internal_dmac.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
++++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+@@ -44,7 +44,7 @@
+ /* DM_CM_RST */
+ #define RST_DTRANRST1 BIT(9)
+ #define RST_DTRANRST0 BIT(8)
+-#define RST_RESERVED_BITS GENMASK_ULL(32, 0)
++#define RST_RESERVED_BITS GENMASK_ULL(31, 0)
+
+ /* DM_CM_INFO1 and DM_CM_INFO1_MASK */
+ #define INFO1_CLEAR 0
--- /dev/null
+From ac74f87c789af40936a80131c4759f3e72579c3a Mon Sep 17 00:00:00 2001
+From: Alexander Aring <aring@mojatatu.com>
+Date: Sat, 14 Jul 2018 12:52:10 -0400
+Subject: net: 6lowpan: fix reserved space for single frames
+
+From: Alexander Aring <aring@mojatatu.com>
+
+commit ac74f87c789af40936a80131c4759f3e72579c3a upstream.
+
+This patch fixes the handling of head and tailroom for single 6lowpan
+frames. We need to be sure we have an skb with the right head and
+tailroom for single frames. This patch does that by using
+skb_copy_expand() if the head and tailroom allocated by the upper layer
+are not sufficient.
+
+Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=195059
+Reported-by: David Palma <david.palma@ntnu.no>
+Reported-by: Rabi Narayan Sahoo <rabinarayans0828@gmail.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Alexander Aring <aring@mojatatu.com>
+Signed-off-by: Stefan Schmidt <stefan@datenfreihafen.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/ieee802154/6lowpan/tx.c | 21 ++++++++++++++++++---
+ 1 file changed, 18 insertions(+), 3 deletions(-)
+
+--- a/net/ieee802154/6lowpan/tx.c
++++ b/net/ieee802154/6lowpan/tx.c
+@@ -265,9 +265,24 @@ netdev_tx_t lowpan_xmit(struct sk_buff *
+ /* We must take a copy of the skb before we modify/replace the ipv6
+ * header as the header could be used elsewhere
+ */
+- skb = skb_unshare(skb, GFP_ATOMIC);
+- if (!skb)
+- return NET_XMIT_DROP;
++ if (unlikely(skb_headroom(skb) < ldev->needed_headroom ||
++ skb_tailroom(skb) < ldev->needed_tailroom)) {
++ struct sk_buff *nskb;
++
++ nskb = skb_copy_expand(skb, ldev->needed_headroom,
++ ldev->needed_tailroom, GFP_ATOMIC);
++ if (likely(nskb)) {
++ consume_skb(skb);
++ skb = nskb;
++ } else {
++ kfree_skb(skb);
++ return NET_XMIT_DROP;
++ }
++ } else {
++ skb = skb_unshare(skb, GFP_ATOMIC);
++ if (!skb)
++ return NET_XMIT_DROP;
++ }
+
+ ret = lowpan_header(skb, ldev, &dgram_size, &dgram_offset);
+ if (ret < 0) {
--- /dev/null
+From 7913690dcc5e18e235769fd87c34143072f5dbea Mon Sep 17 00:00:00 2001
+From: Tomas Bortoli <tomasbortoli@gmail.com>
+Date: Tue, 10 Jul 2018 00:29:43 +0200
+Subject: net/9p/client.c: version pointer uninitialized
+
+From: Tomas Bortoli <tomasbortoli@gmail.com>
+
+commit 7913690dcc5e18e235769fd87c34143072f5dbea upstream.
+
+p9_client_version() does not initialize the version pointer. If the
+call to p9pdu_readf() returns an error before version has been allocated
+in p9pdu_readf(), the function jumps to the "error" label and tries to
+free the version pointer. Because version was never initialized, the
+free is performed on uninitialized, garbage data and provokes a crash.
+
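+The underlying rule is a common C idiom: a pointer that may be passed to
+a free routine on an error path must start out as NULL, since free(NULL)
+and kfree(NULL) are defined no-ops while freeing an indeterminate pointer
+is undefined behaviour. A user-space sketch (function names invented):
+
+  #include <stdlib.h>
+  #include <string.h>
+
+  /* Stand-in for p9pdu_readf(): may fail before *out is ever assigned. */
+  static int read_version(int fail, char **out)
+  {
+      if (fail)
+          return -1;              /* error before allocating anything */
+      *out = strdup("9P2000.L");
+      return *out ? 0 : -1;
+  }
+
+  int main(void)
+  {
+      char *version = NULL;       /* without "= NULL" the error path is UB */
+      int err = read_version(1, &version);
+
+      if (err)
+          goto error;
+      /* ... use version ... */
+  error:
+      free(version);              /* free(NULL) is a defined no-op */
+      return err ? 1 : 0;
+  }
+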
+Link: http://lkml.kernel.org/r/20180709222943.19503-1-tomasbortoli@gmail.com
+Signed-off-by: Tomas Bortoli <tomasbortoli@gmail.com>
+Reported-by: syzbot+65c6b72f284a39d416b4@syzkaller.appspotmail.com
+Reviewed-by: Jun Piao <piaojun@huawei.com>
+Reviewed-by: Yiwen Jiang <jiangyiwen@huawei.com>
+Cc: Eric Van Hensbergen <ericvh@gmail.com>
+Cc: Ron Minnich <rminnich@sandia.gov>
+Cc: Latchesar Ionkov <lucho@ionkov.net>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Cc: stable@vger.kernel.org
+Signed-off-by: Dominique Martinet <dominique.martinet@cea.fr>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/9p/client.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/9p/client.c
++++ b/net/9p/client.c
+@@ -955,7 +955,7 @@ static int p9_client_version(struct p9_c
+ {
+ int err = 0;
+ struct p9_req_t *req;
+- char *version;
++ char *version = NULL;
+ int msize;
+
+ p9_debug(P9_DEBUG_9P, ">>> TVERSION msize %d protocol %d\n",
--- /dev/null
+From 430ac66eb4c5b5c4eb846b78ebf65747510b30f1 Mon Sep 17 00:00:00 2001
+From: Tomas Bortoli <tomasbortoli@gmail.com>
+Date: Fri, 20 Jul 2018 11:27:30 +0200
+Subject: net/9p/trans_fd.c: fix race-condition by flushing workqueue before the kfree()
+
+From: Tomas Bortoli <tomasbortoli@gmail.com>
+
+commit 430ac66eb4c5b5c4eb846b78ebf65747510b30f1 upstream.
+
+The patch adds the flush in p9_mux_poll_stop(), as that is the function
+used by p9_conn_destroy(), in turn called by p9_fd_close(), to stop the
+async polling associated with the connection's data.
+
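+Conceptually this is the usual rule for asynchronous work: make sure the
+worker can no longer be running before freeing the data it dereferences.
+A rough user-space analogy with a plain thread standing in for the work
+item (names invented; build with -lpthread):
+
+  #include <pthread.h>
+  #include <stdio.h>
+  #include <stdlib.h>
+  #include <string.h>
+
+  struct conn {
+      char name[16];
+  };
+
+  static void *poll_worker(void *arg)
+  {
+      struct conn *c = arg;
+
+      printf("polling %s\n", c->name);   /* must not outlive c */
+      return NULL;
+  }
+
+  int main(void)
+  {
+      struct conn *c = malloc(sizeof(*c));
+      pthread_t worker;
+
+      if (!c)
+          return 1;
+      strcpy(c->name, "p9conn");
+
+      if (pthread_create(&worker, NULL, poll_worker, c)) {
+          free(c);
+          return 1;
+      }
+
+      pthread_join(worker, NULL);  /* analogue of flush_work(): wait first */
+      free(c);                     /* only now is it safe to free */
+      return 0;
+  }
+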
+Link: http://lkml.kernel.org/r/20180720092730.27104-1-tomasbortoli@gmail.com
+Signed-off-by: Tomas Bortoli <tomasbortoli@gmail.com>
+Reported-by: syzbot+39749ed7d9ef6dfb23f6@syzkaller.appspotmail.com
+To: Eric Van Hensbergen <ericvh@gmail.com>
+To: Ron Minnich <rminnich@sandia.gov>
+To: Latchesar Ionkov <lucho@ionkov.net>
+Cc: Yiwen Jiang <jiangyiwen@huwei.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Dominique Martinet <dominique.martinet@cea.fr>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/9p/trans_fd.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/net/9p/trans_fd.c
++++ b/net/9p/trans_fd.c
+@@ -185,6 +185,8 @@ static void p9_mux_poll_stop(struct p9_c
+ spin_lock_irqsave(&p9_poll_lock, flags);
+ list_del_init(&m->poll_pending_link);
+ spin_unlock_irqrestore(&p9_poll_lock, flags);
++
++ flush_work(&p9_poll_work);
+ }
+
+ /**
--- /dev/null
+From f9c52831133050c6b82aa8b6831c92da2bbf2a0b Mon Sep 17 00:00:00 2001
+From: Alexander Aring <aring@mojatatu.com>
+Date: Mon, 2 Jul 2018 16:32:03 -0400
+Subject: net: mac802154: tx: expand tailroom if necessary
+
+From: Alexander Aring <aring@mojatatu.com>
+
+commit f9c52831133050c6b82aa8b6831c92da2bbf2a0b upstream.
+
+This patch is necessary for the AF_PACKET case, or for any other socket
+interface that did not allocate the necessary tailroom.
+
+Reported-by: David Palma <david.palma@ntnu.no>
+Reported-by: Rabi Narayan Sahoo <rabinarayans0828@gmail.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Alexander Aring <aring@mojatatu.com>
+Signed-off-by: Stefan Schmidt <stefan@datenfreihafen.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/mac802154/tx.c | 15 ++++++++++++++-
+ 1 file changed, 14 insertions(+), 1 deletion(-)
+
+--- a/net/mac802154/tx.c
++++ b/net/mac802154/tx.c
+@@ -63,8 +63,21 @@ ieee802154_tx(struct ieee802154_local *l
+ int ret;
+
+ if (!(local->hw.flags & IEEE802154_HW_TX_OMIT_CKSUM)) {
+- u16 crc = crc_ccitt(0, skb->data, skb->len);
++ struct sk_buff *nskb;
++ u16 crc;
+
++ if (unlikely(skb_tailroom(skb) < IEEE802154_FCS_LEN)) {
++ nskb = skb_copy_expand(skb, 0, IEEE802154_FCS_LEN,
++ GFP_ATOMIC);
++ if (likely(nskb)) {
++ consume_skb(skb);
++ skb = nskb;
++ } else {
++ goto err_tx;
++ }
++ }
++
++ crc = crc_ccitt(0, skb->data, skb->len);
+ put_unaligned_le16(crc, skb_put(skb, 2));
+ }
+
--- /dev/null
+From 67810693077afc1ebf9e1646af300436cb8103c2 Mon Sep 17 00:00:00 2001
+From: Amir Goldstein <amir73il@gmail.com>
+Date: Tue, 17 Jul 2018 16:05:38 +0300
+Subject: ovl: fix wrong use of impure dir cache in ovl_iterate()
+
+From: Amir Goldstein <amir73il@gmail.com>
+
+commit 67810693077afc1ebf9e1646af300436cb8103c2 upstream.
+
+Only upper dir can be impure, but if we are in the middle of
+iterating a lower real dir, dir could be copied up and marked
+impure. We only want the impure cache if we started iterating
+a real upper dir to begin with.
+
+Aditya Kali reported that the following reproducer hits the
+WARN_ON(!cache->refcount) in ovl_get_cache():
+
+ docker run --rm drupal:8.5.4-fpm-alpine \
+ sh -c 'cd /var/www/html/vendor/symfony && \
+ chown -R www-data:www-data . && ls -l .'
+
+Reported-by: Aditya Kali <adityakali@google.com>
+Tested-by: Aditya Kali <adityakali@google.com>
+Fixes: 4edb83bb1041 ('ovl: constant d_ino for non-merge dirs')
+Cc: <stable@vger.kernel.org> # v4.14
+Signed-off-by: Amir Goldstein <amir73il@gmail.com>
+Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/overlayfs/readdir.c | 19 +++++++++++++++++--
+ 1 file changed, 17 insertions(+), 2 deletions(-)
+
+--- a/fs/overlayfs/readdir.c
++++ b/fs/overlayfs/readdir.c
+@@ -623,6 +623,21 @@ static int ovl_fill_real(struct dir_cont
+ return orig_ctx->actor(orig_ctx, name, namelen, offset, ino, d_type);
+ }
+
++static bool ovl_is_impure_dir(struct file *file)
++{
++ struct ovl_dir_file *od = file->private_data;
++ struct inode *dir = d_inode(file->f_path.dentry);
++
++ /*
++ * Only upper dir can be impure, but if we are in the middle of
++ * iterating a lower real dir, dir could be copied up and marked
++ * impure. We only want the impure cache if we started iterating
++ * a real upper dir to begin with.
++ */
++ return od->is_upper && ovl_test_flag(OVL_IMPURE, dir);
++
++}
++
+ static int ovl_iterate_real(struct file *file, struct dir_context *ctx)
+ {
+ int err;
+@@ -646,7 +661,7 @@ static int ovl_iterate_real(struct file
+ rdt.parent_ino = stat.ino;
+ }
+
+- if (ovl_test_flag(OVL_IMPURE, d_inode(dir))) {
++ if (ovl_is_impure_dir(file)) {
+ rdt.cache = ovl_cache_get_impure(&file->f_path);
+ if (IS_ERR(rdt.cache))
+ return PTR_ERR(rdt.cache);
+@@ -676,7 +691,7 @@ static int ovl_iterate(struct file *file
+ * entries.
+ */
+ if (ovl_same_sb(dentry->d_sb) &&
+- (ovl_test_flag(OVL_IMPURE, d_inode(dentry)) ||
++ (ovl_is_impure_dir(file) ||
+ OVL_TYPE_MERGE(ovl_path_type(dentry->d_parent)))) {
+ return ovl_iterate_real(file, ctx);
+ }
--- /dev/null
+From 1bd6a1c4b80a28d975287630644e6b47d0f977a5 Mon Sep 17 00:00:00 2001
+From: Hari Bathini <hbathini@linux.ibm.com>
+Date: Tue, 7 Aug 2018 02:12:45 +0530
+Subject: powerpc/fadump: handle crash memory ranges array index overflow
+
+From: Hari Bathini <hbathini@linux.ibm.com>
+
+commit 1bd6a1c4b80a28d975287630644e6b47d0f977a5 upstream.
+
+Crash memory ranges is an array of memory ranges of the crashing kernel
+to be exported as a dump via the /proc/vmcore file. The size of the
+array is set based on INIT_MEMBLOCK_REGIONS, which works fine in most
+cases where the memblock memory regions count is less than the
+INIT_MEMBLOCK_REGIONS value. But this count can grow beyond
+INIT_MEMBLOCK_REGIONS since commit 142b45a72e22 ("memblock: Add array
+resizing support").
+
+On large-memory systems with a few DLPAR operations, the memblock memory
+regions count can exceed the INIT_MEMBLOCK_REGIONS value. On such
+systems, registering fadump results in a crash or other system failures
+like the one below:
+
+ task: c00007f39a290010 ti: c00000000b738000 task.ti: c00000000b738000
+ NIP: c000000000047df4 LR: c0000000000f9e58 CTR: c00000000010f180
+ REGS: c00000000b73b570 TRAP: 0300 Tainted: G L X (4.4.140+)
+ MSR: 8000000000009033 <SF,EE,ME,IR,DR,RI,LE> CR: 22004484 XER: 20000000
+ CFAR: c000000000008500 DAR: 000007a450000000 DSISR: 40000000 SOFTE: 0
+ ...
+ NIP [c000000000047df4] smp_send_reschedule+0x24/0x80
+ LR [c0000000000f9e58] resched_curr+0x138/0x160
+ Call Trace:
+ resched_curr+0x138/0x160 (unreliable)
+ check_preempt_curr+0xc8/0xf0
+ ttwu_do_wakeup+0x38/0x150
+ try_to_wake_up+0x224/0x4d0
+ __wake_up_common+0x94/0x100
+ ep_poll_callback+0xac/0x1c0
+ __wake_up_common+0x94/0x100
+ __wake_up_sync_key+0x70/0xa0
+ sock_def_readable+0x58/0xa0
+ unix_stream_sendmsg+0x2dc/0x4c0
+ sock_sendmsg+0x68/0xa0
+ ___sys_sendmsg+0x2cc/0x2e0
+ __sys_sendmsg+0x5c/0xc0
+ SyS_socketcall+0x36c/0x3f0
+ system_call+0x3c/0x100
+
+as the array index overflow is not checked for while setting up crash
+memory ranges, causing memory corruption. To resolve this issue,
+dynamically allocate memory for the crash memory ranges and resize it
+incrementally, in units of pagesize, on hitting the array size limit.
+
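+The growth pattern used by the patch (krealloc() in PAGE_SIZE steps once
+the current capacity is exhausted) can be sketched in user space as below;
+the 4096-byte step and the range struct are illustrative stand-ins, not
+the kernel definitions:
+
+  #include <stdio.h>
+  #include <stdlib.h>
+
+  #define STEP 4096              /* stand-in for PAGE_SIZE */
+
+  struct range {
+      unsigned long long base;
+      unsigned long long size;
+  };
+
+  static struct range *ranges;
+  static size_t ranges_bytes;    /* current allocation size */
+  static size_t nr_ranges;       /* entries in use */
+  static size_t max_ranges;      /* entries that fit in the allocation */
+
+  static int add_range(unsigned long long base, unsigned long long size)
+  {
+      if (nr_ranges == max_ranges) {      /* array is full: grow it */
+          size_t new_bytes = ranges_bytes + STEP;
+          struct range *bigger = realloc(ranges, new_bytes);
+
+          if (!bigger)
+              return -1;
+          ranges = bigger;
+          ranges_bytes = new_bytes;
+          max_ranges = new_bytes / sizeof(*ranges);
+      }
+      ranges[nr_ranges].base = base;
+      ranges[nr_ranges].size = size;
+      nr_ranges++;
+      return 0;
+  }
+
+  int main(void)
+  {
+      for (unsigned long long i = 0; i < 1000; i++)
+          if (add_range(i * 0x1000, 0x1000))
+              return 1;
+      printf("%zu ranges, %zu bytes allocated\n", nr_ranges, ranges_bytes);
+      free(ranges);
+      return 0;
+  }
+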
+Fixes: 2df173d9e85d ("fadump: Initialize elfcore header and add PT_LOAD program headers.")
+Cc: stable@vger.kernel.org # v3.4+
+Signed-off-by: Hari Bathini <hbathini@linux.ibm.com>
+Reviewed-by: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
+[mpe: Just use PAGE_SIZE directly, fixup variable placement]
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/include/asm/fadump.h | 3 -
+ arch/powerpc/kernel/fadump.c | 91 ++++++++++++++++++++++++++++++++------
+ 2 files changed, 77 insertions(+), 17 deletions(-)
+
+--- a/arch/powerpc/include/asm/fadump.h
++++ b/arch/powerpc/include/asm/fadump.h
+@@ -195,9 +195,6 @@ struct fadump_crash_info_header {
+ struct cpumask online_mask;
+ };
+
+-/* Crash memory ranges */
+-#define INIT_CRASHMEM_RANGES (INIT_MEMBLOCK_REGIONS + 2)
+-
+ struct fad_crash_memory_ranges {
+ unsigned long long base;
+ unsigned long long size;
+--- a/arch/powerpc/kernel/fadump.c
++++ b/arch/powerpc/kernel/fadump.c
+@@ -47,8 +47,10 @@ static struct fadump_mem_struct fdm;
+ static const struct fadump_mem_struct *fdm_active;
+
+ static DEFINE_MUTEX(fadump_mutex);
+-struct fad_crash_memory_ranges crash_memory_ranges[INIT_CRASHMEM_RANGES];
++struct fad_crash_memory_ranges *crash_memory_ranges;
++int crash_memory_ranges_size;
+ int crash_mem_ranges;
++int max_crash_mem_ranges;
+
+ /* Scan the Firmware Assisted dump configuration details. */
+ int __init early_init_dt_scan_fw_dump(unsigned long node,
+@@ -843,38 +845,88 @@ static int __init process_fadump(const s
+ return 0;
+ }
+
+-static inline void fadump_add_crash_memory(unsigned long long base,
+- unsigned long long end)
++static void free_crash_memory_ranges(void)
++{
++ kfree(crash_memory_ranges);
++ crash_memory_ranges = NULL;
++ crash_memory_ranges_size = 0;
++ max_crash_mem_ranges = 0;
++}
++
++/*
++ * Allocate or reallocate crash memory ranges array in incremental units
++ * of PAGE_SIZE.
++ */
++static int allocate_crash_memory_ranges(void)
++{
++ struct fad_crash_memory_ranges *new_array;
++ u64 new_size;
++
++ new_size = crash_memory_ranges_size + PAGE_SIZE;
++ pr_debug("Allocating %llu bytes of memory for crash memory ranges\n",
++ new_size);
++
++ new_array = krealloc(crash_memory_ranges, new_size, GFP_KERNEL);
++ if (new_array == NULL) {
++ pr_err("Insufficient memory for setting up crash memory ranges\n");
++ free_crash_memory_ranges();
++ return -ENOMEM;
++ }
++
++ crash_memory_ranges = new_array;
++ crash_memory_ranges_size = new_size;
++ max_crash_mem_ranges = (new_size /
++ sizeof(struct fad_crash_memory_ranges));
++ return 0;
++}
++
++static inline int fadump_add_crash_memory(unsigned long long base,
++ unsigned long long end)
+ {
+ if (base == end)
+- return;
++ return 0;
++
++ if (crash_mem_ranges == max_crash_mem_ranges) {
++ int ret;
++
++ ret = allocate_crash_memory_ranges();
++ if (ret)
++ return ret;
++ }
+
+ pr_debug("crash_memory_range[%d] [%#016llx-%#016llx], %#llx bytes\n",
+ crash_mem_ranges, base, end - 1, (end - base));
+ crash_memory_ranges[crash_mem_ranges].base = base;
+ crash_memory_ranges[crash_mem_ranges].size = end - base;
+ crash_mem_ranges++;
++ return 0;
+ }
+
+-static void fadump_exclude_reserved_area(unsigned long long start,
++static int fadump_exclude_reserved_area(unsigned long long start,
+ unsigned long long end)
+ {
+ unsigned long long ra_start, ra_end;
++ int ret = 0;
+
+ ra_start = fw_dump.reserve_dump_area_start;
+ ra_end = ra_start + fw_dump.reserve_dump_area_size;
+
+ if ((ra_start < end) && (ra_end > start)) {
+ if ((start < ra_start) && (end > ra_end)) {
+- fadump_add_crash_memory(start, ra_start);
+- fadump_add_crash_memory(ra_end, end);
++ ret = fadump_add_crash_memory(start, ra_start);
++ if (ret)
++ return ret;
++
++ ret = fadump_add_crash_memory(ra_end, end);
+ } else if (start < ra_start) {
+- fadump_add_crash_memory(start, ra_start);
++ ret = fadump_add_crash_memory(start, ra_start);
+ } else if (ra_end < end) {
+- fadump_add_crash_memory(ra_end, end);
++ ret = fadump_add_crash_memory(ra_end, end);
+ }
+ } else
+- fadump_add_crash_memory(start, end);
++ ret = fadump_add_crash_memory(start, end);
++
++ return ret;
+ }
+
+ static int fadump_init_elfcore_header(char *bufp)
+@@ -914,10 +966,11 @@ static int fadump_init_elfcore_header(ch
+ * Traverse through memblock structure and setup crash memory ranges. These
+ * ranges will be used create PT_LOAD program headers in elfcore header.
+ */
+-static void fadump_setup_crash_memory_ranges(void)
++static int fadump_setup_crash_memory_ranges(void)
+ {
+ struct memblock_region *reg;
+ unsigned long long start, end;
++ int ret;
+
+ pr_debug("Setup crash memory ranges.\n");
+ crash_mem_ranges = 0;
+@@ -928,7 +981,9 @@ static void fadump_setup_crash_memory_ra
+ * specified during fadump registration. We need to create a separate
+ * program header for this chunk with the correct offset.
+ */
+- fadump_add_crash_memory(RMA_START, fw_dump.boot_memory_size);
++ ret = fadump_add_crash_memory(RMA_START, fw_dump.boot_memory_size);
++ if (ret)
++ return ret;
+
+ for_each_memblock(memory, reg) {
+ start = (unsigned long long)reg->base;
+@@ -948,8 +1003,12 @@ static void fadump_setup_crash_memory_ra
+ }
+
+ /* add this range excluding the reserved dump area. */
+- fadump_exclude_reserved_area(start, end);
++ ret = fadump_exclude_reserved_area(start, end);
++ if (ret)
++ return ret;
+ }
++
++ return 0;
+ }
+
+ /*
+@@ -1072,6 +1131,7 @@ static int register_fadump(void)
+ {
+ unsigned long addr;
+ void *vaddr;
++ int ret;
+
+ /*
+ * If no memory is reserved then we can not register for firmware-
+@@ -1080,7 +1140,9 @@ static int register_fadump(void)
+ if (!fw_dump.reserve_dump_area_size)
+ return -ENODEV;
+
+- fadump_setup_crash_memory_ranges();
++ ret = fadump_setup_crash_memory_ranges();
++ if (ret)
++ return ret;
+
+ addr = be64_to_cpu(fdm.rmr_region.destination_address) + be64_to_cpu(fdm.rmr_region.source_len);
+ /* Initialize fadump crash info header. */
+@@ -1158,6 +1220,7 @@ void fadump_cleanup(void)
+ } else if (fw_dump.dump_registered) {
+ /* Un-register Firmware-assisted dump if it was registered. */
+ fadump_unregister_dump(&fdm);
++ free_crash_memory_ranges();
+ }
+ }
+
--- /dev/null
+From db2173198b9513f7add8009f225afa1f1c79bcc6 Mon Sep 17 00:00:00 2001
+From: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Date: Fri, 17 Aug 2018 17:30:39 +1000
+Subject: powerpc/powernv/pci: Work around races in PCI bridge enabling
+
+From: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+
+commit db2173198b9513f7add8009f225afa1f1c79bcc6 upstream.
+
+The generic code is racy when multiple children of a PCI bridge try to
+enable it simultaneously.
+
+This leads to drivers trying to access a device through a
+not-yet-enabled bridge, which causes EEH errors under various
+circumstances when using parallel driver probing.
+
+There is work going on to fix that properly in the PCI core but it
+will take some time.
+
+x86 gets away with it because (outside of hotplug), the BIOS enables
+all the bridges at boot time.
+
+This patch does the same thing on powernv by enabling all bridges that
+have child devices at boot time, thus avoiding subsequent races. It's
+suitable for backporting to stable and distros, while the proper PCI
+fix will probably be significantly more invasive.
+
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Cc: stable@vger.kernel.org
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/platforms/powernv/pci-ioda.c | 37 ++++++++++++++++++++++++++++++
+ 1 file changed, 37 insertions(+)
+
+--- a/arch/powerpc/platforms/powernv/pci-ioda.c
++++ b/arch/powerpc/platforms/powernv/pci-ioda.c
+@@ -3286,12 +3286,49 @@ static void pnv_pci_ioda_create_dbgfs(vo
+ #endif /* CONFIG_DEBUG_FS */
+ }
+
++static void pnv_pci_enable_bridge(struct pci_bus *bus)
++{
++ struct pci_dev *dev = bus->self;
++ struct pci_bus *child;
++
++ /* Empty bus ? bail */
++ if (list_empty(&bus->devices))
++ return;
++
++ /*
++ * If there's a bridge associated with that bus enable it. This works
++ * around races in the generic code if the enabling is done during
++ * parallel probing. This can be removed once those races have been
++ * fixed.
++ */
++ if (dev) {
++ int rc = pci_enable_device(dev);
++ if (rc)
++ pci_err(dev, "Error enabling bridge (%d)\n", rc);
++ pci_set_master(dev);
++ }
++
++ /* Perform the same to child busses */
++ list_for_each_entry(child, &bus->children, node)
++ pnv_pci_enable_bridge(child);
++}
++
++static void pnv_pci_enable_bridges(void)
++{
++ struct pci_controller *hose;
++
++ list_for_each_entry(hose, &hose_list, list_node)
++ pnv_pci_enable_bridge(hose->bus);
++}
++
+ static void pnv_pci_ioda_fixup(void)
+ {
+ pnv_pci_ioda_setup_PEs();
+ pnv_pci_ioda_setup_iommu_api();
+ pnv_pci_ioda_create_dbgfs();
+
++ pnv_pci_enable_bridges();
++
+ #ifdef CONFIG_EEH
+ eeh_init();
+ eeh_addr_cache_build();
--- /dev/null
+From cd813e1cd7122f2c261dce5b54d1e0c97f80e1a5 Mon Sep 17 00:00:00 2001
+From: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
+Date: Tue, 7 Aug 2018 19:46:46 +0530
+Subject: powerpc/pseries: Fix endianness while restoring of r3 in MCE handler.
+
+From: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
+
+commit cd813e1cd7122f2c261dce5b54d1e0c97f80e1a5 upstream.
+
+During a Machine Check interrupt on the pseries platform, register r3
+points to the RTAS extended event log passed by the hypervisor. Since
+the hypervisor uses r3 to pass the pointer to the RTAS log, it stores
+the original r3 value at the start of the memory (first 8 bytes)
+pointed to by r3. Since the hypervisor stores this info, and the RTAS
+log, in big-endian format, Linux must restore the r3 value in the
+correct endianness for the host.
+
+Without this patch, when the MCE handler, after recovery, returns to the
+code that caused the MCE, it may end up with a Data SLB access interrupt
+for an invalid address, followed by a kernel panic or hang.
+
+ Severe Machine check interrupt [Recovered]
+ NIP [d00000000ca301b8]: init_module+0x1b8/0x338 [bork_kernel]
+ Initiator: CPU
+ Error type: SLB [Multihit]
+ Effective address: d00000000ca70000
+ cpu 0xa: Vector: 380 (Data SLB Access) at [c0000000fc7775b0]
+ pc: c0000000009694c0: vsnprintf+0x80/0x480
+ lr: c0000000009698e0: vscnprintf+0x20/0x60
+ sp: c0000000fc777830
+ msr: 8000000002009033
+ dar: a803a30c000000d0
+ current = 0xc00000000bc9ef00
+ paca = 0xc00000001eca5c00 softe: 3 irq_happened: 0x01
+ pid = 8860, comm = insmod
+ vscnprintf+0x20/0x60
+ vprintk_emit+0xb4/0x4b0
+ vprintk_func+0x5c/0xd0
+ printk+0x38/0x4c
+ init_module+0x1c0/0x338 [bork_kernel]
+ do_one_initcall+0x54/0x230
+ do_init_module+0x8c/0x248
+ load_module+0x12b8/0x15b0
+ sys_finit_module+0xa8/0x110
+ system_call+0x58/0x6c
+ --- Exception: c00 (System Call) at 00007fff8bda0644
+ SP (7fffdfbfe980) is in userspace
+
+This patch fixes this issue.
+
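+For readers unfamiliar with the accessor, be64_to_cpu() reinterprets a
+64-bit value stored big-endian in memory as a host-order integer. A
+stand-alone user-space illustration (using glibc's be64toh() rather than
+the kernel helper):
+
+  #include <endian.h>
+  #include <stdint.h>
+  #include <stdio.h>
+  #include <string.h>
+
+  int main(void)
+  {
+      /* 0x1122334455667788 laid out in big-endian byte order, the way
+       * the hypervisor stores the saved r3 value. */
+      unsigned char raw[8] = { 0x11, 0x22, 0x33, 0x44,
+                               0x55, 0x66, 0x77, 0x88 };
+      uint64_t stored, host;
+
+      memcpy(&stored, raw, sizeof(stored));
+      host = be64toh(stored);            /* analogue of be64_to_cpu() */
+
+      printf("raw load:  %#llx\n", (unsigned long long)stored);
+      printf("converted: %#llx\n", (unsigned long long)host);
+      return 0;
+  }
+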
+Fixes: a08a53ea4c97 ("powerpc/le: Enable RTAS events support")
+Cc: stable@vger.kernel.org # v3.15+
+Reviewed-by: Nicholas Piggin <npiggin@gmail.com>
+Signed-off-by: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/platforms/pseries/ras.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/powerpc/platforms/pseries/ras.c
++++ b/arch/powerpc/platforms/pseries/ras.c
+@@ -360,7 +360,7 @@ static struct rtas_error_log *fwnmi_get_
+ }
+
+ savep = __va(regs->gpr[3]);
+- regs->gpr[3] = savep[0]; /* restore original r3 */
++ regs->gpr[3] = be64_to_cpu(savep[0]); /* restore original r3 */
+
+ /* If it isn't an extended log we can use the per cpu 64bit buffer */
+ h = (struct rtas_error_log *)&savep[1];
--- /dev/null
+From d1c392c9e2a301f38998a353f467f76414e38725 Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (VMware)" <rostedt@goodmis.org>
+Date: Wed, 5 Sep 2018 16:29:49 -0400
+Subject: printk/tracing: Do not trace printk_nmi_enter()
+
+From: Steven Rostedt (VMware) <rostedt@goodmis.org>
+
+commit d1c392c9e2a301f38998a353f467f76414e38725 upstream.
+
+I hit the following splat in my tests:
+
+------------[ cut here ]------------
+IRQs not enabled as expected
+WARNING: CPU: 3 PID: 0 at kernel/time/tick-sched.c:982 tick_nohz_idle_enter+0x44/0x8c
+Modules linked in: ip6t_REJECT nf_reject_ipv6 ip6table_filter ip6_tables ipv6
+CPU: 3 PID: 0 Comm: swapper/3 Not tainted 4.19.0-rc2-test+ #2
+Hardware name: MSI MS-7823/CSM-H87M-G43 (MS-7823), BIOS V1.6 02/22/2014
+EIP: tick_nohz_idle_enter+0x44/0x8c
+Code: ec 05 00 00 00 75 26 83 b8 c0 05 00 00 00 75 1d 80 3d d0 36 3e c1 00
+75 14 68 94 63 12 c1 c6 05 d0 36 3e c1 01 e8 04 ee f8 ff <0f> 0b 58 fa bb a0
+e5 66 c1 e8 25 0f 04 00 64 03 1d 28 31 52 c1 8b
+EAX: 0000001c EBX: f26e7f8c ECX: 00000006 EDX: 00000007
+ESI: f26dd1c0 EDI: 00000000 EBP: f26e7f40 ESP: f26e7f38
+DS: 007b ES: 007b FS: 00d8 GS: 00e0 SS: 0068 EFLAGS: 00010296
+CR0: 80050033 CR2: 0813c6b0 CR3: 2f342000 CR4: 001406f0
+Call Trace:
+ do_idle+0x33/0x202
+ cpu_startup_entry+0x61/0x63
+ start_secondary+0x18e/0x1ed
+ startup_32_smp+0x164/0x168
+irq event stamp: 18773830
+hardirqs last enabled at (18773829): [<c040150c>] trace_hardirqs_on_thunk+0xc/0x10
+hardirqs last disabled at (18773830): [<c040151c>] trace_hardirqs_off_thunk+0xc/0x10
+softirqs last enabled at (18773824): [<c0ddaa6f>] __do_softirq+0x25f/0x2bf
+softirqs last disabled at (18773767): [<c0416bbe>] call_on_stack+0x45/0x4b
+---[ end trace b7c64aa79e17954a ]---
+
+After a bit of debugging, I found what was happening. This would trigger
+when running "perf" with a high NMI interrupt rate while enabling and
+disabling the function tracer. Ftrace uses breakpoints to convert the nops
+at the start of functions into calls to the function trampolines. The
+breakpoint traps disable interrupts, and this makes calls into lockdep via
+trace_hardirqs_off_thunk in the entry.S code. What happens is the following:
+
+ do_idle {
+
+ [interrupts enabled]
+
+ <interrupt> [interrupts disabled]
+ TRACE_IRQS_OFF [lockdep says irqs off]
+ [...]
+ TRACE_IRQS_IRET
+ test if pt_regs say return to interrupts enabled [yes]
+ TRACE_IRQS_ON [lockdep says irqs are on]
+
+ <nmi>
+ nmi_enter() {
+ printk_nmi_enter() [traced by ftrace]
+ [ hit ftrace breakpoint ]
+ <breakpoint exception>
+ TRACE_IRQS_OFF [lockdep says irqs off]
+ [...]
+ TRACE_IRQS_IRET [return from breakpoint]
+ test if pt_regs say interrupts enabled [no]
+ [iret back to interrupt]
+ [iret back to code]
+
+ tick_nohz_idle_enter() {
+
+ lockdep_assert_irqs_enabled() [lockdep say no!]
+
+Although interrupts are indeed enabled, lockdep thinks they are not, and
+since we now do asserts via lockdep, it gives a false warning. The issue
+here is that printk_nmi_enter() is called before lockdep_off(), which
+disables lockdep (for this reason) in NMIs. By simply not allowing ftrace
+to see printk_nmi_enter() (via the notrace annotation) we keep lockdep
+from getting confused.
+
+Cc: stable@vger.kernel.org
+Fixes: 42a0bb3f71383 ("printk/nmi: generic solution for safe printk in NMI")
+Acked-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
+Acked-by: Petr Mladek <pmladek@suse.com>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/printk/printk_safe.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/kernel/printk/printk_safe.c
++++ b/kernel/printk/printk_safe.c
+@@ -309,12 +309,12 @@ static __printf(1, 0) int vprintk_nmi(co
+ return printk_safe_log_store(s, fmt, args);
+ }
+
+-void printk_nmi_enter(void)
++void notrace printk_nmi_enter(void)
+ {
+ this_cpu_or(printk_context, PRINTK_NMI_CONTEXT_MASK);
+ }
+
+-void printk_nmi_exit(void)
++void notrace printk_nmi_exit(void)
+ {
+ this_cpu_and(printk_context, ~PRINTK_NMI_CONTEXT_MASK);
+ }
--- /dev/null
+From 61b717d041b1976530f68f8b539b2e3a7dd8e39c Mon Sep 17 00:00:00 2001
+From: Bart Van Assche <bart.vanassche@wdc.com>
+Date: Tue, 26 Jun 2018 08:39:36 -0700
+Subject: RDMA/rxe: Set wqe->status correctly if an unexpected response is received
+
+From: Bart Van Assche <bart.vanassche@wdc.com>
+
+commit 61b717d041b1976530f68f8b539b2e3a7dd8e39c upstream.
+
+Every function that returns COMPST_ERROR must set wqe->status to another
+value than IB_WC_SUCCESS before returning COMPST_ERROR. Fix the only code
+path for which this is not yet the case.
+
+Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
+Cc: <stable@vger.kernel.org>
+Reviewed-by: Yuval Shaia <yuval.shaia@oracle.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/sw/rxe/rxe_comp.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/infiniband/sw/rxe/rxe_comp.c
++++ b/drivers/infiniband/sw/rxe/rxe_comp.c
+@@ -276,6 +276,7 @@ static inline enum comp_state check_ack(
+ case IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE:
+ if (wqe->wr.opcode != IB_WR_RDMA_READ &&
+ wqe->wr.opcode != IB_WR_RDMA_READ_WITH_INV) {
++ wqe->status = IB_WC_FATAL_ERR;
+ return COMPST_ERROR;
+ }
+ reset_retry_counters(qp);
--- /dev/null
+From dc30b96ab6d569060741572cf30517d3179429a8 Mon Sep 17 00:00:00 2001
+From: Markus Stockhausen <stockhausen@collogia.de>
+Date: Fri, 27 Jul 2018 09:09:53 -0600
+Subject: readahead: stricter check for bdi io_pages
+
+From: Markus Stockhausen <stockhausen@collogia.de>
+
+commit dc30b96ab6d569060741572cf30517d3179429a8 upstream.
+
+ondemand_readahead() checks bdi->io_pages to cap the maximum number of
+pages that need to be processed. This works until the readit section:
+if we do an async-only readahead (async size = sync size) and the
+target is at the beginning of the window, we expand the request by
+another get_next_ra_size() pages. btrace output for large reads shows
+that the kernel always issues a double-sized read at the beginning of
+processing. Add an additional check for io_pages in the lower part of
+the function. The fix helps devices that hard-limit bio pages and rely
+on proper handling of max_hw_read_sectors (e.g. older FusionIO cards).
+For that reason it could qualify for stable.
+
+Fixes: 9491ae4a ("mm: don't cap request size based on read-ahead setting")
+Cc: stable@vger.kernel.org
+Signed-off-by: Markus Stockhausen <stockhausen@collogia.de>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/readahead.c | 12 ++++++++++--
+ 1 file changed, 10 insertions(+), 2 deletions(-)
+
+--- a/mm/readahead.c
++++ b/mm/readahead.c
+@@ -380,6 +380,7 @@ ondemand_readahead(struct address_space
+ {
+ struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
+ unsigned long max_pages = ra->ra_pages;
++ unsigned long add_pages;
+ pgoff_t prev_offset;
+
+ /*
+@@ -469,10 +470,17 @@ readit:
+ * Will this read hit the readahead marker made by itself?
+ * If so, trigger the readahead marker hit now, and merge
+ * the resulted next readahead window into the current one.
++ * Take care of maximum IO pages as above.
+ */
+ if (offset == ra->start && ra->size == ra->async_size) {
+- ra->async_size = get_next_ra_size(ra, max_pages);
+- ra->size += ra->async_size;
++ add_pages = get_next_ra_size(ra, max_pages);
++ if (ra->size + add_pages <= max_pages) {
++ ra->async_size = add_pages;
++ ra->size += add_pages;
++ } else {
++ ra->size = max_pages;
++ ra->async_size = max_pages >> 1;
++ }
+ }
+
+ return ra_submit(ra, mapping, filp);
--- /dev/null
+From 817aef260037f33ee0f44c17fe341323d3aebd6d Mon Sep 17 00:00:00 2001
+From: Yannik Sembritzki <yannik@sembritzki.me>
+Date: Thu, 16 Aug 2018 14:05:10 +0100
+Subject: Replace magic for trusting the secondary keyring with #define
+
+From: Yannik Sembritzki <yannik@sembritzki.me>
+
+commit 817aef260037f33ee0f44c17fe341323d3aebd6d upstream.
+
+Replace the use of a magic number that indicates that verify_*_signature()
+should use the secondary keyring with a symbol.
+
+Signed-off-by: Yannik Sembritzki <yannik@sembritzki.me>
+Signed-off-by: David Howells <dhowells@redhat.com>
+Cc: keyrings@vger.kernel.org
+Cc: linux-security-module@vger.kernel.org
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ certs/system_keyring.c | 3 ++-
+ crypto/asymmetric_keys/pkcs7_key_type.c | 2 +-
+ include/linux/verification.h | 6 ++++++
+ 3 files changed, 9 insertions(+), 2 deletions(-)
+
+--- a/certs/system_keyring.c
++++ b/certs/system_keyring.c
+@@ -15,6 +15,7 @@
+ #include <linux/cred.h>
+ #include <linux/err.h>
+ #include <linux/slab.h>
++#include <linux/verification.h>
+ #include <keys/asymmetric-type.h>
+ #include <keys/system_keyring.h>
+ #include <crypto/pkcs7.h>
+@@ -230,7 +231,7 @@ int verify_pkcs7_signature(const void *d
+
+ if (!trusted_keys) {
+ trusted_keys = builtin_trusted_keys;
+- } else if (trusted_keys == (void *)1UL) {
++ } else if (trusted_keys == VERIFY_USE_SECONDARY_KEYRING) {
+ #ifdef CONFIG_SECONDARY_TRUSTED_KEYRING
+ trusted_keys = secondary_trusted_keys;
+ #else
+--- a/crypto/asymmetric_keys/pkcs7_key_type.c
++++ b/crypto/asymmetric_keys/pkcs7_key_type.c
+@@ -62,7 +62,7 @@ static int pkcs7_preparse(struct key_pre
+
+ return verify_pkcs7_signature(NULL, 0,
+ prep->data, prep->datalen,
+- (void *)1UL, usage,
++ VERIFY_USE_SECONDARY_KEYRING, usage,
+ pkcs7_view_content, prep);
+ }
+
+--- a/include/linux/verification.h
++++ b/include/linux/verification.h
+@@ -13,6 +13,12 @@
+ #define _LINUX_VERIFICATION_H
+
+ /*
++ * Indicate that both builtin trusted keys and secondary trusted keys
++ * should be used.
++ */
++#define VERIFY_USE_SECONDARY_KEYRING ((struct key *)1UL)
++
++/*
+ * The use to which an asymmetric key is being put.
+ */
+ enum key_being_used_for {
--- /dev/null
+From 5c8b84f410b3819d14cb1ebf32e4b3714b5a6e0b Mon Sep 17 00:00:00 2001
+From: Johan Hovold <johan@kernel.org>
+Date: Wed, 4 Jul 2018 11:05:55 +0200
+Subject: rtc: omap: fix potential crash on power off
+
+From: Johan Hovold <johan@kernel.org>
+
+commit 5c8b84f410b3819d14cb1ebf32e4b3714b5a6e0b upstream.
+
+Do not set the system power-off callback and omap power-off rtc pointer
+until we're done setting up our device to avoid leaving stale pointers
+around after a late probe error.
+
+Fixes: 97ea1906b3c2 ("rtc: omap: Support ext_wakeup configuration")
+Cc: stable <stable@vger.kernel.org> # 4.9
+Cc: Marcin Niestroj <m.niestroj@grinn-global.com>
+Cc: Tony Lindgren <tony@atomide.com>
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Acked-by: Tony Lindgren <tony@atomide.com>
+Reviewed-by: Marcin Niestroj <m.niestroj@grinn-global.com>
+Signed-off-by: Alexandre Belloni <alexandre.belloni@bootlin.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/rtc/rtc-omap.c | 14 +++++++-------
+ 1 file changed, 7 insertions(+), 7 deletions(-)
+
+--- a/drivers/rtc/rtc-omap.c
++++ b/drivers/rtc/rtc-omap.c
+@@ -817,13 +817,6 @@ static int omap_rtc_probe(struct platfor
+ goto err;
+ }
+
+- if (rtc->is_pmic_controller) {
+- if (!pm_power_off) {
+- omap_rtc_power_off_rtc = rtc;
+- pm_power_off = omap_rtc_power_off;
+- }
+- }
+-
+ /* Support ext_wakeup pinconf */
+ rtc_pinctrl_desc.name = dev_name(&pdev->dev);
+
+@@ -833,6 +826,13 @@ static int omap_rtc_probe(struct platfor
+ return PTR_ERR(rtc->pctldev);
+ }
+
++ if (rtc->is_pmic_controller) {
++ if (!pm_power_off) {
++ omap_rtc_power_off_rtc = rtc;
++ pm_power_off = omap_rtc_power_off;
++ }
++ }
++
+ return 0;
+
+ err:
--- /dev/null
+From 931c4e9a72ae91d59c5332ffb6812911a749da8e Mon Sep 17 00:00:00 2001
+From: Janek Kotas <jank@cadence.com>
+Date: Mon, 4 Jun 2018 11:24:44 +0000
+Subject: spi: cadence: Change usleep_range() to udelay(), for atomic context
+
+From: Janek Kotas <jank@cadence.com>
+
+commit 931c4e9a72ae91d59c5332ffb6812911a749da8e upstream.
+
+The path "spi: cadence: Add usleep_range() for
+cdns_spi_fill_tx_fifo()" added a usleep_range() function call,
+which cannot be used in atomic context.
+However the cdns_spi_fill_tx_fifo() function can be called during
+an interrupt which may result in a kernel panic:
+
+BUG: scheduling while atomic: grep/561/0x00010002
+Modules linked in:
+Preemption disabled at:
+[<ffffff800858ea28>] wait_for_common+0x48/0x178
+CPU: 0 PID: 561 Comm: grep Not tainted 4.17.0 #1
+Hardware name: Cadence CSP (DT)
+Call trace:
+ dump_backtrace+0x0/0x198
+ show_stack+0x14/0x20
+ dump_stack+0x8c/0xac
+ __schedule_bug+0x6c/0xb8
+ __schedule+0x570/0x5d8
+ schedule+0x34/0x98
+ schedule_hrtimeout_range_clock+0x98/0x110
+ schedule_hrtimeout_range+0x10/0x18
+ usleep_range+0x64/0x98
+ cdns_spi_fill_tx_fifo+0x70/0xb0
+ cdns_spi_irq+0xd0/0xe0
+ __handle_irq_event_percpu+0x9c/0x128
+ handle_irq_event_percpu+0x34/0x88
+ handle_irq_event+0x48/0x78
+ handle_fasteoi_irq+0xbc/0x1b0
+ generic_handle_irq+0x24/0x38
+ __handle_domain_irq+0x84/0xf8
+ gic_handle_irq+0xc4/0x180
+
+This patch replaces the function call with udelay() which can be
+used in an atomic context, like an interrupt.
+
+Signed-off-by: Jan Kotas <jank@cadence.com>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/spi/spi-cadence.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/spi/spi-cadence.c
++++ b/drivers/spi/spi-cadence.c
+@@ -319,7 +319,7 @@ static void cdns_spi_fill_tx_fifo(struct
+ */
+ if (cdns_spi_read(xspi, CDNS_SPI_ISR) &
+ CDNS_SPI_IXR_TXFULL)
+- usleep_range(10, 20);
++ udelay(10);
+
+ if (xspi->txbuf)
+ cdns_spi_write(xspi, CDNS_SPI_TXD, *xspi->txbuf++);
--- /dev/null
+From 563a53f3906a6b43692498e5b3ae891fac93a4af Mon Sep 17 00:00:00 2001
+From: Bartosz Golaszewski <bgolaszewski@baylibre.com>
+Date: Fri, 10 Aug 2018 11:13:52 +0200
+Subject: spi: davinci: fix a NULL pointer dereference
+
+From: Bartosz Golaszewski <bgolaszewski@baylibre.com>
+
+commit 563a53f3906a6b43692498e5b3ae891fac93a4af upstream.
+
+On non-OF systems spi->controller_data may be NULL. This causes a NULL
+pointer dereference on dm365-evm.
+
+Signed-off-by: Bartosz Golaszewski <bgolaszewski@baylibre.com>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/spi/spi-davinci.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/spi/spi-davinci.c
++++ b/drivers/spi/spi-davinci.c
+@@ -217,7 +217,7 @@ static void davinci_spi_chipselect(struc
+ pdata = &dspi->pdata;
+
+ /* program delay transfers if tx_delay is non zero */
+- if (spicfg->wdelay)
++ if (spicfg && spicfg->wdelay)
+ spidat1 |= SPIDAT1_WDEL;
+
+ /*
--- /dev/null
+From 22d71a5097ec7059b6cbbee678a4f88484695941 Mon Sep 17 00:00:00 2001
+From: Mika Westerberg <mika.westerberg@linux.intel.com>
+Date: Thu, 28 Jun 2018 13:52:23 +0300
+Subject: spi: pxa2xx: Add support for Intel Ice Lake
+
+From: Mika Westerberg <mika.westerberg@linux.intel.com>
+
+commit 22d71a5097ec7059b6cbbee678a4f88484695941 upstream.
+
+Intel Ice Lake SPI host controller follows the Intel Cannon Lake but the
+PCI IDs are different. Add the new PCI IDs to the driver supported
+devices list.
+
+Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Signed-off-by: Jarkko Nikula <jarkko.nikula@linux.intel.com>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/spi/spi-pxa2xx.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/spi/spi-pxa2xx.c
++++ b/drivers/spi/spi-pxa2xx.c
+@@ -1480,6 +1480,10 @@ static const struct pci_device_id pxa2xx
+ { PCI_VDEVICE(INTEL, 0x31c2), LPSS_BXT_SSP },
+ { PCI_VDEVICE(INTEL, 0x31c4), LPSS_BXT_SSP },
+ { PCI_VDEVICE(INTEL, 0x31c6), LPSS_BXT_SSP },
++ /* ICL-LP */
++ { PCI_VDEVICE(INTEL, 0x34aa), LPSS_CNL_SSP },
++ { PCI_VDEVICE(INTEL, 0x34ab), LPSS_CNL_SSP },
++ { PCI_VDEVICE(INTEL, 0x34fb), LPSS_CNL_SSP },
+ /* APL */
+ { PCI_VDEVICE(INTEL, 0x5ac2), LPSS_BXT_SSP },
+ { PCI_VDEVICE(INTEL, 0x5ac4), LPSS_BXT_SSP },
--- /dev/null
+From d8ffee2f551a627ffb7b216e2da322cb9a037f77 Mon Sep 17 00:00:00 2001
+From: Krzysztof Kozlowski <krzk@kernel.org>
+Date: Fri, 29 Jun 2018 13:33:09 +0200
+Subject: spi: spi-fsl-dspi: Fix imprecise abort on VF500 during probe
+
+From: Krzysztof Kozlowski <krzk@kernel.org>
+
+commit d8ffee2f551a627ffb7b216e2da322cb9a037f77 upstream.
+
+Registers of the DSPI should not be accessed before its clock is enabled.
+On the Toradex Colibri VF50 on an Iris carrier board this could be seen
+during bootup as an imprecise abort:
+
+ Unhandled fault: imprecise external abort (0x1c06) at 0x00000000
+ Internal error: : 1c06 [#1] ARM
+ Modules linked in:
+ CPU: 0 PID: 1 Comm: swapper Not tainted 4.14.39-dirty #97
+ Hardware name: Freescale Vybrid VF5xx/VF6xx (Device Tree)
+ Backtrace:
+ [<804166a8>] (regmap_write) from [<80466b5c>] (dspi_probe+0x1f0/0x8dc)
+ [<8046696c>] (dspi_probe) from [<8040107c>] (platform_drv_probe+0x54/0xb8)
+ [<80401028>] (platform_drv_probe) from [<803ff53c>] (driver_probe_device+0x280/0x2f8)
+ [<803ff2bc>] (driver_probe_device) from [<803ff674>] (__driver_attach+0xc0/0xc4)
+ [<803ff5b4>] (__driver_attach) from [<803fd818>] (bus_for_each_dev+0x70/0xa4)
+ [<803fd7a8>] (bus_for_each_dev) from [<803fee74>] (driver_attach+0x24/0x28)
+ [<803fee50>] (driver_attach) from [<803fe980>] (bus_add_driver+0x1a0/0x218)
+ [<803fe7e0>] (bus_add_driver) from [<803fffe8>] (driver_register+0x80/0x100)
+ [<803fff68>] (driver_register) from [<80400fdc>] (__platform_driver_register+0x48/0x50)
+ [<80400f94>] (__platform_driver_register) from [<8091cf7c>] (fsl_dspi_driver_init+0x1c/0x20)
+ [<8091cf60>] (fsl_dspi_driver_init) from [<8010195c>] (do_one_initcall+0x4c/0x174)
+ [<80101910>] (do_one_initcall) from [<80900e8c>] (kernel_init_freeable+0x144/0x1d8)
+ [<80900d48>] (kernel_init_freeable) from [<805ff6a8>] (kernel_init+0x10/0x114)
+ [<805ff698>] (kernel_init) from [<80107be8>] (ret_from_fork+0x14/0x2c)
+
+Cc: <stable@vger.kernel.org>
+Fixes: 5ee67b587a2b ("spi: dspi: clear SPI_SR before enable interrupt")
+Signed-off-by: Krzysztof Kozlowski <krzk@kernel.org>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/spi/spi-fsl-dspi.c | 24 ++++++++++++------------
+ 1 file changed, 12 insertions(+), 12 deletions(-)
+
+--- a/drivers/spi/spi-fsl-dspi.c
++++ b/drivers/spi/spi-fsl-dspi.c
+@@ -1006,31 +1006,31 @@ static int dspi_probe(struct platform_de
+ goto out_master_put;
+ }
+
++ dspi->clk = devm_clk_get(&pdev->dev, "dspi");
++ if (IS_ERR(dspi->clk)) {
++ ret = PTR_ERR(dspi->clk);
++ dev_err(&pdev->dev, "unable to get clock\n");
++ goto out_master_put;
++ }
++ ret = clk_prepare_enable(dspi->clk);
++ if (ret)
++ goto out_master_put;
++
+ dspi_init(dspi);
+ dspi->irq = platform_get_irq(pdev, 0);
+ if (dspi->irq < 0) {
+ dev_err(&pdev->dev, "can't get platform irq\n");
+ ret = dspi->irq;
+- goto out_master_put;
++ goto out_clk_put;
+ }
+
+ ret = devm_request_irq(&pdev->dev, dspi->irq, dspi_interrupt, 0,
+ pdev->name, dspi);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Unable to attach DSPI interrupt\n");
+- goto out_master_put;
++ goto out_clk_put;
+ }
+
+- dspi->clk = devm_clk_get(&pdev->dev, "dspi");
+- if (IS_ERR(dspi->clk)) {
+- ret = PTR_ERR(dspi->clk);
+- dev_err(&pdev->dev, "unable to get clock\n");
+- goto out_master_put;
+- }
+- ret = clk_prepare_enable(dspi->clk);
+- if (ret)
+- goto out_master_put;
+-
+ if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
+ ret = dspi_request_dma(dspi, res->start);
+ if (ret < 0) {
--- /dev/null
+From 757d9140072054528b13bbe291583d9823cde195 Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (VMware)" <rostedt@goodmis.org>
+Date: Thu, 16 Aug 2018 16:08:37 -0400
+Subject: tracing/blktrace: Fix to allow setting same value
+
+From: Steven Rostedt (VMware) <rostedt@goodmis.org>
+
+commit 757d9140072054528b13bbe291583d9823cde195 upstream.
+
+Masami Hiramatsu reported:
+
+ The current trace-enable attribute in sysfs returns an error
+ if the user writes the same setting value as the current one,
+ e.g.
+
+ # cat /sys/block/sda/trace/enable
+ 0
+ # echo 0 > /sys/block/sda/trace/enable
+ bash: echo: write error: Invalid argument
+ # echo 1 > /sys/block/sda/trace/enable
+ # echo 1 > /sys/block/sda/trace/enable
+ bash: echo: write error: Device or resource busy
+
+ But this is not a preferred behavior; the write should be
+ ignored if the new setting is the same as the current one.
+ This fixes the problem as below.
+
+ # cat /sys/block/sda/trace/enable
+ 0
+ # echo 0 > /sys/block/sda/trace/enable
+ # echo 1 > /sys/block/sda/trace/enable
+ # echo 1 > /sys/block/sda/trace/enable
+
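+The fix below compares the requested state with the current one using the
+double-negation idiom, !!value == !!q->blk_trace, so any non-zero write
+and any non-NULL trace pointer both normalize to 1 before comparison. A
+tiny user-space sketch of the idiom (variable names invented):
+
+  #include <stdio.h>
+
+  int main(void)
+  {
+      int value = 5;               /* what the user wrote, non-zero */
+      void *blk_trace = &value;    /* tracing currently enabled (non-NULL) */
+
+      if (!!value == !!blk_trace)
+          puts("requested state already active, nothing to do");
+      else
+          puts("state changes, set up or tear down tracing");
+      return 0;
+  }
+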
+Link: http://lkml.kernel.org/r/20180816103802.08678002@gandalf.local.home
+
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Jens Axboe <axboe@kernel.dk>
+Cc: linux-block@vger.kernel.org
+Cc: stable@vger.kernel.org
+Fixes: cd649b8bb830d ("blktrace: remove sysfs_blk_trace_enable_show/store()")
+Reported-by: Masami Hiramatsu <mhiramat@kernel.org>
+Tested-by: Masami Hiramatsu <mhiramat@kernel.org>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/blktrace.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/kernel/trace/blktrace.c
++++ b/kernel/trace/blktrace.c
+@@ -1809,6 +1809,10 @@ static ssize_t sysfs_blk_trace_attr_stor
+ mutex_lock(&q->blk_trace_mutex);
+
+ if (attr == &dev_attr_enable) {
++ if (!!value == !!q->blk_trace) {
++ ret = 0;
++ goto out_unlock_bdev;
++ }
+ if (value)
+ ret = blk_trace_setup_queue(q, bdev);
+ else
--- /dev/null
+From f143641bfef9a4a60c57af30de26c63057e7e695 Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (VMware)" <rostedt@goodmis.org>
+Date: Wed, 1 Aug 2018 15:40:57 -0400
+Subject: tracing: Do not call start/stop() functions when tracing_on does not change
+
+From: Steven Rostedt (VMware) <rostedt@goodmis.org>
+
+commit f143641bfef9a4a60c57af30de26c63057e7e695 upstream.
+
+Currently, when one echoes 1 into tracing_on, the current tracer's
+"start()" function is executed, even if tracing_on was already set to one.
+This can lead to strange side effects. One is that if the hwlat tracer is
+enabled and someone does "echo 1 > tracing_on", the hwlat tracer's
+start() function is called again, which will create another kernel thread
+and make it impossible to remove the old one.
+
+Link: http://lkml.kernel.org/r/1533120354-22923-1-git-send-email-erica.bugden@linutronix.de
+
+Cc: stable@vger.kernel.org
+Fixes: 2df8f8a6a897e ("tracing: Fix regression with irqsoff tracer and tracing_on file")
+Reported-by: Erica Bugden <erica.bugden@linutronix.de>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/trace.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -7545,7 +7545,9 @@ rb_simple_write(struct file *filp, const
+
+ if (buffer) {
+ mutex_lock(&trace_types_lock);
+- if (val) {
++ if (!!val == tracer_tracing_is_on(tr)) {
++ val = 0; /* do nothing */
++ } else if (val) {
+ tracer_tracing_on(tr);
+ if (tr->current_trace->start)
+ tr->current_trace->start(tr);
--- /dev/null
+From a5ba1d95e46ecaea638ddd7cd144107c783acb5d Mon Sep 17 00:00:00 2001
+From: Tycho Andersen <tycho@tycho.ws>
+Date: Fri, 6 Jul 2018 10:24:57 -0600
+Subject: uart: fix race between uart_put_char() and uart_shutdown()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Tycho Andersen <tycho@tycho.ws>
+
+commit a5ba1d95e46ecaea638ddd7cd144107c783acb5d upstream.
+
+We have reports of the following crash:
+
+ PID: 7 TASK: ffff88085c6d61c0 CPU: 1 COMMAND: "kworker/u25:0"
+ #0 [ffff88085c6db710] machine_kexec at ffffffff81046239
+ #1 [ffff88085c6db760] crash_kexec at ffffffff810fc248
+ #2 [ffff88085c6db830] oops_end at ffffffff81008ae7
+ #3 [ffff88085c6db860] no_context at ffffffff81050b8f
+ #4 [ffff88085c6db8b0] __bad_area_nosemaphore at ffffffff81050d75
+ #5 [ffff88085c6db900] bad_area_nosemaphore at ffffffff81050e83
+ #6 [ffff88085c6db910] __do_page_fault at ffffffff8105132e
+ #7 [ffff88085c6db9b0] do_page_fault at ffffffff8105152c
+ #8 [ffff88085c6db9c0] page_fault at ffffffff81a3f122
+ [exception RIP: uart_put_char+149]
+ RIP: ffffffff814b67b5 RSP: ffff88085c6dba78 RFLAGS: 00010006
+ RAX: 0000000000000292 RBX: ffffffff827c5120 RCX: 0000000000000081
+ RDX: 0000000000000000 RSI: 000000000000005f RDI: ffffffff827c5120
+ RBP: ffff88085c6dba98 R8: 000000000000012c R9: ffffffff822ea320
+ R10: ffff88085fe4db04 R11: 0000000000000001 R12: ffff881059f9c000
+ R13: 0000000000000001 R14: 000000000000005f R15: 0000000000000fba
+ ORIG_RAX: ffffffffffffffff CS: 0010 SS: 0018
+ #9 [ffff88085c6dbaa0] tty_put_char at ffffffff81497544
+ #10 [ffff88085c6dbac0] do_output_char at ffffffff8149c91c
+ #11 [ffff88085c6dbae0] __process_echoes at ffffffff8149cb8b
+ #12 [ffff88085c6dbb30] commit_echoes at ffffffff8149cdc2
+ #13 [ffff88085c6dbb60] n_tty_receive_buf_fast at ffffffff8149e49b
+ #14 [ffff88085c6dbbc0] __receive_buf at ffffffff8149ef5a
+ #15 [ffff88085c6dbc20] n_tty_receive_buf_common at ffffffff8149f016
+ #16 [ffff88085c6dbca0] n_tty_receive_buf2 at ffffffff8149f194
+ #17 [ffff88085c6dbcb0] flush_to_ldisc at ffffffff814a238a
+ #18 [ffff88085c6dbd50] process_one_work at ffffffff81090be2
+ #19 [ffff88085c6dbe20] worker_thread at ffffffff81091b4d
+ #20 [ffff88085c6dbeb0] kthread at ffffffff81096384
+ #21 [ffff88085c6dbf50] ret_from_fork at ffffffff81a3d69f
+
+after slogging through some disassembly:
+
+ffffffff814b6720 <uart_put_char>:
+ffffffff814b6720: 55 push %rbp
+ffffffff814b6721: 48 89 e5 mov %rsp,%rbp
+ffffffff814b6724: 48 83 ec 20 sub $0x20,%rsp
+ffffffff814b6728: 48 89 1c 24 mov %rbx,(%rsp)
+ffffffff814b672c: 4c 89 64 24 08 mov %r12,0x8(%rsp)
+ffffffff814b6731: 4c 89 6c 24 10 mov %r13,0x10(%rsp)
+ffffffff814b6736: 4c 89 74 24 18 mov %r14,0x18(%rsp)
+ffffffff814b673b: e8 b0 8e 58 00 callq ffffffff81a3f5f0 <mcount>
+ffffffff814b6740: 4c 8b a7 88 02 00 00 mov 0x288(%rdi),%r12
+ffffffff814b6747: 45 31 ed xor %r13d,%r13d
+ffffffff814b674a: 41 89 f6 mov %esi,%r14d
+ffffffff814b674d: 49 83 bc 24 70 01 00 cmpq $0x0,0x170(%r12)
+ffffffff814b6754: 00 00
+ffffffff814b6756: 49 8b 9c 24 80 01 00 mov 0x180(%r12),%rbx
+ffffffff814b675d: 00
+ffffffff814b675e: 74 2f je ffffffff814b678f <uart_put_char+0x6f>
+ffffffff814b6760: 48 89 df mov %rbx,%rdi
+ffffffff814b6763: e8 a8 67 58 00 callq ffffffff81a3cf10 <_raw_spin_lock_irqsave>
+ffffffff814b6768: 41 8b 8c 24 78 01 00 mov 0x178(%r12),%ecx
+ffffffff814b676f: 00
+ffffffff814b6770: 89 ca mov %ecx,%edx
+ffffffff814b6772: f7 d2 not %edx
+ffffffff814b6774: 41 03 94 24 7c 01 00 add 0x17c(%r12),%edx
+ffffffff814b677b: 00
+ffffffff814b677c: 81 e2 ff 0f 00 00 and $0xfff,%edx
+ffffffff814b6782: 75 23 jne ffffffff814b67a7 <uart_put_char+0x87>
+ffffffff814b6784: 48 89 c6 mov %rax,%rsi
+ffffffff814b6787: 48 89 df mov %rbx,%rdi
+ffffffff814b678a: e8 e1 64 58 00 callq ffffffff81a3cc70 <_raw_spin_unlock_irqrestore>
+ffffffff814b678f: 44 89 e8 mov %r13d,%eax
+ffffffff814b6792: 48 8b 1c 24 mov (%rsp),%rbx
+ffffffff814b6796: 4c 8b 64 24 08 mov 0x8(%rsp),%r12
+ffffffff814b679b: 4c 8b 6c 24 10 mov 0x10(%rsp),%r13
+ffffffff814b67a0: 4c 8b 74 24 18 mov 0x18(%rsp),%r14
+ffffffff814b67a5: c9 leaveq
+ffffffff814b67a6: c3 retq
+ffffffff814b67a7: 49 8b 94 24 70 01 00 mov 0x170(%r12),%rdx
+ffffffff814b67ae: 00
+ffffffff814b67af: 48 63 c9 movslq %ecx,%rcx
+ffffffff814b67b2: 41 b5 01 mov $0x1,%r13b
+ffffffff814b67b5: 44 88 34 0a mov %r14b,(%rdx,%rcx,1)
+ffffffff814b67b9: 41 8b 94 24 78 01 00 mov 0x178(%r12),%edx
+ffffffff814b67c0: 00
+ffffffff814b67c1: 83 c2 01 add $0x1,%edx
+ffffffff814b67c4: 81 e2 ff 0f 00 00 and $0xfff,%edx
+ffffffff814b67ca: 41 89 94 24 78 01 00 mov %edx,0x178(%r12)
+ffffffff814b67d1: 00
+ffffffff814b67d2: eb b0 jmp ffffffff814b6784 <uart_put_char+0x64>
+ffffffff814b67d4: 66 66 66 2e 0f 1f 84 data32 data32 nopw %cs:0x0(%rax,%rax,1)
+ffffffff814b67db: 00 00 00 00 00
+
+for our build, this is crashing at:
+
+ circ->buf[circ->head] = c;
+
+Looking in uart_port_startup(), it seems that circ->buf (state->xmit.buf)
+is protected by the "per-port mutex", which based on uart_port_check() is
+state->port.mutex. Indeed, the lock acquired in uart_put_char() is
+uport->lock, i.e. not the same lock.
+
+Anyway, since that lock is not acquired, if uart_shutdown() is called, the
+last chunk of that function may release state->xmit.buf before it is
+assigned to NULL, causing the race above.
+
+To fix it, let's lock uport->lock when allocating/deallocating
+state->xmit.buf in addition to the per-port mutex.
+
+v2: switch to locking uport->lock on allocation/deallocation instead of
+ locking the per-port mutex in uart_put_char. Note that since
+ uport->lock is a spin lock, we have to switch the allocation to
+ GFP_ATOMIC.
+v3: move the allocation outside the lock, so we can switch back to
+ GFP_KERNEL
+
+Signed-off-by: Tycho Andersen <tycho@tycho.ws>
+Cc: stable <stable@vger.kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/tty/serial/serial_core.c | 17 ++++++++++++-----
+ 1 file changed, 12 insertions(+), 5 deletions(-)
+
+--- a/drivers/tty/serial/serial_core.c
++++ b/drivers/tty/serial/serial_core.c
+@@ -195,6 +195,7 @@ static int uart_port_startup(struct tty_
+ {
+ struct uart_port *uport = uart_port_check(state);
+ unsigned long page;
++ unsigned long flags = 0;
+ int retval = 0;
+
+ if (uport->type == PORT_UNKNOWN)
+@@ -209,15 +210,18 @@ static int uart_port_startup(struct tty_
+ * Initialise and allocate the transmit and temporary
+ * buffer.
+ */
+- if (!state->xmit.buf) {
+- /* This is protected by the per port mutex */
+- page = get_zeroed_page(GFP_KERNEL);
+- if (!page)
+- return -ENOMEM;
++ page = get_zeroed_page(GFP_KERNEL);
++ if (!page)
++ return -ENOMEM;
+
++ uart_port_lock(state, flags);
++ if (!state->xmit.buf) {
+ state->xmit.buf = (unsigned char *) page;
+ uart_circ_clear(&state->xmit);
++ } else {
++ free_page(page);
+ }
++ uart_port_unlock(uport, flags);
+
+ retval = uport->ops->startup(uport);
+ if (retval == 0) {
+@@ -276,6 +280,7 @@ static void uart_shutdown(struct tty_str
+ {
+ struct uart_port *uport = uart_port_check(state);
+ struct tty_port *port = &state->port;
++ unsigned long flags = 0;
+
+ /*
+ * Set the TTY IO error marker
+@@ -308,10 +313,12 @@ static void uart_shutdown(struct tty_str
+ /*
+ * Free the transmit buffer page.
+ */
++ uart_port_lock(state, flags);
+ if (state->xmit.buf) {
+ free_page((unsigned long)state->xmit.buf);
+ state->xmit.buf = NULL;
+ }
++ uart_port_unlock(uport, flags);
+ }
+
+ /**
--- /dev/null
+From 016f8ffc48cb01d1e7701649c728c5d2e737d295 Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (VMware)" <rostedt@goodmis.org>
+Date: Thu, 9 Aug 2018 15:37:59 -0400
+Subject: uprobes: Use synchronize_rcu() not synchronize_sched()
+
+From: Steven Rostedt (VMware) <rostedt@goodmis.org>
+
+commit 016f8ffc48cb01d1e7701649c728c5d2e737d295 upstream.
+
+While debugging another bug, I was looking at all the synchronize*()
+functions being used in kernel/trace, and noticed that trace_uprobes was
+using synchronize_sched(), with a comment to synchronize with
+{u,ret}_probe_trace_func(). When looking at those functions, the data is
+protected with "rcu_read_lock()" and not with "rcu_read_lock_sched()". This
+is using the wrong synchronize_*() function.
+
+Link: http://lkml.kernel.org/r/20180809160553.469e1e32@gandalf.local.home
+
+Cc: stable@vger.kernel.org
+Fixes: 70ed91c6ec7f8 ("tracing/uprobes: Support ftrace_event_file base multibuffer")
+Acked-by: Oleg Nesterov <oleg@redhat.com>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/trace_uprobe.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/trace/trace_uprobe.c
++++ b/kernel/trace/trace_uprobe.c
+@@ -967,7 +967,7 @@ probe_event_disable(struct trace_uprobe
+
+ list_del_rcu(&link->list);
+ /* synchronize with u{,ret}probe_trace_func */
+- synchronize_sched();
++ synchronize_rcu();
+ kfree(link);
+
+ if (!list_empty(&tu->tp.files))
--- /dev/null
+From 5081efd112560d3febb328e627176235b250d59d Mon Sep 17 00:00:00 2001
+From: Nadav Amit <namit@vmware.com>
+Date: Tue, 19 Jun 2018 16:00:25 -0700
+Subject: vmw_balloon: do not use 2MB without batching
+
+From: Nadav Amit <namit@vmware.com>
+
+commit 5081efd112560d3febb328e627176235b250d59d upstream.
+
+If the hypervisor sets the 2MB-batching capability while plain batching
+is cleared, the balloon code breaks. In this case the legacy mechanism
+is used with 2MB pages. The VM would report that a 2MB page is
+ballooned, but the hypervisor would only take the first 4KB.
+
+While the hypervisor should not report such settings, make the code more
+robust by not enabling 2MB support without batching.
+
+Fixes: 365bd7ef7ec8e ("VMware balloon: Support 2m page ballooning.")
+Cc: stable@vger.kernel.org
+Reviewed-by: Xavier Deguillard <xdeguillard@vmware.com>
+Signed-off-by: Nadav Amit <nadav.amit@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/misc/vmw_balloon.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+--- a/drivers/misc/vmw_balloon.c
++++ b/drivers/misc/vmw_balloon.c
+@@ -341,7 +341,13 @@ static bool vmballoon_send_start(struct
+ success = false;
+ }
+
+- if (b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS)
++ /*
++ * 2MB pages are only supported with batching. If batching is for some
++ * reason disabled, do not use 2MB pages, since otherwise the legacy
++ * mechanism is used with 2MB pages, causing a failure.
++ */
++ if ((b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS) &&
++ (b->capabilities & VMW_BALLOON_BATCHED_CMDS))
+ b->supported_page_sizes = 2;
+ else
+ b->supported_page_sizes = 1;
--- /dev/null
+From 09755690c6b7c1eabdc4651eb3b276f8feb1e447 Mon Sep 17 00:00:00 2001
+From: Nadav Amit <namit@vmware.com>
+Date: Tue, 19 Jun 2018 16:00:24 -0700
+Subject: vmw_balloon: fix inflation of 64-bit GFNs
+
+From: Nadav Amit <namit@vmware.com>
+
+commit 09755690c6b7c1eabdc4651eb3b276f8feb1e447 upstream.
+
+When balloon batching is not supported by the hypervisor, the guest
+frame number (GFN) must fit in 32-bit. However, due to a bug, this check
+was mistakenly ignored. In practice, when total RAM is greater than
+16TB, the balloon does not work currently, making this bug unlikely to
+happen.
+
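+The restored check is the standard cast-and-compare truncation test:
+narrow the value to 32 bits and see whether it survives unchanged. In
+stand-alone form (user-space types, illustrative only):
+
+  #include <stdint.h>
+  #include <stdio.h>
+
+  static int pfn_fits_in_32_bits(uint64_t pfn)
+  {
+      uint32_t pfn32 = (uint32_t)pfn;   /* keep only the low 32 bits */
+
+      return pfn32 == pfn;              /* equal only if nothing was lost */
+  }
+
+  int main(void)
+  {
+      printf("0x00000000ffffffff fits: %d\n",
+             pfn_fits_in_32_bits(0xffffffffULL));   /* 1 */
+      printf("0x0000000100000000 fits: %d\n",
+             pfn_fits_in_32_bits(0x100000000ULL));  /* 0 */
+      return 0;
+  }
+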
+Fixes: ef0f8f112984 ("VMware balloon: partially inline vmballoon_reserve_page.")
+Cc: stable@vger.kernel.org
+Reviewed-by: Xavier Deguillard <xdeguillard@vmware.com>
+Signed-off-by: Nadav Amit <namit@vmware.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/misc/vmw_balloon.c | 13 +++++++------
+ 1 file changed, 7 insertions(+), 6 deletions(-)
+
+--- a/drivers/misc/vmw_balloon.c
++++ b/drivers/misc/vmw_balloon.c
+@@ -450,7 +450,7 @@ static int vmballoon_send_lock_page(stru
+
+ pfn32 = (u32)pfn;
+ if (pfn32 != pfn)
+- return -1;
++ return -EINVAL;
+
+ STATS_INC(b->stats.lock[false]);
+
+@@ -460,7 +460,7 @@ static int vmballoon_send_lock_page(stru
+
+ pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
+ STATS_INC(b->stats.lock_fail[false]);
+- return 1;
++ return -EIO;
+ }
+
+ static int vmballoon_send_batched_lock(struct vmballoon *b,
+@@ -597,11 +597,12 @@ static int vmballoon_lock_page(struct vm
+
+ locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status,
+ target);
+- if (locked > 0) {
++ if (locked) {
+ STATS_INC(b->stats.refused_alloc[false]);
+
+- if (hv_status == VMW_BALLOON_ERROR_RESET ||
+- hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED) {
++ if (locked == -EIO &&
++ (hv_status == VMW_BALLOON_ERROR_RESET ||
++ hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED)) {
+ vmballoon_free_page(page, false);
+ return -EIO;
+ }
+@@ -617,7 +618,7 @@ static int vmballoon_lock_page(struct vm
+ } else {
+ vmballoon_free_page(page, false);
+ }
+- return -EIO;
++ return locked;
+ }
+
+ /* track allocated page */
--- /dev/null
+From c3cc1b0fc27508da53fe955a3b23d03964410682 Mon Sep 17 00:00:00 2001
+From: Nadav Amit <namit@vmware.com>
+Date: Tue, 19 Jun 2018 16:00:27 -0700
+Subject: vmw_balloon: fix VMCI use when balloon built into kernel
+
+From: Nadav Amit <namit@vmware.com>
+
+commit c3cc1b0fc27508da53fe955a3b23d03964410682 upstream.
+
+Currently, when all modules, including VMCI and the VMware balloon, are
+built into the kernel, the balloon is initialized before VMCI is probed.
+As a result, the balloon fails to initialize the VMCI doorbell, which it
+uses to receive asynchronous requests for balloon size changes.
+
+The problem can be seen in the logs, in the form of the following
+message:
+ "vmw_balloon: failed to initialize vmci doorbell"
+
+The driver would work correctly but slightly less efficiently, probing
+for requests periodically. This patch changes the balloon to be
+initialized using late_initcall() instead of module_init() to address
+this issue. It does not address a situation in which VMCI is built as a
+module and the balloon is built into the kernel.
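+
+For reference, a minimal built-in/module skeleton using late_initcall() could
+look like the following (the names are placeholders, not taken from the
+driver):
+
+  #include <linux/init.h>
+  #include <linux/module.h>
+
+  static int __init example_init(void)
+  {
+          /* Runs after device_initcall()-level drivers such as VMCI
+           * when built in; behaves like module_init() when modular. */
+          pr_info("example: initialized\n");
+          return 0;
+  }
+  late_initcall(example_init);
+
+  static void __exit example_exit(void)
+  {
+  }
+  module_exit(example_exit);
+
+  MODULE_LICENSE("GPL");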
+
+Fixes: 48e3d668b790 ("VMware balloon: Enable notification via VMCI")
+Cc: stable@vger.kernel.org
+Reviewed-by: Xavier Deguillard <xdeguillard@vmware.com>
+Signed-off-by: Nadav Amit <namit@vmware.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/misc/vmw_balloon.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+--- a/drivers/misc/vmw_balloon.c
++++ b/drivers/misc/vmw_balloon.c
+@@ -1297,7 +1297,14 @@ static int __init vmballoon_init(void)
+
+ return 0;
+ }
+-module_init(vmballoon_init);
++
++/*
++ * Using late_initcall() instead of module_init() allows the balloon to use the
++ * VMCI doorbell even when the balloon is built into the kernel. Otherwise the
++ * VMCI is probed only after the balloon is initialized. If the balloon is used
++ * as a module, late_initcall() is equivalent to module_init().
++ */
++late_initcall(vmballoon_init);
+
+ static void __exit vmballoon_exit(void)
+ {
--- /dev/null
+From ce664331b2487a5d244a51cbdd8cb54f866fbe5d Mon Sep 17 00:00:00 2001
+From: Nadav Amit <namit@vmware.com>
+Date: Tue, 19 Jun 2018 16:00:26 -0700
+Subject: vmw_balloon: VMCI_DOORBELL_SET does not check status
+
+From: Nadav Amit <namit@vmware.com>
+
+commit ce664331b2487a5d244a51cbdd8cb54f866fbe5d upstream.
+
+When vmballoon_vmci_init() sets a doorbell using VMCI_DOORBELL_SET, for
+some reason it does not check the status but instead looks at the result.
+However, the hypervisor does not update the result - it updates the
+status. This might cause the VMCI doorbell not to be enabled, resulting
+in degraded performance.
+
+Fixes: 48e3d668b790 ("VMware balloon: Enable notification via VMCI")
+Cc: stable@vger.kernel.org
+Reviewed-by: Xavier Deguillard <xdeguillard@vmware.com>
+Signed-off-by: Nadav Amit <namit@vmware.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/misc/vmw_balloon.c | 37 +++++++++++++++++++------------------
+ 1 file changed, 19 insertions(+), 18 deletions(-)
+
+--- a/drivers/misc/vmw_balloon.c
++++ b/drivers/misc/vmw_balloon.c
+@@ -1036,29 +1036,30 @@ static void vmballoon_vmci_cleanup(struc
+ */
+ static int vmballoon_vmci_init(struct vmballoon *b)
+ {
+- int error = 0;
++ unsigned long error, dummy;
+
+- if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) != 0) {
+- error = vmci_doorbell_create(&b->vmci_doorbell,
+- VMCI_FLAG_DELAYED_CB,
+- VMCI_PRIVILEGE_FLAG_RESTRICTED,
+- vmballoon_doorbell, b);
+-
+- if (error == VMCI_SUCCESS) {
+- VMWARE_BALLOON_CMD(VMCI_DOORBELL_SET,
+- b->vmci_doorbell.context,
+- b->vmci_doorbell.resource, error);
+- STATS_INC(b->stats.doorbell_set);
+- }
+- }
++ if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) == 0)
++ return 0;
+
+- if (error != 0) {
+- vmballoon_vmci_cleanup(b);
++ error = vmci_doorbell_create(&b->vmci_doorbell, VMCI_FLAG_DELAYED_CB,
++ VMCI_PRIVILEGE_FLAG_RESTRICTED,
++ vmballoon_doorbell, b);
+
+- return -EIO;
+- }
++ if (error != VMCI_SUCCESS)
++ goto fail;
++
++ error = VMWARE_BALLOON_CMD(VMCI_DOORBELL_SET, b->vmci_doorbell.context,
++ b->vmci_doorbell.resource, dummy);
++
++ STATS_INC(b->stats.doorbell_set);
++
++ if (error != VMW_BALLOON_SUCCESS)
++ goto fail;
+
+ return 0;
++fail:
++ vmballoon_vmci_cleanup(b);
++ return -EIO;
+ }
+
+ /*