--- /dev/null
+From 109728ccc5933151c68d1106e4065478a487a323 Mon Sep 17 00:00:00 2001
+From: Kirill Tkhai <ktkhai@virtuozzo.com>
+Date: Thu, 19 Jul 2018 15:49:39 +0300
+Subject: fuse: Add missed unlock_page() to fuse_readpages_fill()
+
+From: Kirill Tkhai <ktkhai@virtuozzo.com>
+
+commit 109728ccc5933151c68d1106e4065478a487a323 upstream.
+
+The error path above in fuse_readpages_fill() returns with the page
+unlocked, so this error path should unlock the page as well.
+
+Fixes: f8dbdf81821b ("fuse: rework fuse_readpages()")
+Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
+Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/fuse/file.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -866,6 +866,7 @@ static int fuse_readpages_fill(void *_da
+ }
+
+ if (WARN_ON(req->num_pages >= req->max_pages)) {
++ unlock_page(page);
+ fuse_put_request(fc, req);
+ return -EIO;
+ }
--- /dev/null
+From a2477b0e67c52f4364a47c3ad70902bc2a61bd4c Mon Sep 17 00:00:00 2001
+From: Andrey Ryabinin <aryabinin@virtuozzo.com>
+Date: Tue, 17 Jul 2018 19:00:33 +0300
+Subject: fuse: Don't access pipe->buffers without pipe_lock()
+
+From: Andrey Ryabinin <aryabinin@virtuozzo.com>
+
+commit a2477b0e67c52f4364a47c3ad70902bc2a61bd4c upstream.
+
+fuse_dev_splice_write() reads pipe->buffers to determine the size of the
+'bufs' array before taking the pipe_lock(). This is not safe, as
+another thread might change pipe->buffers between the allocation and
+the pipe_lock() call, leaving us with a 'bufs' array that is too small.
+
+Move the bufs allocations inside pipe_lock()/pipe_unlock() to fix this.
+
+Fixes: dd3bb14f44a6 ("fuse: support splice() writing to fuse device")
+Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
+Cc: <stable@vger.kernel.org> # v2.6.35
+Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/fuse/dev.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/fs/fuse/dev.c
++++ b/fs/fuse/dev.c
+@@ -1944,12 +1944,15 @@ static ssize_t fuse_dev_splice_write(str
+ if (!fud)
+ return -EPERM;
+
++ pipe_lock(pipe);
++
+ bufs = kmalloc_array(pipe->buffers, sizeof(struct pipe_buffer),
+ GFP_KERNEL);
+- if (!bufs)
++ if (!bufs) {
++ pipe_unlock(pipe);
+ return -ENOMEM;
++ }
+
+- pipe_lock(pipe);
+ nbuf = 0;
+ rem = 0;
+ for (idx = 0; idx < pipe->nrbufs && rem < len; idx++)
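
For context, the bug class is a time-of-check-to-time-of-use sizing race:
the allocation is sized from shared state before the lock protecting that
state is held. Below is a self-contained userspace sketch of the same shape
(hypothetical names, not FUSE code); note that moving the allocation under
the lock is fine in the kernel patch because pipe_lock() is a mutex, so a
sleeping kmalloc(GFP_KERNEL) is allowed there:

    #include <pthread.h>
    #include <stdlib.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int nbufs = 16;          /* like pipe->buffers: guarded by 'lock' */

    static void broken(void)
    {
        /* racy: 'nbufs' is read before the lock is held, so another
         * thread may grow it and the loop below overruns the buffer */
        int *bufs = malloc(nbufs * sizeof(*bufs));

        pthread_mutex_lock(&lock);
        for (int i = 0; i < nbufs; i++)
            bufs[i] = i;
        pthread_mutex_unlock(&lock);
        free(bufs);
    }

    static void fixed(void)
    {
        pthread_mutex_lock(&lock);  /* size and use under the same lock */
        int *bufs = malloc(nbufs * sizeof(*bufs));
        if (!bufs) {
            pthread_mutex_unlock(&lock);
            return;
        }
        for (int i = 0; i < nbufs; i++)
            bufs[i] = i;
        pthread_mutex_unlock(&lock);
        free(bufs);
    }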
--- /dev/null
+From 87114373ea507895a62afb10d2910bd9adac35a8 Mon Sep 17 00:00:00 2001
+From: Miklos Szeredi <mszeredi@redhat.com>
+Date: Thu, 26 Jul 2018 16:13:11 +0200
+Subject: fuse: fix double request_end()
+
+From: Miklos Szeredi <mszeredi@redhat.com>
+
+commit 87114373ea507895a62afb10d2910bd9adac35a8 upstream.
+
+Refcounting of the request is broken when fuse_abort_conn() is called and
+the request is on the fpq->io list:
+
+ - the ref is taken too late
+ - and it is then not dropped
+
+Fixes: 0d8e84b0432b ("fuse: simplify request abort")
+Cc: <stable@vger.kernel.org> # v4.2
+Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/fuse/dev.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/fs/fuse/dev.c
++++ b/fs/fuse/dev.c
+@@ -371,7 +371,7 @@ static void request_end(struct fuse_conn
+ struct fuse_iqueue *fiq = &fc->iq;
+
+ if (test_and_set_bit(FR_FINISHED, &req->flags))
+- return;
++ goto out_put_req;
+
+ spin_lock(&fiq->waitq.lock);
+ list_del_init(&req->intr_entry);
+@@ -400,6 +400,7 @@ static void request_end(struct fuse_conn
+ wake_up(&req->waitq);
+ if (req->end)
+ req->end(fc, req);
++out_put_req:
+ fuse_put_request(fc, req);
+ }
+
+@@ -2108,6 +2109,7 @@ void fuse_abort_conn(struct fuse_conn *f
+ set_bit(FR_ABORTED, &req->flags);
+ if (!test_bit(FR_LOCKED, &req->flags)) {
+ set_bit(FR_PRIVATE, &req->flags);
++ __fuse_get_request(req);
+ list_move(&req->list, &to_end1);
+ }
+ spin_unlock(&req->waitq.lock);
+@@ -2134,7 +2136,6 @@ void fuse_abort_conn(struct fuse_conn *f
+
+ while (!list_empty(&to_end1)) {
+ req = list_first_entry(&to_end1, struct fuse_req, list);
+- __fuse_get_request(req);
+ list_del_init(&req->list);
+ request_end(fc, req);
+ }
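
The shape of the fix, reduced to a self-contained sketch with hypothetical
types (not the fuse structures): the reference that keeps the object alive
must be taken at the moment the request is moved onto the private to_end1
list, and it must be dropped on every exit path of the end function,
including the path where someone else already finished the request:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdlib.h>

    struct obj {
        atomic_int refs;
        atomic_bool finished;
    };

    static void get(struct obj *o)      /* taken when privatizing the object */
    {
        atomic_fetch_add(&o->refs, 1);
    }

    static void put(struct obj *o)
    {
        if (atomic_fetch_sub(&o->refs, 1) == 1)
            free(o);
    }

    static void end(struct obj *o)      /* like request_end() */
    {
        if (!atomic_exchange(&o->finished, true)) {
            /* ...completion work runs exactly once... */
        }
        put(o);                         /* dropped on both paths */
    }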
--- /dev/null
+From 63576c13bd17848376c8ba4a98f5d5151140c4ac Mon Sep 17 00:00:00 2001
+From: Miklos Szeredi <mszeredi@redhat.com>
+Date: Thu, 26 Jul 2018 16:13:11 +0200
+Subject: fuse: fix initial parallel dirops
+
+From: Miklos Szeredi <mszeredi@redhat.com>
+
+commit 63576c13bd17848376c8ba4a98f5d5151140c4ac upstream.
+
+If parallel dirops are enabled in the FUSE_INIT reply, then the first
+operation may leave fi->mutex held.
+
+Reported-by: syzbot <syzbot+3f7b29af1baa9d0a55be@syzkaller.appspotmail.com>
+Fixes: 5c672ab3f0ee ("fuse: serialize dirops by default")
+Cc: <stable@vger.kernel.org> # v4.7
+Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/fuse/dir.c | 10 ++++++----
+ fs/fuse/fuse_i.h | 4 ++--
+ fs/fuse/inode.c | 14 ++++++++++----
+ 3 files changed, 18 insertions(+), 10 deletions(-)
+
+--- a/fs/fuse/dir.c
++++ b/fs/fuse/dir.c
+@@ -355,11 +355,12 @@ static struct dentry *fuse_lookup(struct
+ struct inode *inode;
+ struct dentry *newent;
+ bool outarg_valid = true;
++ bool locked;
+
+- fuse_lock_inode(dir);
++ locked = fuse_lock_inode(dir);
+ err = fuse_lookup_name(dir->i_sb, get_node_id(dir), &entry->d_name,
+ &outarg, &inode);
+- fuse_unlock_inode(dir);
++ fuse_unlock_inode(dir, locked);
+ if (err == -ENOENT) {
+ outarg_valid = false;
+ err = 0;
+@@ -1340,6 +1341,7 @@ static int fuse_readdir(struct file *fil
+ struct fuse_conn *fc = get_fuse_conn(inode);
+ struct fuse_req *req;
+ u64 attr_version = 0;
++ bool locked;
+
+ if (is_bad_inode(inode))
+ return -EIO;
+@@ -1367,9 +1369,9 @@ static int fuse_readdir(struct file *fil
+ fuse_read_fill(req, file, ctx->pos, PAGE_SIZE,
+ FUSE_READDIR);
+ }
+- fuse_lock_inode(inode);
++ locked = fuse_lock_inode(inode);
+ fuse_request_send(fc, req);
+- fuse_unlock_inode(inode);
++ fuse_unlock_inode(inode, locked);
+ nbytes = req->out.args[0].size;
+ err = req->out.h.error;
+ fuse_put_request(fc, req);
+--- a/fs/fuse/fuse_i.h
++++ b/fs/fuse/fuse_i.h
+@@ -974,8 +974,8 @@ int fuse_do_setattr(struct dentry *dentr
+
+ void fuse_set_initialized(struct fuse_conn *fc);
+
+-void fuse_unlock_inode(struct inode *inode);
+-void fuse_lock_inode(struct inode *inode);
++void fuse_unlock_inode(struct inode *inode, bool locked);
++bool fuse_lock_inode(struct inode *inode);
+
+ int fuse_setxattr(struct inode *inode, const char *name, const void *value,
+ size_t size, int flags);
+--- a/fs/fuse/inode.c
++++ b/fs/fuse/inode.c
+@@ -357,15 +357,21 @@ int fuse_reverse_inval_inode(struct supe
+ return 0;
+ }
+
+-void fuse_lock_inode(struct inode *inode)
++bool fuse_lock_inode(struct inode *inode)
+ {
+- if (!get_fuse_conn(inode)->parallel_dirops)
++ bool locked = false;
++
++ if (!get_fuse_conn(inode)->parallel_dirops) {
+ mutex_lock(&get_fuse_inode(inode)->mutex);
++ locked = true;
++ }
++
++ return locked;
+ }
+
+-void fuse_unlock_inode(struct inode *inode)
++void fuse_unlock_inode(struct inode *inode, bool locked)
+ {
+- if (!get_fuse_conn(inode)->parallel_dirops)
++ if (locked)
+ mutex_unlock(&get_fuse_inode(inode)->mutex);
+ }
+
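
The underlying rule is general: when a lock is taken conditionally, the
unlock side must key off what the lock side actually did, never off a
re-evaluation of the condition, because the condition (here
fc->parallel_dirops, flipped when the FUSE_INIT reply is processed) can
change in between. A minimal sketch with hypothetical names:

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
    static bool serialize = true;   /* like !fc->parallel_dirops: may flip */

    static bool cond_lock(void)
    {
        if (serialize) {
            pthread_mutex_lock(&m);
            return true;            /* report what actually happened */
        }
        return false;
    }

    static void cond_unlock(bool locked)
    {
        if (locked)                 /* NOT 'if (serialize)': it may have flipped */
            pthread_mutex_unlock(&m);
    }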
--- /dev/null
+From e8f3bd773d22f488724dffb886a1618da85c2966 Mon Sep 17 00:00:00 2001
+From: Miklos Szeredi <mszeredi@redhat.com>
+Date: Thu, 26 Jul 2018 16:13:11 +0200
+Subject: fuse: Fix oops at process_init_reply()
+
+From: Miklos Szeredi <mszeredi@redhat.com>
+
+commit e8f3bd773d22f488724dffb886a1618da85c2966 upstream.
+
+syzbot is hitting a NULL pointer dereference at process_init_reply().
+This is because deactivate_locked_super() is called before the response
+for the initial request is processed.
+
+Fix this by aborting and waiting for all requests (including FUSE_INIT)
+before resetting fc->sb.
+
+Original patch by Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>.
+
+Reported-by: syzbot <syzbot+b62f08f4d5857755e3bc@syzkaller.appspotmail.com>
+Fixes: e27c9d3877a0 ("fuse: fuse: add time_gran to INIT_OUT")
+Cc: <stable@vger.kernel.org> # v3.19
+Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/fuse/inode.c | 25 +++++++++++--------------
+ 1 file changed, 11 insertions(+), 14 deletions(-)
+
+--- a/fs/fuse/inode.c
++++ b/fs/fuse/inode.c
+@@ -397,11 +397,6 @@ static void fuse_put_super(struct super_
+ {
+ struct fuse_conn *fc = get_fuse_conn_super(sb);
+
+- fuse_send_destroy(fc);
+-
+- fuse_abort_conn(fc, false);
+- fuse_wait_aborted(fc);
+-
+ mutex_lock(&fuse_mutex);
+ list_del(&fc->entry);
+ fuse_ctl_remove_conn(fc);
+@@ -1218,16 +1213,25 @@ static struct dentry *fuse_mount(struct
+ return mount_nodev(fs_type, flags, raw_data, fuse_fill_super);
+ }
+
+-static void fuse_kill_sb_anon(struct super_block *sb)
++static void fuse_sb_destroy(struct super_block *sb)
+ {
+ struct fuse_conn *fc = get_fuse_conn_super(sb);
+
+ if (fc) {
++ fuse_send_destroy(fc);
++
++ fuse_abort_conn(fc, false);
++ fuse_wait_aborted(fc);
++
+ down_write(&fc->killsb);
+ fc->sb = NULL;
+ up_write(&fc->killsb);
+ }
++}
+
++static void fuse_kill_sb_anon(struct super_block *sb)
++{
++ fuse_sb_destroy(sb);
+ kill_anon_super(sb);
+ }
+
+@@ -1250,14 +1254,7 @@ static struct dentry *fuse_mount_blk(str
+
+ static void fuse_kill_sb_blk(struct super_block *sb)
+ {
+- struct fuse_conn *fc = get_fuse_conn_super(sb);
+-
+- if (fc) {
+- down_write(&fc->killsb);
+- fc->sb = NULL;
+- up_write(&fc->killsb);
+- }
+-
++ fuse_sb_destroy(sb);
+ kill_block_super(sb);
+ }
+
--- /dev/null
+From 45ff350bbd9d0f0977ff270a0d427c71520c0c37 Mon Sep 17 00:00:00 2001
+From: Miklos Szeredi <mszeredi@redhat.com>
+Date: Thu, 26 Jul 2018 16:13:11 +0200
+Subject: fuse: fix unlocked access to processing queue
+
+From: Miklos Szeredi <mszeredi@redhat.com>
+
+commit 45ff350bbd9d0f0977ff270a0d427c71520c0c37 upstream.
+
+fuse_dev_release() assumes that it's the only one referencing the
+fpq->processing list, but that's not true, since fuse_abort_conn() can be
+doing the same without any serialization between the two.
+
+Fixes: c3696046beb3 ("fuse: separate pqueue for clones")
+Cc: <stable@vger.kernel.org> # v4.2
+Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/fuse/dev.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+--- a/fs/fuse/dev.c
++++ b/fs/fuse/dev.c
+@@ -2153,9 +2153,15 @@ int fuse_dev_release(struct inode *inode
+ if (fud) {
+ struct fuse_conn *fc = fud->fc;
+ struct fuse_pqueue *fpq = &fud->pq;
++ LIST_HEAD(to_end);
+
++ spin_lock(&fpq->lock);
+ WARN_ON(!list_empty(&fpq->io));
+- end_requests(fc, &fpq->processing);
++ list_splice_init(&fpq->processing, &to_end);
++ spin_unlock(&fpq->lock);
++
++ end_requests(fc, &to_end);
++
+ /* Are we the last open device? */
+ if (atomic_dec_and_test(&fc->dev_count)) {
+ WARN_ON(fc->iq.fasync != NULL);
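
The fix applies the standard detach-then-process idiom: when two paths
(device release and connection abort) can race over fpq->processing, each
must privatize the whole list under fpq->lock before walking it. A
userspace sketch of the idiom, with hypothetical names:

    #include <pthread.h>
    #include <stddef.h>

    struct node { struct node *next; };

    static pthread_mutex_t lk = PTHREAD_MUTEX_INITIALIZER;
    static struct node *processing;     /* shared, like fpq->processing */

    static struct node *detach_all(void)
    {
        pthread_mutex_lock(&lk);
        struct node *mine = processing; /* splice the list out... */
        processing = NULL;
        pthread_mutex_unlock(&lk);
        return mine;                    /* ...then walk it lock-free */
    }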
--- /dev/null
+From b8f95e5d13f5f0191dcb4b9113113d241636e7cb Mon Sep 17 00:00:00 2001
+From: Miklos Szeredi <mszeredi@redhat.com>
+Date: Thu, 26 Jul 2018 16:13:11 +0200
+Subject: fuse: umount should wait for all requests
+
+From: Miklos Szeredi <mszeredi@redhat.com>
+
+commit b8f95e5d13f5f0191dcb4b9113113d241636e7cb upstream.
+
+fuse_abort_conn() does not guarantee that all async requests have actually
+finished aborting (i.e. that their ->end() function has been called).  This
+could result in inodes that are still in use after umount.
+
+Add a helper to wait until all requests are fully done. This is done by
+looking at the "num_waiting" counter. When this counter drops to zero, we
+can be sure that no more requests are outstanding.
+
+Fixes: 0d8e84b0432b ("fuse: simplify request abort")
+Cc: <stable@vger.kernel.org> # v4.2
+Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/fuse/dev.c | 23 +++++++++++++++++++----
+ fs/fuse/fuse_i.h | 1 +
+ fs/fuse/inode.c | 2 ++
+ 3 files changed, 22 insertions(+), 4 deletions(-)
+
+--- a/fs/fuse/dev.c
++++ b/fs/fuse/dev.c
+@@ -127,6 +127,16 @@ static bool fuse_block_alloc(struct fuse
+ return !fc->initialized || (for_background && fc->blocked);
+ }
+
++static void fuse_drop_waiting(struct fuse_conn *fc)
++{
++ if (fc->connected) {
++ atomic_dec(&fc->num_waiting);
++ } else if (atomic_dec_and_test(&fc->num_waiting)) {
++ /* wake up aborters */
++ wake_up_all(&fc->blocked_waitq);
++ }
++}
++
+ static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
+ bool for_background)
+ {
+@@ -175,7 +185,7 @@ static struct fuse_req *__fuse_get_req(s
+ return req;
+
+ out:
+- atomic_dec(&fc->num_waiting);
++ fuse_drop_waiting(fc);
+ return ERR_PTR(err);
+ }
+
+@@ -285,7 +295,7 @@ void fuse_put_request(struct fuse_conn *
+
+ if (test_bit(FR_WAITING, &req->flags)) {
+ __clear_bit(FR_WAITING, &req->flags);
+- atomic_dec(&fc->num_waiting);
++ fuse_drop_waiting(fc);
+ }
+
+ if (req->stolen_file)
+@@ -371,7 +381,7 @@ static void request_end(struct fuse_conn
+ struct fuse_iqueue *fiq = &fc->iq;
+
+ if (test_and_set_bit(FR_FINISHED, &req->flags))
+- goto out_put_req;
++ goto put_request;
+
+ spin_lock(&fiq->waitq.lock);
+ list_del_init(&req->intr_entry);
+@@ -400,7 +410,7 @@ static void request_end(struct fuse_conn
+ wake_up(&req->waitq);
+ if (req->end)
+ req->end(fc, req);
+-out_put_req:
++put_request:
+ fuse_put_request(fc, req);
+ }
+
+@@ -2146,6 +2156,11 @@ void fuse_abort_conn(struct fuse_conn *f
+ }
+ EXPORT_SYMBOL_GPL(fuse_abort_conn);
+
++void fuse_wait_aborted(struct fuse_conn *fc)
++{
++ wait_event(fc->blocked_waitq, atomic_read(&fc->num_waiting) == 0);
++}
++
+ int fuse_dev_release(struct inode *inode, struct file *file)
+ {
+ struct fuse_dev *fud = fuse_get_dev(file);
+--- a/fs/fuse/fuse_i.h
++++ b/fs/fuse/fuse_i.h
+@@ -862,6 +862,7 @@ void fuse_request_send_background_locked
+
+ /* Abort all requests */
+ void fuse_abort_conn(struct fuse_conn *fc, bool is_abort);
++void fuse_wait_aborted(struct fuse_conn *fc);
+
+ /**
+ * Invalidate inode attributes
+--- a/fs/fuse/inode.c
++++ b/fs/fuse/inode.c
+@@ -400,6 +400,8 @@ static void fuse_put_super(struct super_
+ fuse_send_destroy(fc);
+
+ fuse_abort_conn(fc, false);
++ fuse_wait_aborted(fc);
++
+ mutex_lock(&fuse_mutex);
+ list_del(&fc->entry);
+ fuse_ctl_remove_conn(fc);
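
fuse_wait_aborted() is a drain barrier on the num_waiting counter. A
self-contained pthread analogy of the same protocol (the kernel version
uses atomics plus wait_event() on blocked_waitq, and only bothers with the
wake-up once the connection is no longer up):

    #include <pthread.h>

    static pthread_mutex_t lk = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
    static int num_waiting;

    static void drop_waiting(void)       /* like fuse_drop_waiting() */
    {
        pthread_mutex_lock(&lk);
        if (--num_waiting == 0)
            pthread_cond_broadcast(&cv); /* wake up aborters */
        pthread_mutex_unlock(&lk);
    }

    static void wait_aborted(void)       /* like fuse_wait_aborted() */
    {
        pthread_mutex_lock(&lk);
        while (num_waiting != 0)
            pthread_cond_wait(&cv, &lk);
        pthread_mutex_unlock(&lk);
    }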
--- /dev/null
+From 44883f01fe6ae436a8604c47d8435276fef369b0 Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Thu, 26 Jul 2018 13:01:52 +0200
+Subject: KVM: x86: ensure all MSRs can always be KVM_GET/SET_MSR'd
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit 44883f01fe6ae436a8604c47d8435276fef369b0 upstream.
+
+Some of the MSRs returned by GET_MSR_INDEX_LIST currently cannot be sent back
+to KVM_GET_MSR and/or KVM_SET_MSR; either they can never be sent back, or
+they are only accepted under special conditions. This makes the API a pain to
+use.
+
+To avoid this pain, this patch makes it so that the result of the get-list
+ioctl can always be used for host-initiated get and set. Since we don't have
+a separate way to check for read-only MSRs, this means some Hyper-V MSRs are
+ignored when written. Arguably they should not even be in the result of
+GET_MSR_INDEX_LIST, but I am leaving them there in case userspace is using the
+outcome of GET_MSR_INDEX_LIST to derive the support for the corresponding
+Hyper-V feature.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/hyperv.c | 27 ++++++++++++++++++++-------
+ arch/x86/kvm/hyperv.h | 2 +-
+ arch/x86/kvm/x86.c | 15 +++++++++------
+ 3 files changed, 30 insertions(+), 14 deletions(-)
+
+--- a/arch/x86/kvm/hyperv.c
++++ b/arch/x86/kvm/hyperv.c
+@@ -235,7 +235,7 @@ static int synic_set_msr(struct kvm_vcpu
+ struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
+ int ret;
+
+- if (!synic->active)
++ if (!synic->active && !host)
+ return 1;
+
+ trace_kvm_hv_synic_set_msr(vcpu->vcpu_id, msr, data, host);
+@@ -295,11 +295,12 @@ static int synic_set_msr(struct kvm_vcpu
+ return ret;
+ }
+
+-static int synic_get_msr(struct kvm_vcpu_hv_synic *synic, u32 msr, u64 *pdata)
++static int synic_get_msr(struct kvm_vcpu_hv_synic *synic, u32 msr, u64 *pdata,
++ bool host)
+ {
+ int ret;
+
+- if (!synic->active)
++ if (!synic->active && !host)
+ return 1;
+
+ ret = 0;
+@@ -1014,6 +1015,11 @@ static int kvm_hv_set_msr_pw(struct kvm_
+ case HV_X64_MSR_TSC_EMULATION_STATUS:
+ hv->hv_tsc_emulation_status = data;
+ break;
++ case HV_X64_MSR_TIME_REF_COUNT:
++ /* read-only, but still ignore it if host-initiated */
++ if (!host)
++ return 1;
++ break;
+ default:
+ vcpu_unimpl(vcpu, "Hyper-V uhandled wrmsr: 0x%x data 0x%llx\n",
+ msr, data);
+@@ -1101,6 +1107,12 @@ static int kvm_hv_set_msr(struct kvm_vcp
+ return stimer_set_count(vcpu_to_stimer(vcpu, timer_index),
+ data, host);
+ }
++ case HV_X64_MSR_TSC_FREQUENCY:
++ case HV_X64_MSR_APIC_FREQUENCY:
++ /* read-only, but still ignore it if host-initiated */
++ if (!host)
++ return 1;
++ break;
+ default:
+ vcpu_unimpl(vcpu, "Hyper-V uhandled wrmsr: 0x%x data 0x%llx\n",
+ msr, data);
+@@ -1156,7 +1168,8 @@ static int kvm_hv_get_msr_pw(struct kvm_
+ return 0;
+ }
+
+-static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
++static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
++ bool host)
+ {
+ u64 data = 0;
+ struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;
+@@ -1183,7 +1196,7 @@ static int kvm_hv_get_msr(struct kvm_vcp
+ case HV_X64_MSR_SIMP:
+ case HV_X64_MSR_EOM:
+ case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
+- return synic_get_msr(vcpu_to_synic(vcpu), msr, pdata);
++ return synic_get_msr(vcpu_to_synic(vcpu), msr, pdata, host);
+ case HV_X64_MSR_STIMER0_CONFIG:
+ case HV_X64_MSR_STIMER1_CONFIG:
+ case HV_X64_MSR_STIMER2_CONFIG:
+@@ -1229,7 +1242,7 @@ int kvm_hv_set_msr_common(struct kvm_vcp
+ return kvm_hv_set_msr(vcpu, msr, data, host);
+ }
+
+-int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
++int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
+ {
+ if (kvm_hv_msr_partition_wide(msr)) {
+ int r;
+@@ -1239,7 +1252,7 @@ int kvm_hv_get_msr_common(struct kvm_vcp
+ mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock);
+ return r;
+ } else
+- return kvm_hv_get_msr(vcpu, msr, pdata);
++ return kvm_hv_get_msr(vcpu, msr, pdata, host);
+ }
+
+ static __always_inline int get_sparse_bank_no(u64 valid_bank_mask, int bank_no)
+--- a/arch/x86/kvm/hyperv.h
++++ b/arch/x86/kvm/hyperv.h
+@@ -48,7 +48,7 @@ static inline struct kvm_vcpu *synic_to_
+ }
+
+ int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host);
+-int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
++int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host);
+
+ bool kvm_hv_hypercall_enabled(struct kvm *kvm);
+ int kvm_hv_hypercall(struct kvm_vcpu *vcpu);
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -2185,10 +2185,11 @@ static int set_msr_mce(struct kvm_vcpu *
+ vcpu->arch.mcg_status = data;
+ break;
+ case MSR_IA32_MCG_CTL:
+- if (!(mcg_cap & MCG_CTL_P))
++ if (!(mcg_cap & MCG_CTL_P) &&
++ (data || !msr_info->host_initiated))
+ return 1;
+ if (data != 0 && data != ~(u64)0)
+- return -1;
++ return 1;
+ vcpu->arch.mcg_ctl = data;
+ break;
+ default:
+@@ -2576,7 +2577,7 @@ int kvm_get_msr(struct kvm_vcpu *vcpu, s
+ }
+ EXPORT_SYMBOL_GPL(kvm_get_msr);
+
+-static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
++static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
+ {
+ u64 data;
+ u64 mcg_cap = vcpu->arch.mcg_cap;
+@@ -2591,7 +2592,7 @@ static int get_msr_mce(struct kvm_vcpu *
+ data = vcpu->arch.mcg_cap;
+ break;
+ case MSR_IA32_MCG_CTL:
+- if (!(mcg_cap & MCG_CTL_P))
++ if (!(mcg_cap & MCG_CTL_P) && !host)
+ return 1;
+ data = vcpu->arch.mcg_ctl;
+ break;
+@@ -2724,7 +2725,8 @@ int kvm_get_msr_common(struct kvm_vcpu *
+ case MSR_IA32_MCG_CTL:
+ case MSR_IA32_MCG_STATUS:
+ case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
+- return get_msr_mce(vcpu, msr_info->index, &msr_info->data);
++ return get_msr_mce(vcpu, msr_info->index, &msr_info->data,
++ msr_info->host_initiated);
+ case MSR_K7_CLK_CTL:
+ /*
+ * Provide expected ramp-up count for K7. All other
+@@ -2745,7 +2747,8 @@ int kvm_get_msr_common(struct kvm_vcpu *
+ case HV_X64_MSR_TSC_EMULATION_CONTROL:
+ case HV_X64_MSR_TSC_EMULATION_STATUS:
+ return kvm_hv_get_msr_common(vcpu,
+- msr_info->index, &msr_info->data);
++ msr_info->index, &msr_info->data,
++ msr_info->host_initiated);
+ break;
+ case MSR_IA32_BBL_CR_CTL3:
+ /* This legacy MSR exists but isn't fully documented in current
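
The userspace contract being restored is that the index list is
round-trippable. A sketch of that round trip, with error handling elided
and kvm_fd/vcpu_fd assumed to be already-open /dev/kvm and vcpu
descriptors; the initial E2BIG probe is the documented way to size the
list:

    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static void read_all_listed_msrs(int kvm_fd, int vcpu_fd)
    {
        struct kvm_msr_list probe = { .nmsrs = 0 };

        /* fails with E2BIG but fills in the required count */
        ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, &probe);

        struct kvm_msr_list *list =
            calloc(1, sizeof(*list) + probe.nmsrs * sizeof(__u32));
        list->nmsrs = probe.nmsrs;
        ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, list);

        struct kvm_msrs *msrs =
            calloc(1, sizeof(*msrs) + list->nmsrs * sizeof(struct kvm_msr_entry));
        msrs->nmsrs = list->nmsrs;
        for (__u32 i = 0; i < list->nmsrs; i++)
            msrs->entries[i].index = list->indices[i];

        /* host-initiated read: with this fix, every listed index is accepted */
        ioctl(vcpu_fd, KVM_GET_MSRS, msrs);

        free(list);
        free(msrs);
    }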
--- /dev/null
+From 024d83cadc6b2af027e473720f3c3da97496c318 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sun, 12 Aug 2018 20:41:45 +0200
+Subject: KVM: x86: SVM: Call x86_spec_ctrl_set_guest/host() with interrupts disabled
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 024d83cadc6b2af027e473720f3c3da97496c318 upstream.
+
+Mikhail reported the following lockdep splat:
+
+WARNING: possible irq lock inversion dependency detected
+CPU 0/KVM/10284 just changed the state of lock:
+ 000000000d538a88 (&st->lock){+...}, at:
+ speculative_store_bypass_update+0x10b/0x170
+
+but this lock was taken by another, HARDIRQ-safe lock
+in the past:
+
+(&(&sighand->siglock)->rlock){-.-.}
+
+ and interrupts could create inverse lock ordering between them.
+
+Possible interrupt unsafe locking scenario:
+
+ CPU0 CPU1
+ ---- ----
+ lock(&st->lock);
+ local_irq_disable();
+ lock(&(&sighand->siglock)->rlock);
+ lock(&st->lock);
+ <Interrupt>
+ lock(&(&sighand->siglock)->rlock);
+ *** DEADLOCK ***
+
+The code path which connects those locks is:
+
+ speculative_store_bypass_update()
+ ssb_prctl_set()
+ do_seccomp()
+ do_syscall_64()
+
+In svm_vcpu_run(), speculative_store_bypass_update() is called with
+interrupts enabled via x86_virt_spec_ctrl_set_guest/host().
+
+This is actually a false positive, because GIF=0 so interrupts are
+disabled even if IF=1; however, we can easily move the invocations of
+x86_virt_spec_ctrl_set_guest/host() into the interrupt disabled region to
+cure it, and it's a good idea to keep the GIF=0/IF=1 area as small
+and self-contained as possible.
+
+Fixes: 1f50ddb4f418 ("x86/speculation: Handle HT correctly on AMD")
+Reported-by: Mikhail Gavrilov <mikhail.v.gavrilov@gmail.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Mikhail Gavrilov <mikhail.v.gavrilov@gmail.com>
+Cc: Joerg Roedel <joro@8bytes.org>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: Radim Krčmář <rkrcmar@redhat.com>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: kvm@vger.kernel.org
+Cc: x86@kernel.org
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/svm.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -5580,8 +5580,6 @@ static void svm_vcpu_run(struct kvm_vcpu
+
+ clgi();
+
+- local_irq_enable();
+-
+ /*
+ * If this vCPU has touched SPEC_CTRL, restore the guest's value if
+ * it's non-zero. Since vmentry is serialising on affected CPUs, there
+@@ -5590,6 +5588,8 @@ static void svm_vcpu_run(struct kvm_vcpu
+ */
+ x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);
+
++ local_irq_enable();
++
+ asm volatile (
+ "push %%" _ASM_BP "; \n\t"
+ "mov %c[rbx](%[svm]), %%" _ASM_BX " \n\t"
+@@ -5712,12 +5712,12 @@ static void svm_vcpu_run(struct kvm_vcpu
+ if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
+ svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
+
+- x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);
+-
+ reload_tss(vcpu);
+
+ local_irq_disable();
+
++ x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);
++
+ vcpu->arch.cr2 = svm->vmcb->save.cr2;
+ vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
+ vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
--- /dev/null
+From d86564a2f085b79ec046a5cba90188e612352806 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Wed, 22 Aug 2018 17:30:15 +0200
+Subject: mm/tlb, x86/mm: Support invalidating TLB caches for RCU_TABLE_FREE
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit d86564a2f085b79ec046a5cba90188e612352806 upstream.
+
+Jann reported that x86 was missing required TLB invalidates when he
+hit the !*batch slow path in tlb_remove_table().
+
+This is indeed the case; RCU_TABLE_FREE does not provide TLB (cache)
+invalidates, because the PowerPC-hash code where this originated and the
+Sparc-hash code where it was subsequently used did not need them. ARM,
+which used this later, put an explicit TLB invalidate in its
+__p*_free_tlb() functions, and PowerPC-radix followed that example.
+
+But when we hooked up x86 we failed to consider this. Fix this by
+(optionally) hooking tlb_remove_table() into the TLB invalidate code.
+
+NOTE: s390 also needed something like this and might now
+ be able to use the generic code again.
+
+[ Modified to be on top of Nick's cleanups, which simplified this patch
+ now that tlb_flush_mmu_tlbonly() really only flushes the TLB - Linus ]
+
+Fixes: 9e52fc2b50de ("x86/mm: Enable RCU based page table freeing (CONFIG_HAVE_RCU_TABLE_FREE=y)")
+Reported-by: Jann Horn <jannh@google.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Acked-by: Rik van Riel <riel@surriel.com>
+Cc: Nicholas Piggin <npiggin@gmail.com>
+Cc: David Miller <davem@davemloft.net>
+Cc: Will Deacon <will.deacon@arm.com>
+Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Cc: Michael Ellerman <mpe@ellerman.id.au>
+Cc: stable@kernel.org
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/Kconfig | 3 +++
+ arch/x86/Kconfig | 1 +
+ mm/memory.c | 18 ++++++++++++++++++
+ 3 files changed, 22 insertions(+)
+
+--- a/arch/Kconfig
++++ b/arch/Kconfig
+@@ -354,6 +354,9 @@ config HAVE_ARCH_JUMP_LABEL
+ config HAVE_RCU_TABLE_FREE
+ bool
+
++config HAVE_RCU_TABLE_INVALIDATE
++ bool
++
+ config ARCH_HAVE_NMI_SAFE_CMPXCHG
+ bool
+
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -179,6 +179,7 @@ config X86
+ select HAVE_PERF_REGS
+ select HAVE_PERF_USER_STACK_DUMP
+ select HAVE_RCU_TABLE_FREE
++ select HAVE_RCU_TABLE_INVALIDATE if HAVE_RCU_TABLE_FREE
+ select HAVE_REGS_AND_STACK_ACCESS_API
+ select HAVE_RELIABLE_STACKTRACE if X86_64 && UNWINDER_FRAME_POINTER && STACK_VALIDATION
+ select HAVE_STACKPROTECTOR if CC_HAS_SANE_STACKPROTECTOR
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -330,6 +330,21 @@ bool __tlb_remove_page_size(struct mmu_g
+ * See the comment near struct mmu_table_batch.
+ */
+
++/*
++ * If we want tlb_remove_table() to imply TLB invalidates.
++ */
++static inline void tlb_table_invalidate(struct mmu_gather *tlb)
++{
++#ifdef CONFIG_HAVE_RCU_TABLE_INVALIDATE
++ /*
++ * Invalidate page-table caches used by hardware walkers. Then we still
++ * need to RCU-sched wait while freeing the pages because software
++ * walkers can still be in-flight.
++ */
++ tlb_flush_mmu_tlbonly(tlb);
++#endif
++}
++
+ static void tlb_remove_table_smp_sync(void *arg)
+ {
+ /* Simply deliver the interrupt */
+@@ -366,6 +381,7 @@ void tlb_table_flush(struct mmu_gather *
+ struct mmu_table_batch **batch = &tlb->batch;
+
+ if (*batch) {
++ tlb_table_invalidate(tlb);
+ call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
+ *batch = NULL;
+ }
+@@ -387,11 +403,13 @@ void tlb_remove_table(struct mmu_gather
+ if (*batch == NULL) {
+ *batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
+ if (*batch == NULL) {
++ tlb_table_invalidate(tlb);
+ tlb_remove_table_one(table);
+ return;
+ }
+ (*batch)->nr = 0;
+ }
++
+ (*batch)->tables[(*batch)->nr++] = table;
+ if ((*batch)->nr == MAX_TABLE_BATCH)
+ tlb_table_flush(tlb);
--- /dev/null
+From 58e73aa177850babb947555257fd4f79e5275cf1 Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Fri, 22 Jun 2018 10:59:17 +0200
+Subject: platform/x86: ideapad-laptop: Apply no_hw_rfkill to Y520-15IKBM, too
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit 58e73aa177850babb947555257fd4f79e5275cf1 upstream.
+
+The commit 5d9f40b56630 ("platform/x86: ideapad-laptop: Add
+Y520-15IKBN to no_hw_rfkill") added the entry for Y520-15IKBN, and it
+turned out that another variant, Y520-15IKBM, also requires
+no_hw_rfkill.
+
+Trim the last letter from the string so that it matches both the
+Y520-15IKBN and Y520-15IKBM models.
+
+Bugzilla: https://bugzilla.opensuse.org/show_bug.cgi?id=1098626
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Darren Hart (VMware) <dvhart@infradead.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/platform/x86/ideapad-laptop.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/platform/x86/ideapad-laptop.c
++++ b/drivers/platform/x86/ideapad-laptop.c
+@@ -1133,10 +1133,10 @@ static const struct dmi_system_id no_hw_
+ },
+ },
+ {
+- .ident = "Lenovo Legion Y520-15IKBN",
++ .ident = "Lenovo Legion Y520-15IKB",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Y520-15IKBN"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Y520-15IKB"),
+ },
+ },
+ {
--- /dev/null
+From 6fb741076a9fc53355e56dff2e2b46782b3a46d3 Mon Sep 17 00:00:00 2001
+From: Kees Cook <keescook@chromium.org>
+Date: Wed, 20 Jun 2018 14:31:41 -0700
+Subject: platform/x86: wmi: Do not mix pages and kmalloc
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Kees Cook <keescook@chromium.org>
+
+commit 6fb741076a9fc53355e56dff2e2b46782b3a46d3 upstream.
+
+The probe handler_data was being allocated with __get_free_pages()
+for no reason I could find. The error path was using kfree(). Since
+other things are happily using kmalloc() in the probe path, switch to
+kmalloc() entirely. This fixes the error path mismatch and will avoid
+issues with CONFIG_HARDENED_USERCOPY_PAGESPAN=y.
+
+Reported-by: Mihai Donțu <mihai.dontu@gmail.com>
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Acked-by: Mario Limonciello <Mario.limonciello@dell.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Darren Hart (VMware) <dvhart@infradead.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/platform/x86/wmi.c | 9 +++------
+ 1 file changed, 3 insertions(+), 6 deletions(-)
+
+--- a/drivers/platform/x86/wmi.c
++++ b/drivers/platform/x86/wmi.c
+@@ -895,7 +895,6 @@ static int wmi_dev_probe(struct device *
+ struct wmi_driver *wdriver =
+ container_of(dev->driver, struct wmi_driver, driver);
+ int ret = 0;
+- int count;
+ char *buf;
+
+ if (ACPI_FAILURE(wmi_method_enable(wblock, 1)))
+@@ -917,9 +916,8 @@ static int wmi_dev_probe(struct device *
+ goto probe_failure;
+ }
+
+- count = get_order(wblock->req_buf_size);
+- wblock->handler_data = (void *)__get_free_pages(GFP_KERNEL,
+- count);
++ wblock->handler_data = kmalloc(wblock->req_buf_size,
++ GFP_KERNEL);
+ if (!wblock->handler_data) {
+ ret = -ENOMEM;
+ goto probe_failure;
+@@ -964,8 +962,7 @@ static int wmi_dev_remove(struct device
+ if (wdriver->filter_callback) {
+ misc_deregister(&wblock->char_dev);
+ kfree(wblock->char_dev.name);
+- free_pages((unsigned long)wblock->handler_data,
+- get_order(wblock->req_buf_size));
++ kfree(wblock->handler_data);
+ }
+
+ if (wdriver->remove)
ext4-reset-error-code-in-ext4_find_entry-in-fallback.patch
ext4-fix-race-when-setting-the-bitmap-corrupted-flag.patch
nvme-pci-add-a-memory-barrier-to-nvme_dbbuf_update_and_check_event.patch
+x86-gpu-reserve-icl-s-graphics-stolen-memory.patch
+platform-x86-wmi-do-not-mix-pages-and-kmalloc.patch
+platform-x86-ideapad-laptop-apply-no_hw_rfkill-to-y20-15ikbm-too.patch
+mm-tlb-x86-mm-support-invalidating-tlb-caches-for-rcu_table_free.patch
+x86-speculation-l1tf-fix-overflow-in-l1tf_pfn_limit-on-32bit.patch
+x86-speculation-l1tf-fix-off-by-one-error-when-warning-that-system-has-too-much-ram.patch
+x86-speculation-l1tf-suggest-what-to-do-on-systems-with-too-much-ram.patch
+x86-vdso-fix-vdso-build-if-a-retpoline-is-emitted.patch
+x86-process-re-export-start_thread.patch
+kvm-x86-ensure-all-msrs-can-always-be-kvm_get-set_msr-d.patch
+kvm-x86-svm-call-x86_spec_ctrl_set_guest-host-with-interrupts-disabled.patch
+fuse-don-t-access-pipe-buffers-without-pipe_lock.patch
+fuse-fix-initial-parallel-dirops.patch
+fuse-fix-double-request_end.patch
+fuse-fix-unlocked-access-to-processing-queue.patch
+fuse-umount-should-wait-for-all-requests.patch
+fuse-fix-oops-at-process_init_reply.patch
+fuse-add-missed-unlock_page-to-fuse_readpages_fill.patch
+x86-kvm-avoid-unused-variable-warning.patch
--- /dev/null
+From db0c8d8b031d2b5960f6407f7f2ca20e97e00605 Mon Sep 17 00:00:00 2001
+From: Paulo Zanoni <paulo.r.zanoni@intel.com>
+Date: Fri, 4 May 2018 13:32:51 -0700
+Subject: x86/gpu: reserve ICL's graphics stolen memory
+
+From: Paulo Zanoni <paulo.r.zanoni@intel.com>
+
+commit db0c8d8b031d2b5960f6407f7f2ca20e97e00605 upstream.
+
+ICL changes the registers and addresses to 64 bits.
+
+I also briefly looked at implementing a u64 version of the PCI config
+read functions, but I concluded this wouldn't be trivial, so it's not
+worth it for a single user that can't have any racing problems
+while reading the register in two separate operations.
+
+v2:
+ - Scrub the development (non-public) changelog (Joonas).
+ - Remove the i915.ko bits so this can be easily backported in order
+ to properly avoid stolen memory even on machines without i915.ko
+ (Joonas).
+ - CC stable for the reasons above.
+
+Issue: VIZ-9250
+CC: stable@vger.kernel.org
+Cc: Ingo Molnar <mingo@kernel.org>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: x86@kernel.org
+Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
+Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+Signed-off-by: Paulo Zanoni <paulo.r.zanoni@intel.com>
+Fixes: 412310019a20 ("drm/i915/icl: Add initial Icelake definitions.")
+Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20180504203252.28048-1-paulo.r.zanoni@intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/early-quirks.c | 18 ++++++++++++++++++
+ include/drm/i915_drm.h | 4 +++-
+ 2 files changed, 21 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/early-quirks.c
++++ b/arch/x86/kernel/early-quirks.c
+@@ -338,6 +338,18 @@ static resource_size_t __init gen3_stole
+ return bsm & INTEL_BSM_MASK;
+ }
+
++static resource_size_t __init gen11_stolen_base(int num, int slot, int func,
++ resource_size_t stolen_size)
++{
++ u64 bsm;
++
++ bsm = read_pci_config(num, slot, func, INTEL_GEN11_BSM_DW0);
++ bsm &= INTEL_BSM_MASK;
++ bsm |= (u64)read_pci_config(num, slot, func, INTEL_GEN11_BSM_DW1) << 32;
++
++ return bsm;
++}
++
+ static resource_size_t __init i830_stolen_size(int num, int slot, int func)
+ {
+ u16 gmch_ctrl;
+@@ -498,6 +510,11 @@ static const struct intel_early_ops chv_
+ .stolen_size = chv_stolen_size,
+ };
+
++static const struct intel_early_ops gen11_early_ops __initconst = {
++ .stolen_base = gen11_stolen_base,
++ .stolen_size = gen9_stolen_size,
++};
++
+ static const struct pci_device_id intel_early_ids[] __initconst = {
+ INTEL_I830_IDS(&i830_early_ops),
+ INTEL_I845G_IDS(&i845_early_ops),
+@@ -529,6 +546,7 @@ static const struct pci_device_id intel_
+ INTEL_CFL_IDS(&gen9_early_ops),
+ INTEL_GLK_IDS(&gen9_early_ops),
+ INTEL_CNL_IDS(&gen9_early_ops),
++ INTEL_ICL_11_IDS(&gen11_early_ops),
+ };
+
+ struct resource intel_graphics_stolen_res __ro_after_init = DEFINE_RES_MEM(0, 0);
+--- a/include/drm/i915_drm.h
++++ b/include/drm/i915_drm.h
+@@ -95,7 +95,9 @@ extern struct resource intel_graphics_st
+ #define I845_TSEG_SIZE_512K (2 << 1)
+ #define I845_TSEG_SIZE_1M (3 << 1)
+
+-#define INTEL_BSM 0x5c
++#define INTEL_BSM 0x5c
++#define INTEL_GEN11_BSM_DW0 0xc0
++#define INTEL_GEN11_BSM_DW1 0xc4
+ #define INTEL_BSM_MASK (-(1u << 20))
+
+ #endif /* _I915_DRM_H_ */
--- /dev/null
+From 7288bde1f9df6c1475675419bdd7725ce84dec56 Mon Sep 17 00:00:00 2001
+From: Arnd Bergmann <arnd@arndb.de>
+Date: Mon, 20 Aug 2018 23:37:50 +0200
+Subject: x86: kvm: avoid unused variable warning
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+commit 7288bde1f9df6c1475675419bdd7725ce84dec56 upstream.
+
+Removing one of the two accesses of the maxphyaddr variable led to
+a harmless warning:
+
+arch/x86/kvm/x86.c: In function 'kvm_set_mmio_spte_mask':
+arch/x86/kvm/x86.c:6563:6: error: unused variable 'maxphyaddr' [-Werror=unused-variable]
+
+Removing the #ifdef seems to be the nicest workaround, as it
+makes the code look cleaner than adding another #ifdef.
+
+Fixes: 28a1f3ac1d0c ("kvm: x86: Set highest physical address bits in non-present/reserved SPTEs")
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Cc: stable@vger.kernel.org # L1TF
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/x86.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -6512,14 +6512,12 @@ static void kvm_set_mmio_spte_mask(void)
+ /* Set the present bit. */
+ mask |= 1ull;
+
+-#ifdef CONFIG_X86_64
+ /*
+ * If reserved bit is not supported, clear the present bit to disable
+ * mmio page fault.
+ */
+- if (maxphyaddr == 52)
++ if (IS_ENABLED(CONFIG_X86_64) && maxphyaddr == 52)
+ mask &= ~1ull;
+-#endif
+
+ kvm_mmu_set_mmio_spte_mask(mask, mask);
+ }
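
The IS_ENABLED() form avoids the warning because the variable stays
referenced in every configuration, while constant folding still removes
the dead branch when the option is off. A standalone illustration (the
macro here is a stand-in for the kernel's IS_ENABLED(CONFIG_X86_64)):

    #include <stdio.h>

    #define IS_ENABLED_DEMO 0   /* flip to 1 to model CONFIG_X86_64=y */

    static unsigned long long demo_mask(int maxphyaddr)
    {
        unsigned long long mask = 1ull;

        /* 'maxphyaddr' is used in all builds, so no -Wunused-variable;
         * the optimizer still discards the branch when the macro is 0 */
        if (IS_ENABLED_DEMO && maxphyaddr == 52)
            mask &= ~1ull;
        return mask;
    }

    int main(void)
    {
        printf("mask = %llx\n", demo_mask(52));
        return 0;
    }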
--- /dev/null
+From dc76803e57cc86589c4efcb5362918f9b0c0436f Mon Sep 17 00:00:00 2001
+From: Rian Hunter <rian@alum.mit.edu>
+Date: Sun, 19 Aug 2018 16:08:53 -0700
+Subject: x86/process: Re-export start_thread()
+
+From: Rian Hunter <rian@alum.mit.edu>
+
+commit dc76803e57cc86589c4efcb5362918f9b0c0436f upstream.
+
+The consolidation of the start_thread() functions removed the export
+unintentionally. This breaks binfmt handlers built as a module.
+
+Add it back.
+
+Fixes: e634d8fc792c ("x86-64: merge the standard and compat start_thread() functions")
+Signed-off-by: Rian Hunter <rian@alum.mit.edu>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Borislav Petkov <bpetkov@suse.de>
+Cc: Vitaly Kuznetsov <vkuznets@redhat.com>
+Cc: Joerg Roedel <jroedel@suse.de>
+Cc: Dmitry Safonov <dima@arista.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20180819230854.7275-1-rian@alum.mit.edu
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/process_64.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/x86/kernel/process_64.c
++++ b/arch/x86/kernel/process_64.c
+@@ -384,6 +384,7 @@ start_thread(struct pt_regs *regs, unsig
+ start_thread_common(regs, new_ip, new_sp,
+ __USER_CS, __USER_DS, 0);
+ }
++EXPORT_SYMBOL_GPL(start_thread);
+
+ #ifdef CONFIG_COMPAT
+ void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp)
--- /dev/null
+From b0a182f875689647b014bc01d36b340217792852 Mon Sep 17 00:00:00 2001
+From: Vlastimil Babka <vbabka@suse.cz>
+Date: Thu, 23 Aug 2018 15:44:18 +0200
+Subject: x86/speculation/l1tf: Fix off-by-one error when warning that system has too much RAM
+
+From: Vlastimil Babka <vbabka@suse.cz>
+
+commit b0a182f875689647b014bc01d36b340217792852 upstream.
+
+Two users have reported [1] that they have an "extremely unlikely" system
+with more than MAX_PA/2 memory, on which the L1TF mitigation is not
+effective. In fact it's a CPU with a 36-bit physical address limit (64GB)
+and 32GB of memory, but due to holes in the e820 map, the main region ends
+almost 500MB over the 32GB limit:
+
+[ 0.000000] BIOS-e820: [mem 0x0000000100000000-0x000000081effffff] usable
+
+Suggestions to use 'mem=32G' to enable the L1TF mitigation while losing the
+500MB revealed that there's an off-by-one error in the check in
+l1tf_select_mitigation().
+
+l1tf_pfn_limit() returns the last usable pfn (inclusive) and the range
+check in the mitigation path does not take this into account.
+
+Instead of amending the range check, make l1tf_pfn_limit() return the first
+PFN over the limit, which is less error prone. Adjust the other
+users accordingly.
+
+[1] https://bugzilla.suse.com/show_bug.cgi?id=1105536
+
+Fixes: 17dbca119312 ("x86/speculation/l1tf: Add sysfs reporting for l1tf")
+Reported-by: George Anchev <studio@anchev.net>
+Reported-by: Christopher Snowhill <kode54@gmail.com>
+Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: "H . Peter Anvin" <hpa@zytor.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Michal Hocko <mhocko@kernel.org>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20180823134418.17008-1-vbabka@suse.cz
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/processor.h | 2 +-
+ arch/x86/mm/init.c | 2 +-
+ arch/x86/mm/mmap.c | 2 +-
+ 3 files changed, 3 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/include/asm/processor.h
++++ b/arch/x86/include/asm/processor.h
+@@ -183,7 +183,7 @@ extern void cpu_detect(struct cpuinfo_x8
+
+ static inline unsigned long long l1tf_pfn_limit(void)
+ {
+- return BIT_ULL(boot_cpu_data.x86_phys_bits - 1 - PAGE_SHIFT) - 1;
++ return BIT_ULL(boot_cpu_data.x86_phys_bits - 1 - PAGE_SHIFT);
+ }
+
+ extern void early_cpu_init(void);
+--- a/arch/x86/mm/init.c
++++ b/arch/x86/mm/init.c
+@@ -923,7 +923,7 @@ unsigned long max_swapfile_size(void)
+
+ if (boot_cpu_has_bug(X86_BUG_L1TF)) {
+ /* Limit the swap file size to MAX_PA/2 for L1TF workaround */
+- unsigned long long l1tf_limit = l1tf_pfn_limit() + 1;
++ unsigned long long l1tf_limit = l1tf_pfn_limit();
+ /*
+ * We encode swap offsets also with 3 bits below those for pfn
+ * which makes the usable limit higher.
+--- a/arch/x86/mm/mmap.c
++++ b/arch/x86/mm/mmap.c
+@@ -257,7 +257,7 @@ bool pfn_modify_allowed(unsigned long pf
+ /* If it's real memory always allow */
+ if (pfn_valid(pfn))
+ return true;
+- if (pfn > l1tf_pfn_limit() && !capable(CAP_SYS_ADMIN))
++ if (pfn >= l1tf_pfn_limit() && !capable(CAP_SYS_ADMIN))
+ return false;
+ return true;
+ }
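
Worked numbers for the reported configuration (36 physical bits, so
MAX_PA/2 = 32GB, PAGE_SHIFT = 12): with the old inclusive limit, a system
trimmed to exactly 32GB with mem=32G still tripped the warning, because
half_pa sat one page below the boundary. A small standalone calculation:

    #include <stdio.h>

    int main(void)
    {
        unsigned long long end_of_ram = 32ULL << 30; /* mem=32G: RAM is [0, 32GB) */

        /* old: (last usable PFN, inclusive) << PAGE_SHIFT = 32GB - 4KB */
        unsigned long long old_half_pa = ((1ULL << (36 - 1 - 12)) - 1) << 12;
        /* new: (first PFN over the limit) << PAGE_SHIFT = 32GB exactly */
        unsigned long long new_half_pa = (1ULL << (36 - 1 - 12)) << 12;

        /* the mitigation warns if any RAM lies at or above half_pa */
        printf("old: warns=%d\n", end_of_ram > old_half_pa); /* 1: false alarm */
        printf("new: warns=%d\n", end_of_ram > new_half_pa); /* 0: fixed */
        return 0;
    }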
--- /dev/null
+From 9df9516940a61d29aedf4d91b483ca6597e7d480 Mon Sep 17 00:00:00 2001
+From: Vlastimil Babka <vbabka@suse.cz>
+Date: Mon, 20 Aug 2018 11:58:35 +0200
+Subject: x86/speculation/l1tf: Fix overflow in l1tf_pfn_limit() on 32bit
+
+From: Vlastimil Babka <vbabka@suse.cz>
+
+commit 9df9516940a61d29aedf4d91b483ca6597e7d480 upstream.
+
+On 32bit PAE kernels on 64bit hardware with enough physical bits,
+l1tf_pfn_limit() will overflow unsigned long. This in turn affects
+max_swapfile_size() and can lead to swapon returning -EINVAL. This has been
+observed in a 32bit guest with 42 bits physical address size, where
+max_swapfile_size() overflows exactly to 1 << 32, thus zero, and produces
+the following warning to dmesg:
+
+[ 6.396845] Truncating oversized swap area, only using 0k out of 2047996k
+
+Fix this by using unsigned long long instead.
+
+Fixes: 17dbca119312 ("x86/speculation/l1tf: Add sysfs reporting for l1tf")
+Fixes: 377eeaa8e11f ("x86/speculation/l1tf: Limit swap file size to MAX_PA/2")
+Reported-by: Dominique Leuenberger <dimstar@suse.de>
+Reported-by: Adrian Schroeter <adrian@suse.de>
+Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Andi Kleen <ak@linux.intel.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Cc: "H . Peter Anvin" <hpa@zytor.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Michal Hocko <mhocko@kernel.org>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20180820095835.5298-1-vbabka@suse.cz
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/processor.h | 4 ++--
+ arch/x86/mm/init.c | 4 ++--
+ 2 files changed, 4 insertions(+), 4 deletions(-)
+
+--- a/arch/x86/include/asm/processor.h
++++ b/arch/x86/include/asm/processor.h
+@@ -181,9 +181,9 @@ extern const struct seq_operations cpuin
+
+ extern void cpu_detect(struct cpuinfo_x86 *c);
+
+-static inline unsigned long l1tf_pfn_limit(void)
++static inline unsigned long long l1tf_pfn_limit(void)
+ {
+- return BIT(boot_cpu_data.x86_phys_bits - 1 - PAGE_SHIFT) - 1;
++ return BIT_ULL(boot_cpu_data.x86_phys_bits - 1 - PAGE_SHIFT) - 1;
+ }
+
+ extern void early_cpu_init(void);
+--- a/arch/x86/mm/init.c
++++ b/arch/x86/mm/init.c
+@@ -923,7 +923,7 @@ unsigned long max_swapfile_size(void)
+
+ if (boot_cpu_has_bug(X86_BUG_L1TF)) {
+ /* Limit the swap file size to MAX_PA/2 for L1TF workaround */
+- unsigned long l1tf_limit = l1tf_pfn_limit() + 1;
++ unsigned long long l1tf_limit = l1tf_pfn_limit() + 1;
+ /*
+ * We encode swap offsets also with 3 bits below those for pfn
+ * which makes the usable limit higher.
+@@ -931,7 +931,7 @@ unsigned long max_swapfile_size(void)
+ #if CONFIG_PGTABLE_LEVELS > 2
+ l1tf_limit <<= PAGE_SHIFT - SWP_OFFSET_FIRST_BIT;
+ #endif
+- pages = min_t(unsigned long, l1tf_limit, pages);
++ pages = min_t(unsigned long long, l1tf_limit, pages);
+ }
+ return pages;
+ }
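
The overflow can be reproduced with fixed-width types. With 42 physical
bits and the 3 extra swap-offset bits mentioned in the code comment, the
intermediate value is exactly 2^32, which wraps to zero in a 32-bit
unsigned long:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* uint32_t models the 32-bit unsigned long of the PAE kernel */
        uint32_t limit32 = (UINT32_C(1) << (42 - 1 - 12)) - 1; /* 2^29 - 1: fits */
        uint32_t pages32 = (limit32 + 1) << 3;                 /* 2^32 wraps to 0 */

        uint64_t pages64 = (UINT64_C(1) << (42 - 1 - 12)) << 3; /* 2^32, kept */

        printf("32-bit: %u pages\n", pages32);                       /* 0 */
        printf("64-bit: %llu pages\n", (unsigned long long)pages64); /* 4294967296 */
        return 0;
    }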
--- /dev/null
+From 6a012288d6906fee1dbc244050ade1dafe4a9c8d Mon Sep 17 00:00:00 2001
+From: Vlastimil Babka <vbabka@suse.cz>
+Date: Thu, 23 Aug 2018 16:21:29 +0200
+Subject: x86/speculation/l1tf: Suggest what to do on systems with too much RAM
+
+From: Vlastimil Babka <vbabka@suse.cz>
+
+commit 6a012288d6906fee1dbc244050ade1dafe4a9c8d upstream.
+
+Two users have reported [1] that they have an "extremely unlikely" system
+with more than MAX_PA/2 memory, on which the L1TF mitigation is not
+effective.
+
+Make the warning more helpful by suggesting the proper mem=X kernel boot
+parameter to make it effective, and by adding a link to the L1TF document
+to help decide whether the mitigation is worth the unusable RAM.
+
+[1] https://bugzilla.suse.com/show_bug.cgi?id=1105536
+
+Suggested-by: Michal Hocko <mhocko@suse.com>
+Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Cc: "H . Peter Anvin" <hpa@zytor.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/966571f0-9d7f-43dc-92c6-a10eec7a1254@suse.cz
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/cpu/bugs.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -686,6 +686,10 @@ static void __init l1tf_select_mitigatio
+ half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
+ if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
+ pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
++ pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
++ half_pa);
++ pr_info("However, doing so will make a part of your RAM unusable.\n");
++ pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html might help you decide.\n");
+ return;
+ }
+
--- /dev/null
+From 2e549b2ee0e358bc758480e716b881f9cabedb6a Mon Sep 17 00:00:00 2001
+From: Andy Lutomirski <luto@kernel.org>
+Date: Thu, 16 Aug 2018 12:41:15 -0700
+Subject: x86/vdso: Fix vDSO build if a retpoline is emitted
+
+From: Andy Lutomirski <luto@kernel.org>
+
+commit 2e549b2ee0e358bc758480e716b881f9cabedb6a upstream.
+
+Currently, if the vDSO ends up containing an indirect branch or
+call, GCC will emit the "external thunk" style of retpoline, and it
+will fail to link.
+
+Fix it by building the vDSO with inline retpoline thunks.
+
+I haven't seen any reports of this triggering on an unpatched
+kernel.
+
+Fixes: 76b043848fd2 ("x86/retpoline: Add initial retpoline support")
+Signed-off-by: Andy Lutomirski <luto@kernel.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Matt Rickard <matt@softrans.com.au>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Jason Vas Dias <jason.vas.dias@gmail.com>
+Cc: David Woodhouse <dwmw2@infradead.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/c76538cd3afbe19c6246c2d1715bc6a60bd63985.1534448381.git.luto@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ Makefile | 4 ++++
+ arch/x86/entry/vdso/Makefile | 6 ++++--
+ 2 files changed, 8 insertions(+), 2 deletions(-)
+
+--- a/Makefile
++++ b/Makefile
+@@ -493,9 +493,13 @@ KBUILD_AFLAGS += $(call cc-option, -no-i
+ endif
+
+ RETPOLINE_CFLAGS_GCC := -mindirect-branch=thunk-extern -mindirect-branch-register
++RETPOLINE_VDSO_CFLAGS_GCC := -mindirect-branch=thunk-inline -mindirect-branch-register
+ RETPOLINE_CFLAGS_CLANG := -mretpoline-external-thunk
++RETPOLINE_VDSO_CFLAGS_CLANG := -mretpoline
+ RETPOLINE_CFLAGS := $(call cc-option,$(RETPOLINE_CFLAGS_GCC),$(call cc-option,$(RETPOLINE_CFLAGS_CLANG)))
++RETPOLINE_VDSO_CFLAGS := $(call cc-option,$(RETPOLINE_VDSO_CFLAGS_GCC),$(call cc-option,$(RETPOLINE_VDSO_CFLAGS_CLANG)))
+ export RETPOLINE_CFLAGS
++export RETPOLINE_VDSO_CFLAGS
+
+ KBUILD_CFLAGS += $(call cc-option,-fno-PIE)
+ KBUILD_AFLAGS += $(call cc-option,-fno-PIE)
+--- a/arch/x86/entry/vdso/Makefile
++++ b/arch/x86/entry/vdso/Makefile
+@@ -72,9 +72,9 @@ $(obj)/vdso-image-%.c: $(obj)/vdso%.so.d
+ CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \
+ $(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector) \
+ -fno-omit-frame-pointer -foptimize-sibling-calls \
+- -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO
++ -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO $(RETPOLINE_VDSO_CFLAGS)
+
+-$(vobjs): KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS)) $(CFL)
++$(vobjs): KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS) $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS)) $(CFL)
+
+ #
+ # vDSO code runs in userspace and -pg doesn't help with profiling anyway.
+@@ -138,11 +138,13 @@ KBUILD_CFLAGS_32 := $(filter-out -mcmode
+ KBUILD_CFLAGS_32 := $(filter-out -fno-pic,$(KBUILD_CFLAGS_32))
+ KBUILD_CFLAGS_32 := $(filter-out -mfentry,$(KBUILD_CFLAGS_32))
+ KBUILD_CFLAGS_32 := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS_32))
++KBUILD_CFLAGS_32 := $(filter-out $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS_32))
+ KBUILD_CFLAGS_32 += -m32 -msoft-float -mregparm=0 -fpic
+ KBUILD_CFLAGS_32 += $(call cc-option, -fno-stack-protector)
+ KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls)
+ KBUILD_CFLAGS_32 += -fno-omit-frame-pointer
+ KBUILD_CFLAGS_32 += -DDISABLE_BRANCH_PROFILING
++KBUILD_CFLAGS_32 += $(RETPOLINE_VDSO_CFLAGS)
+ $(obj)/vdso32.so.dbg: KBUILD_CFLAGS = $(KBUILD_CFLAGS_32)
+
+ $(obj)/vdso32.so.dbg: FORCE \