--- /dev/null
+From 03adc61edad49e1bbecfb53f7ea5d78f398fe368 Mon Sep 17 00:00:00 2001
+From: Dan Clash <daclash@linux.microsoft.com>
+Date: Thu, 12 Oct 2023 14:55:18 -0700
+Subject: audit,io_uring: io_uring openat triggers audit reference count underflow
+
+From: Dan Clash <daclash@linux.microsoft.com>
+
+commit 03adc61edad49e1bbecfb53f7ea5d78f398fe368 upstream.
+
+An io_uring openat operation can update an audit reference count
+from multiple threads resulting in the call trace below.
+
+A call to io_uring_submit() with a single openat op with a flag of
+IOSQE_ASYNC results in the following reference count updates.
+
+The first part of the system call performs two increments that do not race.
+
+do_syscall_64()
+ __do_sys_io_uring_enter()
+ io_submit_sqes()
+ io_openat_prep()
+ __io_openat_prep()
+ getname()
+ getname_flags() /* update 1 (increment) */
+ __audit_getname() /* update 2 (increment) */
+
+The openat op is queued to an io_uring worker thread which starts the
+opportunity for a race. The system call exit performs one decrement.
+
+do_syscall_64()
+ syscall_exit_to_user_mode()
+ syscall_exit_to_user_mode_prepare()
+ __audit_syscall_exit()
+ audit_reset_context()
+ putname() /* update 3 (decrement) */
+
+The io_uring worker thread performs one increment and two decrements.
+These updates can race with the system call decrement.
+
+io_wqe_worker()
+ io_worker_handle_work()
+ io_wq_submit_work()
+ io_issue_sqe()
+ io_openat()
+ io_openat2()
+ do_filp_open()
+ path_openat()
+ __audit_inode() /* update 4 (increment) */
+ putname() /* update 5 (decrement) */
+ __audit_uring_exit()
+ audit_reset_context()
+ putname() /* update 6 (decrement) */
+
+The fix is to change the refcnt member of struct audit_names
+from int to atomic_t.
+
+kernel BUG at fs/namei.c:262!
+Call Trace:
+...
+ ? putname+0x68/0x70
+ audit_reset_context.part.0.constprop.0+0xe1/0x300
+ __audit_uring_exit+0xda/0x1c0
+ io_issue_sqe+0x1f3/0x450
+ ? lock_timer_base+0x3b/0xd0
+ io_wq_submit_work+0x8d/0x2b0
+ ? __try_to_del_timer_sync+0x67/0xa0
+ io_worker_handle_work+0x17c/0x2b0
+ io_wqe_worker+0x10a/0x350
+
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/lkml/MW2PR2101MB1033FFF044A258F84AEAA584F1C9A@MW2PR2101MB1033.namprd21.prod.outlook.com/
+Fixes: 5bd2182d58e9 ("audit,io_uring,io-wq: add some basic audit support to io_uring")
+Signed-off-by: Dan Clash <daclash@linux.microsoft.com>
+Link: https://lore.kernel.org/r/20231012215518.GA4048@linuxonhyperv3.guj3yctzbm1etfxqx2vob5hsef.xx.internal.cloudapp.net
+Reviewed-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/namei.c | 9 +++++----
+ include/linux/fs.h | 2 +-
+ kernel/auditsc.c | 8 ++++----
+ 3 files changed, 10 insertions(+), 9 deletions(-)
+
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -187,7 +187,7 @@ getname_flags(const char __user *filenam
+ }
+ }
+
+- result->refcnt = 1;
++ atomic_set(&result->refcnt, 1);
+ /* The empty path is special. */
+ if (unlikely(!len)) {
+ if (empty)
+@@ -248,7 +248,7 @@ getname_kernel(const char * filename)
+ memcpy((char *)result->name, filename, len);
+ result->uptr = NULL;
+ result->aname = NULL;
+- result->refcnt = 1;
++ atomic_set(&result->refcnt, 1);
+ audit_getname(result);
+
+ return result;
+@@ -259,9 +259,10 @@ void putname(struct filename *name)
+ if (IS_ERR(name))
+ return;
+
+- BUG_ON(name->refcnt <= 0);
++ if (WARN_ON_ONCE(!atomic_read(&name->refcnt)))
++ return;
+
+- if (--name->refcnt > 0)
++ if (!atomic_dec_and_test(&name->refcnt))
+ return;
+
+ if (name->name != name->iname) {
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -2735,7 +2735,7 @@ struct audit_names;
+ struct filename {
+ const char *name; /* pointer to actual string */
+ const __user char *uptr; /* original userland pointer */
+- int refcnt;
++ atomic_t refcnt;
+ struct audit_names *aname;
+ const char iname[];
+ };
+--- a/kernel/auditsc.c
++++ b/kernel/auditsc.c
+@@ -2208,7 +2208,7 @@ __audit_reusename(const __user char *upt
+ if (!n->name)
+ continue;
+ if (n->name->uptr == uptr) {
+- n->name->refcnt++;
++ atomic_inc(&n->name->refcnt);
+ return n->name;
+ }
+ }
+@@ -2237,7 +2237,7 @@ void __audit_getname(struct filename *na
+ n->name = name;
+ n->name_len = AUDIT_NAME_FULL;
+ name->aname = n;
+- name->refcnt++;
++ atomic_inc(&name->refcnt);
+ }
+
+ static inline int audit_copy_fcaps(struct audit_names *name,
+@@ -2369,7 +2369,7 @@ out_alloc:
+ return;
+ if (name) {
+ n->name = name;
+- name->refcnt++;
++ atomic_inc(&name->refcnt);
+ }
+
+ out:
+@@ -2496,7 +2496,7 @@ void __audit_inode_child(struct inode *p
+ if (found_parent) {
+ found_child->name = found_parent->name;
+ found_child->name_len = AUDIT_NAME_FULL;
+- found_child->name->refcnt++;
++ atomic_inc(&found_child->name->refcnt);
+ }
+ }
+
--- /dev/null
+From bfbe5b31caa74ab97f1784fe9ade5f45e0d3de91 Mon Sep 17 00:00:00 2001
+From: Konstantin Komarov <almaz.alexandrovich@paragon-software.com>
+Date: Fri, 30 Jun 2023 16:22:53 +0400
+Subject: fs/ntfs3: fix deadlock in mark_as_free_ex
+
+From: Konstantin Komarov <almaz.alexandrovich@paragon-software.com>
+
+commit bfbe5b31caa74ab97f1784fe9ade5f45e0d3de91 upstream.
+
+Reported-by: syzbot+e94d98936a0ed08bde43@syzkaller.appspotmail.com
+Signed-off-by: Konstantin Komarov <almaz.alexandrovich@paragon-software.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ntfs3/fsntfs.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/fs/ntfs3/fsntfs.c
++++ b/fs/ntfs3/fsntfs.c
+@@ -2428,10 +2428,12 @@ void mark_as_free_ex(struct ntfs_sb_info
+ {
+ CLST end, i, zone_len, zlen;
+ struct wnd_bitmap *wnd = &sbi->used.bitmap;
++ bool dirty = false;
+
+ down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
+ if (!wnd_is_used(wnd, lcn, len)) {
+- ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
++ /* mark volume as dirty out of wnd->rw_lock */
++ dirty = true;
+
+ end = lcn + len;
+ len = 0;
+@@ -2485,6 +2487,8 @@ void mark_as_free_ex(struct ntfs_sb_info
+
+ out:
+ up_write(&wnd->rw_lock);
++ if (dirty)
++ ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
+ }
+
+ /*
--- /dev/null
+From 8e7e27b2ee1e19c4040d4987e345f678a74c0aed Mon Sep 17 00:00:00 2001
+From: Zeng Heng <zengheng4@huawei.com>
+Date: Thu, 20 Apr 2023 15:46:22 +0800
+Subject: fs/ntfs3: fix panic about slab-out-of-bounds caused by ntfs_list_ea()
+
+From: Zeng Heng <zengheng4@huawei.com>
+
+commit 8e7e27b2ee1e19c4040d4987e345f678a74c0aed upstream.
+
+Here is a BUG report about linux-6.1 from syzbot, but it still remains
+within upstream:
+
+BUG: KASAN: slab-out-of-bounds in ntfs_list_ea fs/ntfs3/xattr.c:191 [inline]
+BUG: KASAN: slab-out-of-bounds in ntfs_listxattr+0x401/0x570 fs/ntfs3/xattr.c:710
+Read of size 1 at addr ffff888021acaf3d by task syz-executor128/3632
+
+Call Trace:
+ kasan_report+0x139/0x170 mm/kasan/report.c:495
+ ntfs_list_ea fs/ntfs3/xattr.c:191 [inline]
+ ntfs_listxattr+0x401/0x570 fs/ntfs3/xattr.c:710
+ vfs_listxattr fs/xattr.c:457 [inline]
+ listxattr+0x293/0x2d0 fs/xattr.c:804
+ path_listxattr fs/xattr.c:828 [inline]
+ __do_sys_llistxattr fs/xattr.c:846 [inline]
+
+Before dereferencing field members of `ea` in unpacked_ea_size(), we need to
+check whether the EA_FULL struct is located in the access validate range.
+
+Similarly, when dereferencing the `ea->name` field member, we need to check
+whether the ea->name is located in the access validate range, too.
+
+Fixes: be71b5cba2e6 ("fs/ntfs3: Add attrib operations")
+Reported-by: syzbot+9fcea5ef6dc4dc72d334@syzkaller.appspotmail.com
+Signed-off-by: Zeng Heng <zengheng4@huawei.com>
+[almaz.alexandrovich@paragon-software.com: took the ret variable out of the loop block]
+Signed-off-by: Konstantin Komarov <almaz.alexandrovich@paragon-software.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ntfs3/xattr.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/fs/ntfs3/xattr.c
++++ b/fs/ntfs3/xattr.c
+@@ -209,7 +209,8 @@ static ssize_t ntfs_list_ea(struct ntfs_
+ size = le32_to_cpu(info->size);
+
+ /* Enumerate all xattrs. */
+- for (ret = 0, off = 0; off < size; off += ea_size) {
++ ret = 0;
++ for (off = 0; off + sizeof(struct EA_FULL) < size; off += ea_size) {
+ ea = Add2Ptr(ea_all, off);
+ ea_size = unpacked_ea_size(ea);
+
+@@ -217,6 +218,10 @@ static ssize_t ntfs_list_ea(struct ntfs_
+ break;
+
+ if (buffer) {
++ /* Check if we can use field ea->name */
++ if (off + ea_size > size)
++ break;
++
+ if (ret + ea->name_len + 1 > bytes_per_buffer) {
+ err = -ERANGE;
+ goto out;
--- /dev/null
+From 1f9b94af923c88539426ed811ae7e9543834a5c5 Mon Sep 17 00:00:00 2001
+From: Ziqi Zhao <astrajoan@yahoo.com>
+Date: Wed, 9 Aug 2023 12:11:18 -0700
+Subject: fs/ntfs3: Fix possible null-pointer dereference in hdr_find_e()
+
+From: Ziqi Zhao <astrajoan@yahoo.com>
+
+commit 1f9b94af923c88539426ed811ae7e9543834a5c5 upstream.
+
+Upon investigation of the C reproducer provided by Syzbot, it seemed
+the reproducer was trying to mount a corrupted NTFS filesystem, then
+issue a rename syscall to some nodes in the filesystem. This can be
+shown by modifying the reproducer to only include the mount syscall,
+and investigating the filesystem by e.g. `ls` and `rm` commands. As a
+result, during the problematic call to `hdr_find_e`, the `inode` being
+supplied did not go through `indx_init`, hence the `cmp` function
+pointer was never set.
+
+The fix is simply to check whether `cmp` is not set, and return NULL
+if that's the case, in order to be consistent with other error
+scenarios of the `hdr_find_e` method. The rationale behind this patch
+is that:
+
+- We should prevent crashing the kernel even if the mounted filesystem
+ is corrupted. Any syscalls made on the filesystem could return
+ invalid, but the kernel should be able to sustain these calls.
+
+- Only very specific corruption would lead to this bug, so it would be
+ a pretty rare case in actual usage anyways. Therefore, introducing a
+ check to specifically protect against this bug seems appropriate.
+ Because of its rarity, an `unlikely` clause is used to wrap around
+ this nullity check.
+
+Reported-by: syzbot+60cf892fc31d1f4358fc@syzkaller.appspotmail.com
+Signed-off-by: Ziqi Zhao <astrajoan@yahoo.com>
+Signed-off-by: Konstantin Komarov <almaz.alexandrovich@paragon-software.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ntfs3/index.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/fs/ntfs3/index.c
++++ b/fs/ntfs3/index.c
+@@ -729,6 +729,9 @@ static struct NTFS_DE *hdr_find_e(const
+ u32 total = le32_to_cpu(hdr->total);
+ u16 offs[128];
+
++ if (unlikely(!cmp))
++ return NULL;
++
+ fill_table:
+ if (end > total)
+ return NULL;
--- /dev/null
+From 8647c52e9504c99752a39f1d44f6268f82c40a5c Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Wed, 27 Sep 2023 17:19:53 -0700
+Subject: KVM: x86: Constrain guest-supported xfeatures only at KVM_GET_XSAVE{2}
+
+From: Sean Christopherson <seanjc@google.com>
+
+commit 8647c52e9504c99752a39f1d44f6268f82c40a5c upstream.
+
+Mask off xfeatures that aren't exposed to the guest only when saving guest
+state via KVM_GET_XSAVE{2} instead of modifying user_xfeatures directly.
+Preserving the maximal set of xfeatures in user_xfeatures restores KVM's
+ABI for KVM_SET_XSAVE, which prior to commit ad856280ddea ("x86/kvm/fpu:
+Limit guest user_xfeatures to supported bits of XCR0") allowed userspace
+to load xfeatures that are supported by the host, irrespective of what
+xfeatures are exposed to the guest.
+
+There is no known use case where userspace *intentionally* loads xfeatures
+that aren't exposed to the guest, but the bug fixed by commit ad856280ddea
+was specifically that KVM_GET_XSAVE{2} would save xfeatures that weren't
+exposed to the guest, e.g. would lead to userspace unintentionally loading
+guest-unsupported xfeatures when live migrating a VM.
+
+Restricting KVM_SET_XSAVE to guest-supported xfeatures is especially
+problematic for QEMU-based setups, as QEMU has a bug where instead of
+terminating the VM if KVM_SET_XSAVE fails, QEMU instead simply stops
+loading guest state, i.e. resumes the guest after live migration with
+incomplete guest state, and ultimately results in guest data corruption.
+
+Note, letting userspace restore all host-supported xfeatures does not fix
+setups where a VM is migrated from a host *without* commit ad856280ddea,
+to a target with a subset of host-supported xfeatures. However there is
+no way to safely address that scenario, e.g. KVM could silently drop the
+unsupported features, but that would be a clear violation of KVM's ABI and
+so would require userspace to opt-in, at which point userspace could
+simply be updated to sanitize the to-be-loaded XSAVE state.
+
+Reported-by: Tyler Stachecki <stachecki.tyler@gmail.com>
+Closes: https://lore.kernel.org/all/20230914010003.358162-1-tstachecki@bloomberg.net
+Fixes: ad856280ddea ("x86/kvm/fpu: Limit guest user_xfeatures to supported bits of XCR0")
+Cc: stable@vger.kernel.org
+Cc: Leonardo Bras <leobras@redhat.com>
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Acked-by: Dave Hansen <dave.hansen@linux.intel.com>
+Message-Id: <20230928001956.924301-3-seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/fpu/xstate.c | 5 +----
+ arch/x86/kvm/cpuid.c | 8 --------
+ arch/x86/kvm/x86.c | 18 ++++++++++++++++--
+ 3 files changed, 17 insertions(+), 14 deletions(-)
+
+--- a/arch/x86/kernel/fpu/xstate.c
++++ b/arch/x86/kernel/fpu/xstate.c
+@@ -1543,10 +1543,7 @@ static int fpstate_realloc(u64 xfeatures
+ fpregs_restore_userregs();
+
+ newfps->xfeatures = curfps->xfeatures | xfeatures;
+-
+- if (!guest_fpu)
+- newfps->user_xfeatures = curfps->user_xfeatures | xfeatures;
+-
++ newfps->user_xfeatures = curfps->user_xfeatures | xfeatures;
+ newfps->xfd = curfps->xfd & ~xfeatures;
+
+ /* Do the final updates within the locked region */
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -338,14 +338,6 @@ static void kvm_vcpu_after_set_cpuid(str
+ vcpu->arch.guest_supported_xcr0 =
+ cpuid_get_supported_xcr0(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent);
+
+- /*
+- * FP+SSE can always be saved/restored via KVM_{G,S}ET_XSAVE, even if
+- * XSAVE/XCRO are not exposed to the guest, and even if XSAVE isn't
+- * supported by the host.
+- */
+- vcpu->arch.guest_fpu.fpstate->user_xfeatures = vcpu->arch.guest_supported_xcr0 |
+- XFEATURE_MASK_FPSSE;
+-
+ kvm_update_pv_runtime(vcpu);
+
+ vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -5305,12 +5305,26 @@ static int kvm_vcpu_ioctl_x86_set_debugr
+ static void kvm_vcpu_ioctl_x86_get_xsave2(struct kvm_vcpu *vcpu,
+ u8 *state, unsigned int size)
+ {
++ /*
++ * Only copy state for features that are enabled for the guest. The
++ * state itself isn't problematic, but setting bits in the header for
++ * features that are supported in *this* host but not exposed to the
++ * guest can result in KVM_SET_XSAVE failing when live migrating to a
++ * compatible host without the features that are NOT exposed to the
++ * guest.
++ *
++ * FP+SSE can always be saved/restored via KVM_{G,S}ET_XSAVE, even if
++ * XSAVE/XCRO are not exposed to the guest, and even if XSAVE isn't
++ * supported by the host.
++ */
++ u64 supported_xcr0 = vcpu->arch.guest_supported_xcr0 |
++ XFEATURE_MASK_FPSSE;
++
+ if (fpstate_is_confidential(&vcpu->arch.guest_fpu))
+ return;
+
+ fpu_copy_guest_fpstate_to_uabi(&vcpu->arch.guest_fpu, state, size,
+- vcpu->arch.guest_fpu.fpstate->user_xfeatures,
+- vcpu->arch.pkru);
++ supported_xcr0, vcpu->arch.pkru);
+ }
+
+ static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
--- /dev/null
+From 72377ab2d671befd6390a1d5677f5cca61235b65 Mon Sep 17 00:00:00 2001
+From: Paolo Abeni <pabeni@redhat.com>
+Date: Wed, 18 Oct 2023 11:23:54 -0700
+Subject: mptcp: more conservative check for zero probes
+
+From: Paolo Abeni <pabeni@redhat.com>
+
+commit 72377ab2d671befd6390a1d5677f5cca61235b65 upstream.
+
+Christoph reported that the MPTCP protocol can find the subflow-level
+write queue unexpectedly not empty while crafting a zero-window probe,
+hitting a warning:
+
+------------[ cut here ]------------
+WARNING: CPU: 0 PID: 188 at net/mptcp/protocol.c:1312 mptcp_sendmsg_frag+0xc06/0xe70
+Modules linked in:
+CPU: 0 PID: 188 Comm: kworker/0:2 Not tainted 6.6.0-rc2-g1176aa719d7a #47
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.11.0-2.el7 04/01/2014
+Workqueue: events mptcp_worker
+RIP: 0010:mptcp_sendmsg_frag+0xc06/0xe70 net/mptcp/protocol.c:1312
+RAX: 47d0530de347ff6a RBX: 47d0530de347ff6b RCX: ffff8881015d3c00
+RDX: ffff8881015d3c00 RSI: 47d0530de347ff6b RDI: 47d0530de347ff6b
+RBP: 47d0530de347ff6b R08: ffffffff8243c6a8 R09: ffffffff82042d9c
+R10: 0000000000000002 R11: ffffffff82056850 R12: ffff88812a13d580
+R13: 0000000000000001 R14: ffff88812b375e50 R15: ffff88812bbf3200
+FS: 0000000000000000(0000) GS:ffff88813bc00000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 0000000000695118 CR3: 0000000115dfc001 CR4: 0000000000170ef0
+Call Trace:
+ <TASK>
+ __subflow_push_pending+0xa4/0x420 net/mptcp/protocol.c:1545
+ __mptcp_push_pending+0x128/0x3b0 net/mptcp/protocol.c:1614
+ mptcp_release_cb+0x218/0x5b0 net/mptcp/protocol.c:3391
+ release_sock+0xf6/0x100 net/core/sock.c:3521
+ mptcp_worker+0x6e8/0x8f0 net/mptcp/protocol.c:2746
+ process_scheduled_works+0x341/0x690 kernel/workqueue.c:2630
+ worker_thread+0x3a7/0x610 kernel/workqueue.c:2784
+ kthread+0x143/0x180 kernel/kthread.c:388
+ ret_from_fork+0x4d/0x60 arch/x86/kernel/process.c:147
+ ret_from_fork_asm+0x1b/0x30 arch/x86/entry/entry_64.S:304
+ </TASK>
+
+The root cause of the issue is that expectations are wrong: e.g. due
+to MPTCP-level re-injection we can hit the critical condition.
+
+Explicitly avoid the zero-window probe when the subflow write queue
+is not empty and drop the related warnings.
+
+Reported-by: Christoph Paasch <cpaasch@apple.com>
+Closes: https://github.com/multipath-tcp/mptcp_net-next/issues/444
+Fixes: f70cad1085d1 ("mptcp: stop relying on tcp_tx_skb_cache")
+Cc: stable@vger.kernel.org
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Mat Martineau <martineau@kernel.org>
+Link: https://lore.kernel.org/r/20231018-send-net-20231018-v1-3-17ecb002e41d@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/protocol.c | 8 +-------
+ 1 file changed, 1 insertion(+), 7 deletions(-)
+
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -1342,7 +1342,7 @@ alloc_skb:
+ if (copy == 0) {
+ u64 snd_una = READ_ONCE(msk->snd_una);
+
+- if (snd_una != msk->snd_nxt) {
++ if (snd_una != msk->snd_nxt || tcp_write_queue_tail(ssk)) {
+ tcp_remove_empty_skb(ssk);
+ return 0;
+ }
+@@ -1350,11 +1350,6 @@ alloc_skb:
+ zero_window_probe = true;
+ data_seq = snd_una - 1;
+ copy = 1;
+-
+- /* all mptcp-level data is acked, no skbs should be present into the
+- * ssk write queue
+- */
+- WARN_ON_ONCE(reuse_skb);
+ }
+
+ copy = min_t(size_t, copy, info->limit - info->sent);
+@@ -1383,7 +1378,6 @@ alloc_skb:
+ if (reuse_skb) {
+ TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
+ mpext->data_len += copy;
+- WARN_ON_ONCE(zero_window_probe);
+ goto out;
+ }
+
--- /dev/null
+From d351c1ea2de3e36e608fc355d8ae7d0cc80e6cd6 Mon Sep 17 00:00:00 2001
+From: Florian Westphal <fw@strlen.de>
+Date: Sun, 8 Oct 2023 19:36:53 +0200
+Subject: netfilter: nft_payload: fix wrong mac header matching
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Florian Westphal <fw@strlen.de>
+
+commit d351c1ea2de3e36e608fc355d8ae7d0cc80e6cd6 upstream.
+
+mcast packets get looped back to the local machine.
+Such packets have a 0-length mac header, we should treat
+this like "mac header not set" and abort rule evaluation.
+
+As-is, we just copy data from the network header instead.
+
+Fixes: 96518518cc41 ("netfilter: add nftables")
+Reported-by: Blažej Krajňák <krajnak@levonet.sk>
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/netfilter/nft_payload.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/netfilter/nft_payload.c
++++ b/net/netfilter/nft_payload.c
+@@ -132,7 +132,7 @@ void nft_payload_eval(const struct nft_e
+
+ switch (priv->base) {
+ case NFT_PAYLOAD_LL_HEADER:
+- if (!skb_mac_header_was_set(skb))
++ if (!skb_mac_header_was_set(skb) || skb_mac_header_len(skb) == 0)
+ goto err;
+
+ if (skb_vlan_tag_present(skb)) {
nfc-nci-fix-possible-null-pointer-dereference-in-send_acknowledge.patch
regmap-fix-null-deref-on-lookup.patch
kvm-x86-mask-lvtpc-when-handling-a-pmi.patch
+x86-sev-disable-mmio-emulation-from-user-mode.patch
+x86-sev-check-iobm-for-ioio-exceptions-from-user-space.patch
+x86-sev-check-for-user-space-ioio-pointing-to-kernel-space.patch
+x86-fpu-allow-caller-to-constrain-xfeatures-when-copying-to-uabi-buffer.patch
+kvm-x86-constrain-guest-supported-xfeatures-only-at-kvm_get_xsave-2.patch
+x86-kvm-svm-add-support-for-invalid-ipi-vector-interception.patch
+x86-kvm-svm-refresh-avic-inhibition-in-svm_leave_nested.patch
+audit-io_uring-io_uring-openat-triggers-audit-reference-count-underflow.patch
+tcp-check-mptcp-level-constraints-for-backlog-coalescing.patch
+mptcp-more-conservative-check-for-zero-probes.patch
+fs-ntfs3-fix-possible-null-pointer-dereference-in-hdr_find_e.patch
+fs-ntfs3-fix-panic-about-slab-out-of-bounds-caused-by-ntfs_list_ea.patch
+fs-ntfs3-fix-deadlock-in-mark_as_free_ex.patch
+netfilter-nft_payload-fix-wrong-mac-header-matching.patch
--- /dev/null
+From 6db8a37dfc541e059851652cfd4f0bb13b8ff6af Mon Sep 17 00:00:00 2001
+From: Paolo Abeni <pabeni@redhat.com>
+Date: Wed, 18 Oct 2023 11:23:53 -0700
+Subject: tcp: check mptcp-level constraints for backlog coalescing
+
+From: Paolo Abeni <pabeni@redhat.com>
+
+commit 6db8a37dfc541e059851652cfd4f0bb13b8ff6af upstream.
+
+The MPTCP protocol can acquire the subflow-level socket lock and
+cause the tcp backlog usage. When inserting new skbs into the
+backlog, the stack will try to coalesce them.
+
+Currently, we have no check in place to ensure that such coalescing
+will respect the MPTCP-level DSS, and that may cause data stream
+corruption, as reported by Christoph.
+
+Address the issue by adding the relevant admission check for coalescing
+in tcp_add_backlog().
+
+Note the issue is not easy to reproduce, as the MPTCP protocol tries
+hard to avoid acquiring the subflow-level socket lock.
+
+Fixes: 648ef4b88673 ("mptcp: Implement MPTCP receive path")
+Cc: stable@vger.kernel.org
+Reported-by: Christoph Paasch <cpaasch@apple.com>
+Closes: https://github.com/multipath-tcp/mptcp_net-next/issues/420
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Mat Martineau <martineau@kernel.org>
+Link: https://lore.kernel.org/r/20231018-send-net-20231018-v1-2-17ecb002e41d@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp_ipv4.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -1818,6 +1818,7 @@ bool tcp_add_backlog(struct sock *sk, st
+ #ifdef CONFIG_TLS_DEVICE
+ tail->decrypted != skb->decrypted ||
+ #endif
++ !mptcp_skb_can_collapse(tail, skb) ||
+ thtail->doff != th->doff ||
+ memcmp(thtail + 1, th + 1, hdrlen - sizeof(*th)))
+ goto no_coalesce;
--- /dev/null
+From 18164f66e6c59fda15c198b371fa008431efdb22 Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Wed, 27 Sep 2023 17:19:52 -0700
+Subject: x86/fpu: Allow caller to constrain xfeatures when copying to uabi buffer
+
+From: Sean Christopherson <seanjc@google.com>
+
+commit 18164f66e6c59fda15c198b371fa008431efdb22 upstream.
+
+Plumb an xfeatures mask into __copy_xstate_to_uabi_buf() so that KVM can
+constrain which xfeatures are saved into the userspace buffer without
+having to modify the user_xfeatures field in KVM's guest_fpu state.
+
+KVM's ABI for KVM_GET_XSAVE{2} is that features that are not exposed to
+guest must not show up in the effective xstate_bv field of the buffer.
+Saving only the guest-supported xfeatures allows userspace to load the
+saved state on a different host with a fewer xfeatures, so long as the
+target host supports the xfeatures that are exposed to the guest.
+
+KVM currently sets user_xfeatures directly to restrict KVM_GET_XSAVE{2} to
+the set of guest-supported xfeatures, but doing so broke KVM's historical
+ABI for KVM_SET_XSAVE, which allows userspace to load any xfeatures that
+are supported by the *host*.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Message-Id: <20230928001956.924301-2-seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/fpu/api.h | 3 ++-
+ arch/x86/kernel/fpu/core.c | 5 +++--
+ arch/x86/kernel/fpu/xstate.c | 7 +++++--
+ arch/x86/kernel/fpu/xstate.h | 3 ++-
+ arch/x86/kvm/x86.c | 21 +++++++++------------
+ 5 files changed, 21 insertions(+), 18 deletions(-)
+
+--- a/arch/x86/include/asm/fpu/api.h
++++ b/arch/x86/include/asm/fpu/api.h
+@@ -148,7 +148,8 @@ static inline void fpu_update_guest_xfd(
+ static inline void fpu_sync_guest_vmexit_xfd_state(void) { }
+ #endif
+
+-extern void fpu_copy_guest_fpstate_to_uabi(struct fpu_guest *gfpu, void *buf, unsigned int size, u32 pkru);
++extern void fpu_copy_guest_fpstate_to_uabi(struct fpu_guest *gfpu, void *buf,
++ unsigned int size, u64 xfeatures, u32 pkru);
+ extern int fpu_copy_uabi_to_guest_fpstate(struct fpu_guest *gfpu, const void *buf, u64 xcr0, u32 *vpkru);
+
+ static inline void fpstate_set_confidential(struct fpu_guest *gfpu)
+--- a/arch/x86/kernel/fpu/core.c
++++ b/arch/x86/kernel/fpu/core.c
+@@ -369,14 +369,15 @@ int fpu_swap_kvm_fpstate(struct fpu_gues
+ EXPORT_SYMBOL_GPL(fpu_swap_kvm_fpstate);
+
+ void fpu_copy_guest_fpstate_to_uabi(struct fpu_guest *gfpu, void *buf,
+- unsigned int size, u32 pkru)
++ unsigned int size, u64 xfeatures, u32 pkru)
+ {
+ struct fpstate *kstate = gfpu->fpstate;
+ union fpregs_state *ustate = buf;
+ struct membuf mb = { .p = buf, .left = size };
+
+ if (cpu_feature_enabled(X86_FEATURE_XSAVE)) {
+- __copy_xstate_to_uabi_buf(mb, kstate, pkru, XSTATE_COPY_XSAVE);
++ __copy_xstate_to_uabi_buf(mb, kstate, xfeatures, pkru,
++ XSTATE_COPY_XSAVE);
+ } else {
+ memcpy(&ustate->fxsave, &kstate->regs.fxsave,
+ sizeof(ustate->fxsave));
+--- a/arch/x86/kernel/fpu/xstate.c
++++ b/arch/x86/kernel/fpu/xstate.c
+@@ -1053,6 +1053,7 @@ static void copy_feature(bool from_xstat
+ * __copy_xstate_to_uabi_buf - Copy kernel saved xstate to a UABI buffer
+ * @to: membuf descriptor
+ * @fpstate: The fpstate buffer from which to copy
++ * @xfeatures: The mask of xfeatures to save (XSAVE mode only)
+ * @pkru_val: The PKRU value to store in the PKRU component
+ * @copy_mode: The requested copy mode
+ *
+@@ -1063,7 +1064,8 @@ static void copy_feature(bool from_xstat
+ * It supports partial copy but @to.pos always starts from zero.
+ */
+ void __copy_xstate_to_uabi_buf(struct membuf to, struct fpstate *fpstate,
+- u32 pkru_val, enum xstate_copy_mode copy_mode)
++ u64 xfeatures, u32 pkru_val,
++ enum xstate_copy_mode copy_mode)
+ {
+ const unsigned int off_mxcsr = offsetof(struct fxregs_state, mxcsr);
+ struct xregs_state *xinit = &init_fpstate.regs.xsave;
+@@ -1087,7 +1089,7 @@ void __copy_xstate_to_uabi_buf(struct me
+ break;
+
+ case XSTATE_COPY_XSAVE:
+- header.xfeatures &= fpstate->user_xfeatures;
++ header.xfeatures &= fpstate->user_xfeatures & xfeatures;
+ break;
+ }
+
+@@ -1189,6 +1191,7 @@ void copy_xstate_to_uabi_buf(struct memb
+ enum xstate_copy_mode copy_mode)
+ {
+ __copy_xstate_to_uabi_buf(to, tsk->thread.fpu.fpstate,
++ tsk->thread.fpu.fpstate->user_xfeatures,
+ tsk->thread.pkru, copy_mode);
+ }
+
+--- a/arch/x86/kernel/fpu/xstate.h
++++ b/arch/x86/kernel/fpu/xstate.h
+@@ -43,7 +43,8 @@ enum xstate_copy_mode {
+
+ struct membuf;
+ extern void __copy_xstate_to_uabi_buf(struct membuf to, struct fpstate *fpstate,
+- u32 pkru_val, enum xstate_copy_mode copy_mode);
++ u64 xfeatures, u32 pkru_val,
++ enum xstate_copy_mode copy_mode);
+ extern void copy_xstate_to_uabi_buf(struct membuf to, struct task_struct *tsk,
+ enum xstate_copy_mode mode);
+ extern int copy_uabi_from_kernel_to_xstate(struct fpstate *fpstate, const void *kbuf, u32 *pkru);
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -5301,26 +5301,23 @@ static int kvm_vcpu_ioctl_x86_set_debugr
+ return 0;
+ }
+
+-static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
+- struct kvm_xsave *guest_xsave)
++
++static void kvm_vcpu_ioctl_x86_get_xsave2(struct kvm_vcpu *vcpu,
++ u8 *state, unsigned int size)
+ {
+ if (fpstate_is_confidential(&vcpu->arch.guest_fpu))
+ return;
+
+- fpu_copy_guest_fpstate_to_uabi(&vcpu->arch.guest_fpu,
+- guest_xsave->region,
+- sizeof(guest_xsave->region),
++ fpu_copy_guest_fpstate_to_uabi(&vcpu->arch.guest_fpu, state, size,
++ vcpu->arch.guest_fpu.fpstate->user_xfeatures,
+ vcpu->arch.pkru);
+ }
+
+-static void kvm_vcpu_ioctl_x86_get_xsave2(struct kvm_vcpu *vcpu,
+- u8 *state, unsigned int size)
++static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
++ struct kvm_xsave *guest_xsave)
+ {
+- if (fpstate_is_confidential(&vcpu->arch.guest_fpu))
+- return;
+-
+- fpu_copy_guest_fpstate_to_uabi(&vcpu->arch.guest_fpu,
+- state, size, vcpu->arch.pkru);
++ return kvm_vcpu_ioctl_x86_get_xsave2(vcpu, (void *)guest_xsave->region,
++ sizeof(guest_xsave->region));
+ }
+
+ static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
--- /dev/null
+From 2dcf37abf9d3aab7f975002d29fc7c17272def38 Mon Sep 17 00:00:00 2001
+From: Maxim Levitsky <mlevitsk@redhat.com>
+Date: Thu, 28 Sep 2023 20:33:52 +0300
+Subject: x86: KVM: SVM: add support for Invalid IPI Vector interception
+
+From: Maxim Levitsky <mlevitsk@redhat.com>
+
+commit 2dcf37abf9d3aab7f975002d29fc7c17272def38 upstream.
+
+In later revisions of AMD's APM, there is a new 'incomplete IPI' exit code:
+
+"Invalid IPI Vector - The vector for the specified IPI was set to an
+illegal value (VEC < 16)"
+
+Note that tests on Zen2 machine show that this VM exit doesn't happen and
+instead AVIC just does nothing.
+
+Add support for this exit code by doing nothing, instead of filling
+the kernel log with errors.
+
+Also replace an unthrottled 'pr_err()' if another unknown incomplete
+IPI exit happens with vcpu_unimpl()
+
+(e.g in case AMD adds yet another 'Invalid IPI' exit reason)
+
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
+Reviewed-by: Sean Christopherson <seanjc@google.com>
+Message-Id: <20230928173354.217464-3-mlevitsk@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/svm.h | 1 +
+ arch/x86/kvm/svm/avic.c | 5 ++++-
+ 2 files changed, 5 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/include/asm/svm.h
++++ b/arch/x86/include/asm/svm.h
+@@ -259,6 +259,7 @@ enum avic_ipi_failure_cause {
+ AVIC_IPI_FAILURE_TARGET_NOT_RUNNING,
+ AVIC_IPI_FAILURE_INVALID_TARGET,
+ AVIC_IPI_FAILURE_INVALID_BACKING_PAGE,
++ AVIC_IPI_FAILURE_INVALID_IPI_VECTOR,
+ };
+
+ #define AVIC_PHYSICAL_MAX_INDEX_MASK GENMASK_ULL(8, 0)
+--- a/arch/x86/kvm/svm/avic.c
++++ b/arch/x86/kvm/svm/avic.c
+@@ -542,8 +542,11 @@ int avic_incomplete_ipi_interception(str
+ case AVIC_IPI_FAILURE_INVALID_BACKING_PAGE:
+ WARN_ONCE(1, "Invalid backing page\n");
+ break;
++ case AVIC_IPI_FAILURE_INVALID_IPI_VECTOR:
++ /* Invalid IPI with vector < 16 */
++ break;
+ default:
+- pr_err("Unknown IPI interception\n");
++ vcpu_unimpl(vcpu, "Unknown avic incomplete IPI interception\n");
+ }
+
+ return 1;
--- /dev/null
+From 3fdc6087df3be73a212a81ce5dd6516638568806 Mon Sep 17 00:00:00 2001
+From: Maxim Levitsky <mlevitsk@redhat.com>
+Date: Thu, 28 Sep 2023 20:33:53 +0300
+Subject: x86: KVM: SVM: refresh AVIC inhibition in svm_leave_nested()
+
+From: Maxim Levitsky <mlevitsk@redhat.com>
+
+commit 3fdc6087df3be73a212a81ce5dd6516638568806 upstream.
+
+svm_leave_nested(), similar to a nested VM exit, gets the vCPU out of nested
+mode and thus should end the local inhibition of AVIC on this vCPU.
+
+Failure to do so, can lead to hangs on guest reboot.
+
+Raise the KVM_REQ_APICV_UPDATE request to refresh the AVIC state of the
+current vCPU in this case.
+
+Fixes: f44509f849fe ("KVM: x86: SVM: allow AVIC to co-exist with a nested guest running")
+Cc: stable@vger.kernel.org
+Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
+Reviewed-by: Sean Christopherson <seanjc@google.com>
+Message-Id: <20230928173354.217464-4-mlevitsk@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/svm/nested.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/arch/x86/kvm/svm/nested.c
++++ b/arch/x86/kvm/svm/nested.c
+@@ -1164,6 +1164,9 @@ void svm_leave_nested(struct kvm_vcpu *v
+
+ nested_svm_uninit_mmu_context(vcpu);
+ vmcb_mark_all_dirty(svm->vmcb);
++
++ if (kvm_apicv_activated(vcpu->kvm))
++ kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
+ }
+
+ kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
--- /dev/null
+From ff04ea9e79021ad1e27d251ec5258468c35217a4 Mon Sep 17 00:00:00 2001
+From: Joerg Roedel <jroedel@suse.de>
+Date: Mon, 16 Oct 2023 14:42:50 +0200
+Subject: x86/sev: Check for user-space IOIO pointing to kernel space
+
+From: Joerg Roedel <jroedel@suse.de>
+
+Upstream commit: 63e44bc52047f182601e7817da969a105aa1f721
+
+Check the memory operand of INS/OUTS before emulating the instruction.
+The #VC exception can get raised from user-space, but the memory operand
+can be manipulated to access kernel memory before the emulation actually
+begins and after the exception handler has run.
+
+ [ bp: Massage commit message. ]
+
+Fixes: 597cfe48212a ("x86/boot/compressed/64: Setup a GHCB-based VC Exception handler")
+Reported-by: Tom Dohrmann <erbse.13@gmx.de>
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Cc: <stable@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/boot/compressed/sev.c | 5 +++++
+ arch/x86/kernel/sev-shared.c | 31 +++++++++++++++++++++++++++++--
+ 2 files changed, 34 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/boot/compressed/sev.c
++++ b/arch/x86/boot/compressed/sev.c
+@@ -108,6 +108,11 @@ static enum es_result vc_ioio_check(stru
+ return ES_OK;
+ }
+
++static bool fault_in_kernel_space(unsigned long address)
++{
++ return false;
++}
++
+ #undef __init
+ #undef __pa
+ #define __init
+--- a/arch/x86/kernel/sev-shared.c
++++ b/arch/x86/kernel/sev-shared.c
+@@ -629,6 +629,23 @@ fail:
+ sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
+ }
+
++static enum es_result vc_insn_string_check(struct es_em_ctxt *ctxt,
++ unsigned long address,
++ bool write)
++{
++ if (user_mode(ctxt->regs) && fault_in_kernel_space(address)) {
++ ctxt->fi.vector = X86_TRAP_PF;
++ ctxt->fi.error_code = X86_PF_USER;
++ ctxt->fi.cr2 = address;
++ if (write)
++ ctxt->fi.error_code |= X86_PF_WRITE;
++
++ return ES_EXCEPTION;
++ }
++
++ return ES_OK;
++}
++
+ static enum es_result vc_insn_string_read(struct es_em_ctxt *ctxt,
+ void *src, char *buf,
+ unsigned int data_size,
+@@ -636,7 +653,12 @@ static enum es_result vc_insn_string_rea
+ bool backwards)
+ {
+ int i, b = backwards ? -1 : 1;
+- enum es_result ret = ES_OK;
++ unsigned long address = (unsigned long)src;
++ enum es_result ret;
++
++ ret = vc_insn_string_check(ctxt, address, false);
++ if (ret != ES_OK)
++ return ret;
+
+ for (i = 0; i < count; i++) {
+ void *s = src + (i * data_size * b);
+@@ -657,7 +679,12 @@ static enum es_result vc_insn_string_wri
+ bool backwards)
+ {
+ int i, s = backwards ? -1 : 1;
+- enum es_result ret = ES_OK;
++ unsigned long address = (unsigned long)dst;
++ enum es_result ret;
++
++ ret = vc_insn_string_check(ctxt, address, true);
++ if (ret != ES_OK)
++ return ret;
+
+ for (i = 0; i < count; i++) {
+ void *d = dst + (i * data_size * s);
--- /dev/null
+From b42add5366631ef4bb2ec65d762e8c0d04afe21c Mon Sep 17 00:00:00 2001
+From: Joerg Roedel <jroedel@suse.de>
+Date: Wed, 21 Jun 2023 17:42:42 +0200
+Subject: x86/sev: Check IOBM for IOIO exceptions from user-space
+
+From: Joerg Roedel <jroedel@suse.de>
+
+Upstream commit: b9cb9c45583b911e0db71d09caa6b56469eb2bdf
+
+Check the IO permission bitmap (if present) before emulating IOIO #VC
+exceptions for user-space. These permissions are checked by hardware
+already before the #VC is raised, but due to the VC-handler decoding
+race it needs to be checked again in software.
+
+Fixes: 25189d08e516 ("x86/sev-es: Add support for handling IOIO exceptions")
+Reported-by: Tom Dohrmann <erbse.13@gmx.de>
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Tested-by: Tom Dohrmann <erbse.13@gmx.de>
+Cc: <stable@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/boot/compressed/sev.c | 5 +++++
+ arch/x86/kernel/sev-shared.c | 22 +++++++++++++++-------
+ arch/x86/kernel/sev.c | 27 +++++++++++++++++++++++++++
+ 3 files changed, 47 insertions(+), 7 deletions(-)
+
+--- a/arch/x86/boot/compressed/sev.c
++++ b/arch/x86/boot/compressed/sev.c
+@@ -103,6 +103,11 @@ static enum es_result vc_read_mem(struct
+ return ES_OK;
+ }
+
++static enum es_result vc_ioio_check(struct es_em_ctxt *ctxt, u16 port, size_t size)
++{
++ return ES_OK;
++}
++
+ #undef __init
+ #undef __pa
+ #define __init
+--- a/arch/x86/kernel/sev-shared.c
++++ b/arch/x86/kernel/sev-shared.c
+@@ -693,6 +693,9 @@ static enum es_result vc_insn_string_wri
+ static enum es_result vc_ioio_exitinfo(struct es_em_ctxt *ctxt, u64 *exitinfo)
+ {
+ struct insn *insn = &ctxt->insn;
++ size_t size;
++ u64 port;
++
+ *exitinfo = 0;
+
+ switch (insn->opcode.bytes[0]) {
+@@ -701,7 +704,7 @@ static enum es_result vc_ioio_exitinfo(s
+ case 0x6d:
+ *exitinfo |= IOIO_TYPE_INS;
+ *exitinfo |= IOIO_SEG_ES;
+- *exitinfo |= (ctxt->regs->dx & 0xffff) << 16;
++ port = ctxt->regs->dx & 0xffff;
+ break;
+
+ /* OUTS opcodes */
+@@ -709,41 +712,43 @@ static enum es_result vc_ioio_exitinfo(s
+ case 0x6f:
+ *exitinfo |= IOIO_TYPE_OUTS;
+ *exitinfo |= IOIO_SEG_DS;
+- *exitinfo |= (ctxt->regs->dx & 0xffff) << 16;
++ port = ctxt->regs->dx & 0xffff;
+ break;
+
+ /* IN immediate opcodes */
+ case 0xe4:
+ case 0xe5:
+ *exitinfo |= IOIO_TYPE_IN;
+- *exitinfo |= (u8)insn->immediate.value << 16;
++ port = (u8)insn->immediate.value & 0xffff;
+ break;
+
+ /* OUT immediate opcodes */
+ case 0xe6:
+ case 0xe7:
+ *exitinfo |= IOIO_TYPE_OUT;
+- *exitinfo |= (u8)insn->immediate.value << 16;
++ port = (u8)insn->immediate.value & 0xffff;
+ break;
+
+ /* IN register opcodes */
+ case 0xec:
+ case 0xed:
+ *exitinfo |= IOIO_TYPE_IN;
+- *exitinfo |= (ctxt->regs->dx & 0xffff) << 16;
++ port = ctxt->regs->dx & 0xffff;
+ break;
+
+ /* OUT register opcodes */
+ case 0xee:
+ case 0xef:
+ *exitinfo |= IOIO_TYPE_OUT;
+- *exitinfo |= (ctxt->regs->dx & 0xffff) << 16;
++ port = ctxt->regs->dx & 0xffff;
+ break;
+
+ default:
+ return ES_DECODE_FAILED;
+ }
+
++ *exitinfo |= port << 16;
++
+ switch (insn->opcode.bytes[0]) {
+ case 0x6c:
+ case 0x6e:
+@@ -753,12 +758,15 @@ static enum es_result vc_ioio_exitinfo(s
+ case 0xee:
+ /* Single byte opcodes */
+ *exitinfo |= IOIO_DATA_8;
++ size = 1;
+ break;
+ default:
+ /* Length determined by instruction parsing */
+ *exitinfo |= (insn->opnd_bytes == 2) ? IOIO_DATA_16
+ : IOIO_DATA_32;
++ size = (insn->opnd_bytes == 2) ? 2 : 4;
+ }
++
+ switch (insn->addr_bytes) {
+ case 2:
+ *exitinfo |= IOIO_ADDR_16;
+@@ -774,7 +782,7 @@ static enum es_result vc_ioio_exitinfo(s
+ if (insn_has_rep_prefix(insn))
+ *exitinfo |= IOIO_REP;
+
+- return ES_OK;
++ return vc_ioio_check(ctxt, (u16)port, size);
+ }
+
+ static enum es_result vc_handle_ioio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
+--- a/arch/x86/kernel/sev.c
++++ b/arch/x86/kernel/sev.c
+@@ -512,6 +512,33 @@ static enum es_result vc_slow_virt_to_ph
+ return ES_OK;
+ }
+
++static enum es_result vc_ioio_check(struct es_em_ctxt *ctxt, u16 port, size_t size)
++{
++ BUG_ON(size > 4);
++
++ if (user_mode(ctxt->regs)) {
++	struct thread_struct *t = &current->thread;
++ struct io_bitmap *iobm = t->io_bitmap;
++ size_t idx;
++
++ if (!iobm)
++ goto fault;
++
++ for (idx = port; idx < port + size; ++idx) {
++ if (test_bit(idx, iobm->bitmap))
++ goto fault;
++ }
++ }
++
++ return ES_OK;
++
++fault:
++ ctxt->fi.vector = X86_TRAP_GP;
++ ctxt->fi.error_code = 0;
++
++ return ES_EXCEPTION;
++}
++
+ /* Include code shared with pre-decompression boot stage */
+ #include "sev-shared.c"
+
--- /dev/null
+From ea67a584f361b0dae8c0aaf34b3f49cd488c9025 Mon Sep 17 00:00:00 2001
+From: "Borislav Petkov (AMD)" <bp@alien8.de>
+Date: Thu, 5 Oct 2023 11:06:36 +0200
+Subject: x86/sev: Disable MMIO emulation from user mode
+
+From: "Borislav Petkov (AMD)" <bp@alien8.de>
+
+Upstream commit: a37cd2a59d0cb270b1bba568fd3a3b8668b9d3ba
+
+A virt scenario can be constructed where MMIO memory can be user memory.
+When that happens, a race condition opens between when the hardware
+raises the #VC and when the #VC handler gets to emulate the instruction.
+
+If the MOVS is replaced with a MOVS accessing kernel memory in that
+small race window, then write to kernel memory happens as the access
+checks are not done at emulation time.
+
+Disable MMIO emulation in user mode temporarily until a sensible use
+case appears and justifies properly handling the race window.
+
+Fixes: 0118b604c2c9 ("x86/sev-es: Handle MMIO String Instructions")
+Reported-by: Tom Dohrmann <erbse.13@gmx.de>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Tested-by: Tom Dohrmann <erbse.13@gmx.de>
+Cc: <stable@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/sev.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/arch/x86/kernel/sev.c
++++ b/arch/x86/kernel/sev.c
+@@ -1552,6 +1552,9 @@ static enum es_result vc_handle_mmio(str
+ return ES_DECODE_FAILED;
+ }
+
++ if (user_mode(ctxt->regs))
++ return ES_UNSUPPORTED;
++
+ switch (mmio) {
+ case MMIO_WRITE:
+ memcpy(ghcb->shared_buffer, reg_data, bytes);