--- /dev/null
+From 1e90315149f3fe148e114a5de86f0196d1c21fa5 Mon Sep 17 00:00:00 2001
+From: Josef Bacik <josef@toxicpanda.com>
+Date: Thu, 13 Feb 2020 10:47:29 -0500
+Subject: btrfs: do not check delayed items are empty for single transaction cleanup
+
+From: Josef Bacik <josef@toxicpanda.com>
+
+commit 1e90315149f3fe148e114a5de86f0196d1c21fa5 upstream.
+
+btrfs_assert_delayed_root_empty() will check if the delayed root is
+completely empty, but this is a filesystem-wide check. On cleanup we
+may have allowed other transactions to begin, for whatever reason, and
+thus the delayed root is not empty.
+
+So remove this check from cleanup_one_transaction(). This however can
+stay in btrfs_cleanup_transaction(), because it checks only after all of
+the transactions have been properly cleaned up, and thus is valid.
+
+CC: stable@vger.kernel.org # 4.4+
+Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+Reviewed-by: Nikolay Borisov <nborisov@suse.com>
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: Josef Bacik <josef@toxicpanda.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/disk-io.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -4469,7 +4469,6 @@ void btrfs_cleanup_one_transaction(struc
+ wake_up(&fs_info->transaction_wait);
+
+ btrfs_destroy_delayed_inodes(fs_info);
+- btrfs_assert_delayed_root_empty(fs_info);
+
+ btrfs_destroy_marked_extents(fs_info, &cur_trans->dirty_pages,
+ EXTENT_DIRTY);
--- /dev/null
+From e75fd33b3f744f644061a4f9662bd63f5434f806 Mon Sep 17 00:00:00 2001
+From: Filipe Manana <fdmanana@suse.com>
+Date: Thu, 13 Feb 2020 12:29:50 +0000
+Subject: Btrfs: fix btrfs_wait_ordered_range() so that it waits for all ordered extents
+
+From: Filipe Manana <fdmanana@suse.com>
+
+commit e75fd33b3f744f644061a4f9662bd63f5434f806 upstream.
+
+In btrfs_wait_ordered_range() once we find an ordered extent that has
+finished with an error we exit the loop and don't wait for any other
+ordered extents that might be still in progress.
+
+All the users of btrfs_wait_ordered_range() expect that there are no more
+ordered extents in progress after that function returns. So past fixes
+such like the ones from the two following commits:
+
+ ff612ba7849964 ("btrfs: fix panic during relocation after ENOSPC before
+ writeback happens")
+
+ 28aeeac1dd3080 ("Btrfs: fix panic when starting bg cache writeout after
+ IO error")
+
+don't work when there are multiple ordered extents in the range.
+
+Fix that by making btrfs_wait_ordered_range() wait for all ordered extents
+even after it finds one that had an error.
+
+Link: https://github.com/kdave/btrfs-progs/issues/228#issuecomment-569777554
+CC: stable@vger.kernel.org # 4.4+
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Reviewed-by: Josef Bacik <josef@toxicpanda.com>
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/ordered-data.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/fs/btrfs/ordered-data.c
++++ b/fs/btrfs/ordered-data.c
+@@ -712,10 +712,15 @@ int btrfs_wait_ordered_range(struct inod
+ }
+ btrfs_start_ordered_extent(inode, ordered, 1);
+ end = ordered->file_offset;
++ /*
++ * If the ordered extent had an error save the error but don't
++ * exit without waiting first for all other ordered extents in
++ * the range to complete.
++ */
+ if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
+ ret = -EIO;
+ btrfs_put_ordered_extent(ordered);
+- if (ret || end == 0 || end == start)
++ if (end == 0 || end == start)
+ break;
+ end--;
+ }
--- /dev/null
+From b778cf962d71a0e737923d55d0432f3bd287258e Mon Sep 17 00:00:00 2001
+From: Josef Bacik <josef@toxicpanda.com>
+Date: Thu, 13 Feb 2020 10:47:31 -0500
+Subject: btrfs: fix bytes_may_use underflow in prealloc error condtition
+
+From: Josef Bacik <josef@toxicpanda.com>
+
+commit b778cf962d71a0e737923d55d0432f3bd287258e upstream.
+
+I hit the following warning while running my error injection stress
+testing:
+
+ WARNING: CPU: 3 PID: 1453 at fs/btrfs/space-info.h:108 btrfs_free_reserved_data_space_noquota+0xfd/0x160 [btrfs]
+ RIP: 0010:btrfs_free_reserved_data_space_noquota+0xfd/0x160 [btrfs]
+ Call Trace:
+ btrfs_free_reserved_data_space+0x4f/0x70 [btrfs]
+ __btrfs_prealloc_file_range+0x378/0x470 [btrfs]
+ elfcorehdr_read+0x40/0x40
+ ? elfcorehdr_read+0x40/0x40
+ ? btrfs_commit_transaction+0xca/0xa50 [btrfs]
+ ? dput+0xb4/0x2a0
+ ? btrfs_log_dentry_safe+0x55/0x70 [btrfs]
+ ? btrfs_sync_file+0x30e/0x420 [btrfs]
+ ? do_fsync+0x38/0x70
+ ? __x64_sys_fdatasync+0x13/0x20
+ ? do_syscall_64+0x5b/0x1b0
+ ? entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
+This happens if we fail to insert our reserved file extent. At this
+point we've already converted our reservation from ->bytes_may_use to
+->bytes_reserved. However once we break we will attempt to free
+everything from [cur_offset, end] from ->bytes_may_use, but our extent
+reservation will overlap part of this.
+
+Fix this problem by adding ins.offset (our extent allocation size) to
+cur_offset so we remove the actual remaining part from ->bytes_may_use.
+
+I validated this fix using my inject-error.py script
+
+python inject-error.py -o should_fail_bio -t cache_save_setup -t \
+ __btrfs_prealloc_file_range \
+ -t insert_reserved_file_extent.constprop.0 \
+ -r "-5" ./run-fsstress.sh
+
+where run-fsstress.sh simply mounts and runs fsstress on a disk.
+
+CC: stable@vger.kernel.org # 4.4+
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: Josef Bacik <josef@toxicpanda.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/inode.c | 16 +++++++++++++---
+ 1 file changed, 13 insertions(+), 3 deletions(-)
+
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -10348,6 +10348,7 @@ static int __btrfs_prealloc_file_range(s
+ struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct btrfs_key ins;
+ u64 cur_offset = start;
++ u64 clear_offset = start;
+ u64 i_size;
+ u64 cur_bytes;
+ u64 last_alloc = (u64)-1;
+@@ -10382,6 +10383,15 @@ static int __btrfs_prealloc_file_range(s
+ btrfs_end_transaction(trans);
+ break;
+ }
++
++ /*
++ * We've reserved this space, and thus converted it from
++ * ->bytes_may_use to ->bytes_reserved. Any error that happens
++ * from here on out we will only need to clear our reservation
++ * for the remaining unreserved area, so advance our
++ * clear_offset by our extent size.
++ */
++ clear_offset += ins.offset;
+ btrfs_dec_block_group_reservations(fs_info, ins.objectid);
+
+ last_alloc = ins.offset;
+@@ -10462,9 +10472,9 @@ next:
+ if (own_trans)
+ btrfs_end_transaction(trans);
+ }
+- if (cur_offset < end)
+- btrfs_free_reserved_data_space(inode, NULL, cur_offset,
+- end - cur_offset + 1);
++ if (clear_offset < end)
++ btrfs_free_reserved_data_space(inode, NULL, clear_offset,
++ end - clear_offset + 1);
+ return ret;
+ }
+
--- /dev/null
+From 315bf8ef914f31d51d084af950703aa1e09a728c Mon Sep 17 00:00:00 2001
+From: Josef Bacik <josef@toxicpanda.com>
+Date: Thu, 13 Feb 2020 10:47:28 -0500
+Subject: btrfs: reset fs_root to NULL on error in open_ctree
+
+From: Josef Bacik <josef@toxicpanda.com>
+
+commit 315bf8ef914f31d51d084af950703aa1e09a728c upstream.
+
+While running my error injection script I hit a panic when we tried to
+clean up the fs_root when freeing the fs_root. This is because
+fs_info->fs_root == PTR_ERR(-EIO), which isn't great. Fix this by
+setting fs_info->fs_root = NULL; if we fail to read the root.
+
+CC: stable@vger.kernel.org # 4.4+
+Reviewed-by: Nikolay Borisov <nborisov@suse.com>
+Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: Josef Bacik <josef@toxicpanda.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/disk-io.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -3153,6 +3153,7 @@ retry_root_backup:
+ if (IS_ERR(fs_info->fs_root)) {
+ err = PTR_ERR(fs_info->fs_root);
+ btrfs_warn(fs_info, "failed to read fs tree: %d", err);
++ fs_info->fs_root = NULL;
+ goto fail_qgroup;
+ }
+
--- /dev/null
+From 23520b2def95205f132e167cf5b25c609975e959 Mon Sep 17 00:00:00 2001
+From: Miaohe Lin <linmiaohe@huawei.com>
+Date: Fri, 21 Feb 2020 22:04:46 +0800
+Subject: KVM: apic: avoid calculating pending eoi from an uninitialized val
+
+From: Miaohe Lin <linmiaohe@huawei.com>
+
+commit 23520b2def95205f132e167cf5b25c609975e959 upstream.
+
+When pv_eoi_get_user() fails, 'val' may remain uninitialized and the return
+value of pv_eoi_get_pending() becomes random. Fix the issue by initializing
+the variable.
+
+Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>
+Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/lapic.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -633,9 +633,11 @@ static inline bool pv_eoi_enabled(struct
+ static bool pv_eoi_get_pending(struct kvm_vcpu *vcpu)
+ {
+ u8 val;
+- if (pv_eoi_get_user(vcpu, &val) < 0)
++ if (pv_eoi_get_user(vcpu, &val) < 0) {
+ apic_debug("Can't read EOI MSR value: 0x%llx\n",
+ (unsigned long long)vcpu->arch.pv_eoi.msr_val);
++ return false;
++ }
+ return val & 0x1;
+ }
+
--- /dev/null
+From a4443267800af240072280c44521caab61924e55 Mon Sep 17 00:00:00 2001
+From: Vitaly Kuznetsov <vkuznets@redhat.com>
+Date: Thu, 20 Feb 2020 18:22:04 +0100
+Subject: KVM: nVMX: clear PIN_BASED_POSTED_INTR from nested pinbased_ctls only when apicv is globally disabled
+
+From: Vitaly Kuznetsov <vkuznets@redhat.com>
+
+commit a4443267800af240072280c44521caab61924e55 upstream.
+
+When apicv is disabled on a vCPU (e.g. by enabling KVM_CAP_HYPERV_SYNIC*),
+nothing happens to VMX MSRs on the already existing vCPUs, however, all new
+ones are created with PIN_BASED_POSTED_INTR filtered out. This is very
+confusing and results in the following picture inside the guest:
+
+$ rdmsr -ax 0x48d
+ff00000016
+7f00000016
+7f00000016
+7f00000016
+
+This is observed with QEMU and 4-vCPU guest: QEMU creates vCPU0, does
+KVM_CAP_HYPERV_SYNIC2 and then creates the remaining three.
+
+L1 hypervisor may only check CPU0's controls to find out what features
+are available and it will be very confused later. Switch to setting
+PIN_BASED_POSTED_INTR control based on global 'enable_apicv' setting.
+
+Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/vmx/capabilities.h | 1 +
+ arch/x86/kvm/vmx/nested.c | 5 ++---
+ arch/x86/kvm/vmx/nested.h | 3 +--
+ arch/x86/kvm/vmx/vmx.c | 10 ++++------
+ 4 files changed, 8 insertions(+), 11 deletions(-)
+
+--- a/arch/x86/kvm/vmx/capabilities.h
++++ b/arch/x86/kvm/vmx/capabilities.h
+@@ -12,6 +12,7 @@ extern bool __read_mostly enable_ept;
+ extern bool __read_mostly enable_unrestricted_guest;
+ extern bool __read_mostly enable_ept_ad_bits;
+ extern bool __read_mostly enable_pml;
++extern bool __read_mostly enable_apicv;
+ extern int __read_mostly pt_mode;
+
+ #define PT_MODE_SYSTEM 0
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -5979,8 +5979,7 @@ void nested_vmx_set_vmcs_shadowing_bitma
+ * bit in the high half is on if the corresponding bit in the control field
+ * may be on. See also vmx_control_verify().
+ */
+-void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps,
+- bool apicv)
++void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps)
+ {
+ /*
+ * Note that as a general rule, the high half of the MSRs (bits in
+@@ -6007,7 +6006,7 @@ void nested_vmx_setup_ctls_msrs(struct n
+ PIN_BASED_EXT_INTR_MASK |
+ PIN_BASED_NMI_EXITING |
+ PIN_BASED_VIRTUAL_NMIS |
+- (apicv ? PIN_BASED_POSTED_INTR : 0);
++ (enable_apicv ? PIN_BASED_POSTED_INTR : 0);
+ msrs->pinbased_ctls_high |=
+ PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
+ PIN_BASED_VMX_PREEMPTION_TIMER;
+--- a/arch/x86/kvm/vmx/nested.h
++++ b/arch/x86/kvm/vmx/nested.h
+@@ -17,8 +17,7 @@ enum nvmx_vmentry_status {
+ };
+
+ void vmx_leave_nested(struct kvm_vcpu *vcpu);
+-void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps,
+- bool apicv);
++void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps);
+ void nested_vmx_hardware_unsetup(void);
+ __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *));
+ void nested_vmx_set_vmcs_shadowing_bitmap(void);
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -95,7 +95,7 @@ module_param(emulate_invalid_guest_state
+ static bool __read_mostly fasteoi = 1;
+ module_param(fasteoi, bool, S_IRUGO);
+
+-static bool __read_mostly enable_apicv = 1;
++bool __read_mostly enable_apicv = 1;
+ module_param(enable_apicv, bool, S_IRUGO);
+
+ /*
+@@ -6803,8 +6803,7 @@ static struct kvm_vcpu *vmx_create_vcpu(
+
+ if (nested)
+ nested_vmx_setup_ctls_msrs(&vmx->nested.msrs,
+- vmx_capability.ept,
+- kvm_vcpu_apicv_active(&vmx->vcpu));
++ vmx_capability.ept);
+ else
+ memset(&vmx->nested.msrs, 0, sizeof(vmx->nested.msrs));
+
+@@ -6884,8 +6883,7 @@ static int __init vmx_check_processor_co
+ if (setup_vmcs_config(&vmcs_conf, &vmx_cap) < 0)
+ return -EIO;
+ if (nested)
+- nested_vmx_setup_ctls_msrs(&vmcs_conf.nested, vmx_cap.ept,
+- enable_apicv);
++ nested_vmx_setup_ctls_msrs(&vmcs_conf.nested, vmx_cap.ept);
+ if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config)) != 0) {
+ printk(KERN_ERR "kvm: CPU %d feature inconsistency!\n",
+ smp_processor_id());
+@@ -7792,7 +7790,7 @@ static __init int hardware_setup(void)
+
+ if (nested) {
+ nested_vmx_setup_ctls_msrs(&vmcs_config.nested,
+- vmx_capability.ept, enable_apicv);
++ vmx_capability.ept);
+
+ r = nested_vmx_hardware_setup(kvm_vmx_exit_handlers);
+ if (r)
--- /dev/null
+From 91a5f413af596ad01097e59bf487eb07cb3f1331 Mon Sep 17 00:00:00 2001
+From: Vitaly Kuznetsov <vkuznets@redhat.com>
+Date: Thu, 20 Feb 2020 18:22:05 +0100
+Subject: KVM: nVMX: handle nested posted interrupts when apicv is disabled for L1
+
+From: Vitaly Kuznetsov <vkuznets@redhat.com>
+
+commit 91a5f413af596ad01097e59bf487eb07cb3f1331 upstream.
+
+Even when APICv is disabled for L1 it can (and, actually, is) still
+available for L2, this means we need to always call
+vmx_deliver_nested_posted_interrupt() when attempting an interrupt
+delivery.
+
+Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/kvm_host.h | 2 +-
+ arch/x86/kvm/lapic.c | 5 +----
+ arch/x86/kvm/svm.c | 7 ++++++-
+ arch/x86/kvm/vmx.c | 13 +++++++++----
+ 4 files changed, 17 insertions(+), 10 deletions(-)
+
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -1040,7 +1040,7 @@ struct kvm_x86_ops {
+ void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
+ void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu);
+ void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu, hpa_t hpa);
+- void (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
++ int (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
+ int (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
+ int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
+ int (*set_identity_map_addr)(struct kvm *kvm, u64 ident_addr);
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -1060,11 +1060,8 @@ static int __apic_accept_irq(struct kvm_
+ apic_clear_vector(vector, apic->regs + APIC_TMR);
+ }
+
+- if (vcpu->arch.apicv_active)
+- kvm_x86_ops->deliver_posted_interrupt(vcpu, vector);
+- else {
++ if (kvm_x86_ops->deliver_posted_interrupt(vcpu, vector)) {
+ kvm_lapic_set_irr(vector, apic);
+-
+ kvm_make_request(KVM_REQ_EVENT, vcpu);
+ kvm_vcpu_kick(vcpu);
+ }
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -5140,8 +5140,11 @@ static void svm_load_eoi_exitmap(struct
+ return;
+ }
+
+-static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
++static int svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
+ {
++ if (!vcpu->arch.apicv_active)
++ return -1;
++
+ kvm_lapic_set_irr(vec, vcpu->arch.apic);
+ smp_mb__after_atomic();
+
+@@ -5150,6 +5153,8 @@ static void svm_deliver_avic_intr(struct
+ kvm_cpu_get_apicid(vcpu->cpu));
+ else
+ kvm_vcpu_wake_up(vcpu);
++
++ return 0;
+ }
+
+ static bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -6284,24 +6284,29 @@ static int vmx_deliver_nested_posted_int
+ * 2. If target vcpu isn't running(root mode), kick it to pick up the
+ * interrupt from PIR in next vmentry.
+ */
+-static void vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
++static int vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
+ {
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ int r;
+
+ r = vmx_deliver_nested_posted_interrupt(vcpu, vector);
+ if (!r)
+- return;
++ return 0;
++
++ if (!vcpu->arch.apicv_active)
++ return -1;
+
+ if (pi_test_and_set_pir(vector, &vmx->pi_desc))
+- return;
++ return 0;
+
+ /* If a previous notification has sent the IPI, nothing to do. */
+ if (pi_test_and_set_on(&vmx->pi_desc))
+- return;
++ return 0;
+
+ if (!kvm_vcpu_trigger_posted_interrupt(vcpu, false))
+ kvm_vcpu_kick(vcpu);
++
++ return 0;
+ }
+
+ /*
ext4-fix-race-between-writepages-and-enabling-ext4_extents_fl.patch
kvm-nvmx-refactor-io-bitmap-checks-into-helper-function.patch
kvm-nvmx-check-io-instruction-vm-exit-conditions.patch
+kvm-nvmx-handle-nested-posted-interrupts-when-apicv-is-disabled-for-l1.patch
+kvm-apic-avoid-calculating-pending-eoi-from-an-uninitialized-val.patch
+btrfs-fix-bytes_may_use-underflow-in-prealloc-error-condtition.patch
+btrfs-reset-fs_root-to-null-on-error-in-open_ctree.patch
+btrfs-do-not-check-delayed-items-are-empty-for-single-transaction-cleanup.patch
+btrfs-fix-btrfs_wait_ordered_range-so-that-it-waits-for-all-ordered-extents.patch
+kvm-nvmx-clear-pin_based_posted_intr-from-nested-pinbased_ctls-only-when-apicv-is-globally-disabled.patch