--- /dev/null
+From 2f96e40212d435b328459ba6b3956395eed8fa9f Mon Sep 17 00:00:00 2001
+From: Josef Bacik <josef@toxicpanda.com>
+Date: Fri, 15 Jan 2021 16:26:17 -0500
+Subject: btrfs: fix possible free space tree corruption with online conversion
+
+From: Josef Bacik <josef@toxicpanda.com>
+
+commit 2f96e40212d435b328459ba6b3956395eed8fa9f upstream.
+
+While running btrfs/011 in a loop I would often hit an ASSERT() while
+trying to add a new free space entry that already existed, or get an
+EEXIST while adding a new block to the extent tree, which is another
+indication of double allocation.
+
+This occurs because when we do the free space tree population, we create
+the new root and then populate the tree and commit the transaction.
+The problem is that when you create a new root, the root node and the
+commit root node are the same. During this initial transaction commit
+we will run
+all of the delayed refs that were paused during the free space tree
+generation, and thus begin to cache block groups. While caching block
+groups the caching thread will be reading from the main root for the
+free space tree, so as we make allocations we'll be changing the free
+space tree, which can cause us to add the same range twice. This
+results in either the ASSERT(ret != -EEXIST); in __btrfs_add_free_space,
+or in a variety of different errors when running delayed refs, because
+of a double allocation.
+
+Fix this by flagging in the fs_info that the free space tree is not yet
+safe to load from, and falling back on the old slow method. We could
+be smarter than this, for example caching the block group while we're
+populating the free space tree, but since this is a serious problem
+I've opted for the simplest solution.
+
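+A rough userspace sketch of the flag guard (illustrative names and
+standalone code, not the kernel implementation itself):
+
+  #include <stdbool.h>
+  #include <stdio.h>
+
+  static bool fst_untrusted; /* models BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED */
+
+  static void cache_block_group(bool have_fst)
+  {
+      /* Mirrors the caching_thread() check below: only take the fast
+       * free-space-tree path while the flag is clear. */
+      if (have_fst && !fst_untrusted)
+          puts("load_free_space_tree()  /* fast path */");
+      else
+          puts("load_extent_tree_free() /* slow, but safe */");
+  }
+
+  int main(void)
+  {
+      fst_untrusted = true;    /* set_bit() before populating the tree */
+      cache_block_group(true); /* forced onto the slow path */
+      /* ... populate the tree, commit the transaction ... */
+      fst_untrusted = false;   /* clear_bit() after the commit */
+      cache_block_group(true); /* fast path is safe again */
+      return 0;
+  }
+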
+CC: stable@vger.kernel.org # 4.9+
+Fixes: a5ed91828518 ("Btrfs: implement the free space B-tree")
+Reviewed-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: Josef Bacik <josef@toxicpanda.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/block-group.c | 10 +++++++++-
+ fs/btrfs/ctree.h | 3 +++
+ fs/btrfs/free-space-tree.c | 10 +++++++++-
+ 3 files changed, 21 insertions(+), 2 deletions(-)
+
+--- a/fs/btrfs/block-group.c
++++ b/fs/btrfs/block-group.c
+@@ -640,7 +640,15 @@ static noinline void caching_thread(stru
+ mutex_lock(&caching_ctl->mutex);
+ down_read(&fs_info->commit_root_sem);
+
+- if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
++ /*
++ * If we are in the transaction that populated the free space tree we
++ * can't actually cache from the free space tree as our commit root and
++ * real root are the same, so we could change the contents of the blocks
++ * while caching. Instead do the slow caching in this case, and after
++ * the transaction has committed we will be safe.
++ */
++ if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
++ !(test_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags)))
+ ret = load_free_space_tree(caching_ctl);
+ else
+ ret = load_extent_tree_free(caching_ctl);
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -136,6 +136,9 @@ enum {
+ BTRFS_FS_STATE_DEV_REPLACING,
+ /* The btrfs_fs_info created for self-tests */
+ BTRFS_FS_STATE_DUMMY_FS_INFO,
++
++ /* Indicate that we can't trust the free space tree for caching yet */
++ BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED,
+ };
+
+ #define BTRFS_BACKREF_REV_MAX 256
+--- a/fs/btrfs/free-space-tree.c
++++ b/fs/btrfs/free-space-tree.c
+@@ -1149,6 +1149,7 @@ int btrfs_create_free_space_tree(struct
+ return PTR_ERR(trans);
+
+ set_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
++ set_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags);
+ free_space_root = btrfs_create_tree(trans,
+ BTRFS_FREE_SPACE_TREE_OBJECTID);
+ if (IS_ERR(free_space_root)) {
+@@ -1170,11 +1171,18 @@ int btrfs_create_free_space_tree(struct
+ btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE);
+ btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID);
+ clear_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
++ ret = btrfs_commit_transaction(trans);
+
+- return btrfs_commit_transaction(trans);
++ /*
++ * Now that we've committed the transaction any reading of our commit
++ * root will be safe, so we can cache from the free space tree now.
++ */
++ clear_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags);
++ return ret;
+
+ abort:
+ clear_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
++ clear_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags);
+ btrfs_abort_transaction(trans, ret);
+ btrfs_end_transaction(trans);
+ return ret;
--- /dev/null
+From 680896556805d3ad3fa47f6002b87b3041a45ac2 Mon Sep 17 00:00:00 2001
+From: Claudiu Beznea <claudiu.beznea@microchip.com>
+Date: Fri, 22 Jan 2021 14:21:34 +0200
+Subject: drivers: soc: atmel: add null entry at the end of at91_soc_allowed_list[]
+
+From: Claudiu Beznea <claudiu.beznea@microchip.com>
+
+commit 680896556805d3ad3fa47f6002b87b3041a45ac2 upstream.
+
+of_match_node() calls __of_match_node(), which loops through the
+entries of the matches array. It stops when the condition
+(matches->name[0] || matches->type[0] || matches->compatible[0]) is
+false. Thus, add a null entry at the end of the at91_soc_allowed_list[]
+array.
+
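+A minimal standalone model of that walk (simplified: the real
+of_device_id check also tests name[0] and type[0]):
+
+  #include <stdio.h>
+  #include <string.h>
+
+  struct id { const char *compatible; };
+
+  static const struct id allowed[] = {
+      { .compatible = "atmel,at91rm9200" },
+      { .compatible = "atmel,samv7" },
+      { } /* null sentinel: ends the walk below */
+  };
+
+  static const struct id *match(const struct id *t, const char *compat)
+  {
+      /* Without the { } entry this loop would read past the array. */
+      for (; t->compatible; t++)
+          if (!strcmp(t->compatible, compat))
+              return t;
+      return NULL;
+  }
+
+  int main(void)
+  {
+      printf("%s\n", match(allowed, "atmel,samv7") ? "match" : "no match");
+      return 0;
+  }
+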
+Fixes: caab13b49604 ("drivers: soc: atmel: Avoid calling at91_soc_init on non AT91 SoCs")
+Cc: stable@vger.kernel.org #4.12+
+Signed-off-by: Claudiu Beznea <claudiu.beznea@microchip.com>
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/soc/atmel/soc.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/soc/atmel/soc.c
++++ b/drivers/soc/atmel/soc.c
+@@ -268,7 +268,8 @@ static const struct of_device_id at91_so
+ { .compatible = "atmel,at91rm9200", },
+ { .compatible = "atmel,at91sam9", },
+ { .compatible = "atmel,sama5", },
+- { .compatible = "atmel,samv7", }
++ { .compatible = "atmel,samv7", },
++ { }
+ };
+
+ static int __init atmel_soc_device_init(void)
--- /dev/null
+From caab13b4960416b9fee83169a758eb0f31e65109 Mon Sep 17 00:00:00 2001
+From: Sudeep Holla <sudeep.holla@arm.com>
+Date: Fri, 11 Dec 2020 13:58:46 +0000
+Subject: drivers: soc: atmel: Avoid calling at91_soc_init on non AT91 SoCs
+
+From: Sudeep Holla <sudeep.holla@arm.com>
+
+commit caab13b4960416b9fee83169a758eb0f31e65109 upstream.
+
+Since at91_soc_init is called unconditionally from atmel_soc_device_init,
+we get the following warning on all non-AT91 SoCs:
+  "AT91: Could not find identification node"
+
+Fix this by filtering against a list of allowed AT91 SoC compatibles.
+
+Cc: Nicolas Ferre <nicolas.ferre@microchip.com>
+Cc: Alexandre Belloni <alexandre.belloni@bootlin.com>
+Cc: Ludovic Desroches <ludovic.desroches@microchip.com>
+Cc: stable@vger.kernel.org #4.12+
+Signed-off-by: Sudeep Holla <sudeep.holla@arm.com>
+Signed-off-by: Alexandre Belloni <alexandre.belloni@bootlin.com>
+Link: https://lore.kernel.org/r/20201211135846.1334322-1-sudeep.holla@arm.com
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/soc/atmel/soc.c | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+--- a/drivers/soc/atmel/soc.c
++++ b/drivers/soc/atmel/soc.c
+@@ -264,8 +264,20 @@ struct soc_device * __init at91_soc_init
+ return soc_dev;
+ }
+
++static const struct of_device_id at91_soc_allowed_list[] __initconst = {
++ { .compatible = "atmel,at91rm9200", },
++ { .compatible = "atmel,at91sam9", },
++ { .compatible = "atmel,sama5", },
++ { .compatible = "atmel,samv7", }
++};
++
+ static int __init atmel_soc_device_init(void)
+ {
++ struct device_node *np = of_find_node_by_path("/");
++
++ if (!of_match_node(at91_soc_allowed_list, np))
++ return 0;
++
+ at91_soc_init(socs);
+
+ return 0;
--- /dev/null
+From d51e1d3f6b4236e0352407d8a63f5c5f71ce193d Mon Sep 17 00:00:00 2001
+From: Maxim Levitsky <mlevitsk@redhat.com>
+Date: Thu, 14 Jan 2021 22:54:47 +0200
+Subject: KVM: nVMX: Sync unsync'd vmcs02 state to vmcs12 on migration
+
+From: Maxim Levitsky <mlevitsk@redhat.com>
+
+commit d51e1d3f6b4236e0352407d8a63f5c5f71ce193d upstream.
+
+Even when we are outside the nested guest, some vmcs02 fields
+may not be in sync with vmcs12. This is intentional, even across
+nested VM-exit, because the sync can be delayed until the nested
+hypervisor performs a VMCLEAR or a VMREAD/VMWRITE that affects those
+rarely accessed fields.
+
+However, during KVM_GET_NESTED_STATE, the vmcs12 has to be up to date to
+be able to restore it. To fix that, call copy_vmcs02_to_vmcs12_rare()
+before the vmcs12 contents are copied to userspace.
+
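+A standalone sketch of why the rare fields must be flushed before the
+snapshot (illustrative types, not the KVM structures):
+
+  #include <stdio.h>
+
+  struct vmcs12_model { int rare_field; };
+
+  static struct vmcs12_model vmcs02; /* live state, updated lazily  */
+  static struct vmcs12_model vmcs12; /* the copy userspace receives */
+
+  static void sync_rare(void) /* models copy_vmcs02_to_vmcs12_rare() */
+  {
+      vmcs12.rare_field = vmcs02.rare_field;
+  }
+
+  static struct vmcs12_model get_nested_state(void)
+  {
+      /* The fix: flush the lazily synced fields first; without this,
+       * userspace would snapshot a stale rare_field. */
+      sync_rare();
+      return vmcs12;
+  }
+
+  int main(void)
+  {
+      vmcs02.rare_field = 42; /* changed after the last sync */
+      printf("rare_field=%d\n", get_nested_state().rare_field); /* 42 */
+      return 0;
+  }
+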
+Fixes: 7952d769c29ca ("KVM: nVMX: Sync rarely accessed guest fields only when needed")
+Reviewed-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
+Message-Id: <20210114205449.8715-2-mlevitsk@redhat.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/vmx/nested.c | 13 ++++++++-----
+ 1 file changed, 8 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -5579,11 +5579,14 @@ static int vmx_get_nested_state(struct k
+ if (is_guest_mode(vcpu)) {
+ sync_vmcs02_to_vmcs12(vcpu, vmcs12);
+ sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
+- } else if (!vmx->nested.need_vmcs12_to_shadow_sync) {
+- if (vmx->nested.hv_evmcs)
+- copy_enlightened_to_vmcs12(vmx);
+- else if (enable_shadow_vmcs)
+- copy_shadow_to_vmcs12(vmx);
++ } else {
++ copy_vmcs02_to_vmcs12_rare(vcpu, get_vmcs12(vcpu));
++ if (!vmx->nested.need_vmcs12_to_shadow_sync) {
++ if (vmx->nested.hv_evmcs)
++ copy_enlightened_to_vmcs12(vmx);
++ else if (enable_shadow_vmcs)
++ copy_shadow_to_vmcs12(vmx);
++ }
+ }
+
+ BUILD_BUG_ON(sizeof(user_vmx_nested_state->vmcs12) < VMCS12_SIZE);
--- /dev/null
+From 1f7becf1b7e21794fc9d460765fe09679bc9b9e0 Mon Sep 17 00:00:00 2001
+From: Jay Zhou <jianjay.zhou@huawei.com>
+Date: Mon, 18 Jan 2021 16:47:20 +0800
+Subject: KVM: x86: get smi pending status correctly
+
+From: Jay Zhou <jianjay.zhou@huawei.com>
+
+commit 1f7becf1b7e21794fc9d460765fe09679bc9b9e0 upstream.
+
+SMI injection is a two-step process:
+
+ Qemu KVM
+Step1:
+ cpu->interrupt_request &= \
+ ~CPU_INTERRUPT_SMI;
+ kvm_vcpu_ioctl(cpu, KVM_SMI)
+
+ call kvm_vcpu_ioctl_smi() and
+ kvm_make_request(KVM_REQ_SMI, vcpu);
+
+Step2:
+ kvm_vcpu_ioctl(cpu, KVM_RUN, 0)
+
+ call process_smi() if
+ kvm_check_request(KVM_REQ_SMI, vcpu) is
+ true, mark vcpu->arch.smi_pending = true;
+
+vcpu->arch.smi_pending is set true in step 2. Unfortunately, if the
+vcpu is paused between step 1 and step 2, kvm_run->immediate_exit will
+be set and the vcpu has to exit to Qemu during step 2 before
+vcpu->arch.smi_pending is marked true.
+During VM migration, Qemu gets the smi pending status from KVM using
+the KVM_GET_VCPU_EVENTS ioctl during the downtime, so the smi pending
+status is lost.
+
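+A standalone sketch of the race and the fix (illustrative flags, not
+the KVM internals):
+
+  #include <stdbool.h>
+  #include <stdio.h>
+
+  static bool req_smi;     /* models KVM_REQ_SMI, latched by KVM_SMI */
+  static bool smi_pending; /* models vcpu->arch.smi_pending          */
+
+  static void process_smi(void)
+  {
+      smi_pending = true;
+      req_smi = false;
+  }
+
+  static bool get_vcpu_events(void)
+  {
+      /* The fix: fold a still-latched request into the reported
+       * state so migration at this point does not drop the SMI. */
+      if (req_smi)
+          process_smi();
+      return smi_pending;
+  }
+
+  int main(void)
+  {
+      req_smi = true; /* KVM_SMI issued, vcpu paused before KVM_RUN */
+      printf("smi_pending=%d\n", get_vcpu_events()); /* 1, not 0 */
+      return 0;
+  }
+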
+Signed-off-by: Jay Zhou <jianjay.zhou@huawei.com>
+Signed-off-by: Shengen Zhuang <zhuangshengen@huawei.com>
+Message-Id: <20210118084720.1585-1-jianjay.zhou@huawei.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/x86.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -102,6 +102,7 @@ static u64 __read_mostly cr4_reserved_bi
+
+ static void update_cr8_intercept(struct kvm_vcpu *vcpu);
+ static void process_nmi(struct kvm_vcpu *vcpu);
++static void process_smi(struct kvm_vcpu *vcpu);
+ static void enter_smm(struct kvm_vcpu *vcpu);
+ static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
+ static void store_regs(struct kvm_vcpu *vcpu);
+@@ -3772,6 +3773,10 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_
+ {
+ process_nmi(vcpu);
+
++
++ if (kvm_check_request(KVM_REQ_SMI, vcpu))
++ process_smi(vcpu);
++
+ /*
+ * The API doesn't provide the instruction length for software
+ * exceptions, so don't report them. As long as the guest RIP
--- /dev/null
+From 98dd2f108e448988d91e296173e773b06fb978b8 Mon Sep 17 00:00:00 2001
+From: Like Xu <like.xu@linux.intel.com>
+Date: Wed, 30 Dec 2020 16:19:16 +0800
+Subject: KVM: x86/pmu: Fix HW_REF_CPU_CYCLES event pseudo-encoding in intel_arch_events[]
+
+From: Like Xu <like.xu@linux.intel.com>
+
+commit 98dd2f108e448988d91e296173e773b06fb978b8 upstream.
+
+The HW_REF_CPU_CYCLES event on the fixed counter 2 is pseudo-encoded as
+0x0300 in the intel_perfmon_event_map[]. Correct its usage.
+
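+The split of the pseudo-encoding into the { event_select, unit_mask }
+pair stored in intel_arch_events[] can be sanity-checked standalone:
+
+  #include <stdint.h>
+  #include <stdio.h>
+
+  int main(void)
+  {
+      uint16_t enc = 0x0300; /* HW_REF_CPU_CYCLES pseudo-encoding */
+      uint8_t event_select = enc & 0xff; /* low byte:  0x00 */
+      uint8_t unit_mask = enc >> 8;      /* high byte: 0x03, not 0x30 */
+      printf("{ 0x%02x, 0x%02x }\n", event_select, unit_mask);
+      return 0;
+  }
+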
+Fixes: 62079d8a4312 ("KVM: PMU: add proper support for fixed counter 2")
+Signed-off-by: Like Xu <like.xu@linux.intel.com>
+Message-Id: <20201230081916.63417-1-like.xu@linux.intel.com>
+Reviewed-by: Sean Christopherson <seanjc@google.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/vmx/pmu_intel.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kvm/vmx/pmu_intel.c
++++ b/arch/x86/kvm/vmx/pmu_intel.c
+@@ -26,7 +26,7 @@ static struct kvm_event_hw_type_mapping
+ [4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES },
+ [5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
+ [6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
+- [7] = { 0x00, 0x30, PERF_COUNT_HW_REF_CPU_CYCLES },
++ [7] = { 0x00, 0x03, PERF_COUNT_HW_REF_CPU_CYCLES },
+ };
+
+ /* mapping between fixed pmc index and intel_arch_events array */
--- /dev/null
+From e61ab2a320c3dfd6209efe18a575979e07470597 Mon Sep 17 00:00:00 2001
+From: Like Xu <like.xu@linux.intel.com>
+Date: Mon, 18 Jan 2021 10:58:00 +0800
+Subject: KVM: x86/pmu: Fix UBSAN shift-out-of-bounds warning in intel_pmu_refresh()
+
+From: Like Xu <like.xu@linux.intel.com>
+
+commit e61ab2a320c3dfd6209efe18a575979e07470597 upstream.
+
+We know the vPMU will not work properly when (1) the guest bit_width(s)
+of the [gp|fixed] counters are greater than the host ones, or (2) the
+guest's requested architectural events exceed the range supported by the
+host. We can therefore set up a smaller left shift value and refresh
+the guest cpuid entry, fixing the following UBSAN shift-out-of-bounds
+warning (a sketch of the clamp follows the trace below):
+
+shift exponent 197 is too large for 64-bit type 'long long unsigned int'
+
+Call Trace:
+ __dump_stack lib/dump_stack.c:79 [inline]
+ dump_stack+0x107/0x163 lib/dump_stack.c:120
+ ubsan_epilogue+0xb/0x5a lib/ubsan.c:148
+ __ubsan_handle_shift_out_of_bounds.cold+0xb1/0x181 lib/ubsan.c:395
+ intel_pmu_refresh.cold+0x75/0x99 arch/x86/kvm/vmx/pmu_intel.c:348
+ kvm_vcpu_after_set_cpuid+0x65a/0xf80 arch/x86/kvm/cpuid.c:177
+ kvm_vcpu_ioctl_set_cpuid2+0x160/0x440 arch/x86/kvm/cpuid.c:308
+ kvm_arch_vcpu_ioctl+0x11b6/0x2d70 arch/x86/kvm/x86.c:4709
+ kvm_vcpu_ioctl+0x7b9/0xdb0 arch/x86/kvm/../../../virt/kvm/kvm_main.c:3386
+ vfs_ioctl fs/ioctl.c:48 [inline]
+ __do_sys_ioctl fs/ioctl.c:753 [inline]
+ __se_sys_ioctl fs/ioctl.c:739 [inline]
+ __x64_sys_ioctl+0x193/0x200 fs/ioctl.c:739
+ do_syscall_64+0x2d/0x70 arch/x86/entry/common.c:46
+ entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
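+A standalone sketch of the clamp (the host width of 48 below is just
+an example value):
+
+  #include <stdint.h>
+  #include <stdio.h>
+
+  /* A shift count >= 64 on a 64-bit type is undefined behaviour, so
+   * the guest-supplied width must be clamped to the host's first. */
+  static uint64_t counter_mask(unsigned int guest_w, unsigned int host_w)
+  {
+      unsigned int w = guest_w < host_w ? guest_w : host_w;
+
+      return ((uint64_t)1 << w) - 1; /* defined for w <= 63 */
+  }
+
+  int main(void)
+  {
+      /* Unclamped, the guest cpuid width of 197 would shift by 197. */
+      printf("0x%llx\n", (unsigned long long)counter_mask(197, 48));
+      return 0;
+  }
+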
+Reported-by: syzbot+ae488dc136a4cc6ba32b@syzkaller.appspotmail.com
+Signed-off-by: Like Xu <like.xu@linux.intel.com>
+Message-Id: <20210118025800.34620-1-like.xu@linux.intel.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/vmx/pmu_intel.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/arch/x86/kvm/vmx/pmu_intel.c
++++ b/arch/x86/kvm/vmx/pmu_intel.c
+@@ -296,7 +296,9 @@ static void intel_pmu_refresh(struct kvm
+
+ pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
+ x86_pmu.num_counters_gp);
++ eax.split.bit_width = min_t(int, eax.split.bit_width, x86_pmu.bit_width_gp);
+ pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
++ eax.split.mask_length = min_t(int, eax.split.mask_length, x86_pmu.events_mask_len);
+ pmu->available_event_types = ~entry->ebx &
+ ((1ull << eax.split.mask_length) - 1);
+
+@@ -306,6 +308,8 @@ static void intel_pmu_refresh(struct kvm
+ pmu->nr_arch_fixed_counters =
+ min_t(int, edx.split.num_counters_fixed,
+ x86_pmu.num_counters_fixed);
++ edx.split.bit_width_fixed = min_t(int,
++ edx.split.bit_width_fixed, x86_pmu.bit_width_fixed);
+ pmu->counter_bitmask[KVM_PMC_FIXED] =
+ ((u64)1 << edx.split.bit_width_fixed) - 1;
+ }
net-usb-qmi_wwan-added-support-for-thales-cinterion-plsx3-modem-family.patch
s390-vfio-ap-no-need-to-disable-irq-after-queue-reset.patch
pm-hibernate-flush-swap-writer-after-marking.patch
+drivers-soc-atmel-avoid-calling-at91_soc_init-on-non-at91-socs.patch
+drivers-soc-atmel-add-null-entry-at-the-end-of-at91_soc_allowed_list.patch
+btrfs-fix-possible-free-space-tree-corruption-with-online-conversion.patch
+kvm-x86-pmu-fix-hw_ref_cpu_cycles-event-pseudo-encoding-in-intel_arch_events.patch
+kvm-x86-pmu-fix-ubsan-shift-out-of-bounds-warning-in-intel_pmu_refresh.patch
+kvm-nvmx-sync-unsync-d-vmcs02-state-to-vmcs12-on-migration.patch
+kvm-x86-get-smi-pending-status-correctly.patch