--- /dev/null
+From 4b654acdae850f48b8250b9a578a4eaa518c7a6f Mon Sep 17 00:00:00 2001
+From: Qu Wenruo <wqu@suse.com>
+Date: Thu, 10 Oct 2019 10:39:26 +0800
+Subject: btrfs: block-group: Fix a memory leak due to missing btrfs_put_block_group()
+
+From: Qu Wenruo <wqu@suse.com>
+
+commit 4b654acdae850f48b8250b9a578a4eaa518c7a6f upstream.
+
+In btrfs_read_block_groups(), if we have an invalid block group which
+has mixed type (DATA|METADATA) while the fs doesn't have MIXED_GROUPS
+feature, we error out without freeing the block group cache.
+
+This patch will add the missing btrfs_put_block_group() to prevent
+memory leak.
+
+Note for stable backports: the file to patch in versions <= 5.3 is
+fs/btrfs/extent-tree.c
+
+Fixes: 49303381f19a ("Btrfs: bail out if block group has different mixed flag")
+CC: stable@vger.kernel.org # 4.9+
+Reviewed-by: Anand Jain <anand.jain@oracle.com>
+Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
+Signed-off-by: Qu Wenruo <wqu@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/extent-tree.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -10255,6 +10255,7 @@ int btrfs_read_block_groups(struct btrfs
+ btrfs_err(info,
+ "bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups",
+ cache->key.objectid);
++ btrfs_put_block_group(cache);
+ ret = -EINVAL;
+ goto error;
+ }
--- /dev/null
+From 03d9a9fe3f3aec508e485dd3dcfa1e99933b4bdb Mon Sep 17 00:00:00 2001
+From: Roberto Bergantinos Corpas <rbergant@redhat.com>
+Date: Mon, 14 Oct 2019 10:59:23 +0200
+Subject: CIFS: avoid using MID 0xFFFF
+
+From: Roberto Bergantinos Corpas <rbergant@redhat.com>
+
+commit 03d9a9fe3f3aec508e485dd3dcfa1e99933b4bdb upstream.
+
+According to MS-CIFS specification MID 0xFFFF should not be used by the
+CIFS client, but we actually do. Besides, this has proven to cause races
+leading to oops between SendReceive2/cifs_demultiplex_thread. On SMB1,
+MID is a 2 byte value easy to reach in CurrentMid which may conflict with
+an oplock break notification request coming from server
+
+Signed-off-by: Roberto Bergantinos Corpas <rbergant@redhat.com>
+Reviewed-by: Ronnie Sahlberg <lsahlber@redhat.com>
+Reviewed-by: Aurelien Aptel <aaptel@suse.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+CC: Stable <stable@vger.kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/smb1ops.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/fs/cifs/smb1ops.c
++++ b/fs/cifs/smb1ops.c
+@@ -181,6 +181,9 @@ cifs_get_next_mid(struct TCP_Server_Info
+ /* we do not want to loop forever */
+ last_mid = cur_mid;
+ cur_mid++;
++ /* avoid 0xFFFF MID */
++ if (cur_mid == 0xffff)
++ cur_mid++;
+
+ /*
+ * This nested loop looks more expensive than it is.
--- /dev/null
+From 65650b35133ff20f0c9ef0abd5c3c66dbce3ae57 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+Date: Wed, 9 Oct 2019 01:29:10 +0200
+Subject: cpufreq: Avoid cpufreq_suspend() deadlock on system shutdown
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+commit 65650b35133ff20f0c9ef0abd5c3c66dbce3ae57 upstream.
+
+It is incorrect to set the cpufreq syscore shutdown callback pointer
+to cpufreq_suspend(), because that function cannot be run in the
+syscore stage of system shutdown for two reasons: (a) it may attempt
+to carry out actions depending on devices that have already been shut
+down at that point and (b) the RCU synchronization carried out by it
+may not be able to make progress then.
+
+The latter issue has been present since commit 45975c7d21a1 ("rcu:
+Define RCU-sched API in terms of RCU for Tree RCU PREEMPT builds"),
+but the former one has been there since commit 90de2a4aa9f3 ("cpufreq:
+suspend cpufreq governors on shutdown") regardless.
+
+Fix that by dropping cpufreq_syscore_ops altogether and making
+device_shutdown() call cpufreq_suspend() directly before shutting
+down devices, which is along the lines of what system-wide power
+management does.
+
+Fixes: 45975c7d21a1 ("rcu: Define RCU-sched API in terms of RCU for Tree RCU PREEMPT builds")
+Fixes: 90de2a4aa9f3 ("cpufreq: suspend cpufreq governors on shutdown")
+Reported-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Tested-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
+Cc: 4.0+ <stable@vger.kernel.org> # 4.0+
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/base/core.c | 3 +++
+ drivers/cpufreq/cpufreq.c | 10 ----------
+ 2 files changed, 3 insertions(+), 10 deletions(-)
+
+--- a/drivers/base/core.c
++++ b/drivers/base/core.c
+@@ -10,6 +10,7 @@
+ *
+ */
+
++#include <linux/cpufreq.h>
+ #include <linux/device.h>
+ #include <linux/err.h>
+ #include <linux/fwnode.h>
+@@ -2845,6 +2846,8 @@ void device_shutdown(void)
+ wait_for_device_probe();
+ device_block_probing();
+
++ cpufreq_suspend();
++
+ spin_lock(&devices_kset->list_lock);
+ /*
+ * Walk the devices list backward, shutting down each in turn.
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -2570,14 +2570,6 @@ int cpufreq_unregister_driver(struct cpu
+ }
+ EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
+
+-/*
+- * Stop cpufreq at shutdown to make sure it isn't holding any locks
+- * or mutexes when secondary CPUs are halted.
+- */
+-static struct syscore_ops cpufreq_syscore_ops = {
+- .shutdown = cpufreq_suspend,
+-};
+-
+ struct kobject *cpufreq_global_kobject;
+ EXPORT_SYMBOL(cpufreq_global_kobject);
+
+@@ -2589,8 +2581,6 @@ static int __init cpufreq_core_init(void
+ cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
+ BUG_ON(!cpufreq_global_kobject);
+
+- register_syscore_ops(&cpufreq_syscore_ops);
+-
+ return 0;
+ }
+ module_param(off, int, 0444);
--- /dev/null
+From a468f2dbf921d02f5107378501693137a812999b Mon Sep 17 00:00:00 2001
+From: Junaid Shahid <junaids@google.com>
+Date: Thu, 26 Apr 2018 13:09:50 -0700
+Subject: kvm: apic: Flush TLB after APIC mode/address change if VPIDs are in use
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Junaid Shahid <junaids@google.com>
+
+commit a468f2dbf921d02f5107378501693137a812999b upstream.
+
+Currently, KVM flushes the TLB after a change to the APIC access page
+address or the APIC mode when EPT mode is enabled. However, even in
+shadow paging mode, a TLB flush is needed if VPIDs are being used, as
+specified in the Intel SDM Section 29.4.5.
+
+So replace vmx_flush_tlb_ept_only() with vmx_flush_tlb(), which will
+flush if either EPT or VPIDs are in use.
+
+Signed-off-by: Junaid Shahid <junaids@google.com>
+Reviewed-by: Jim Mattson <jmattson@google.com>
+Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
+Cc: "Jitindar SIngh, Suraj" <surajjs@amazon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/vmx.c | 14 ++++----------
+ 1 file changed, 4 insertions(+), 10 deletions(-)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -4444,12 +4444,6 @@ static void vmx_flush_tlb(struct kvm_vcp
+ __vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid, invalidate_gpa);
+ }
+
+-static void vmx_flush_tlb_ept_only(struct kvm_vcpu *vcpu)
+-{
+- if (enable_ept)
+- vmx_flush_tlb(vcpu, true);
+-}
+-
+ static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
+ {
+ ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
+@@ -9320,7 +9314,7 @@ static void vmx_set_virtual_x2apic_mode(
+ } else {
+ sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
+ sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+- vmx_flush_tlb_ept_only(vcpu);
++ vmx_flush_tlb(vcpu, true);
+ }
+ vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control);
+
+@@ -9348,7 +9342,7 @@ static void vmx_set_apic_access_page_add
+ !nested_cpu_has2(get_vmcs12(&vmx->vcpu),
+ SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
+ vmcs_write64(APIC_ACCESS_ADDR, hpa);
+- vmx_flush_tlb_ept_only(vcpu);
++ vmx_flush_tlb(vcpu, true);
+ }
+ }
+
+@@ -11243,7 +11237,7 @@ static int prepare_vmcs02(struct kvm_vcp
+ }
+ } else if (nested_cpu_has2(vmcs12,
+ SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
+- vmx_flush_tlb_ept_only(vcpu);
++ vmx_flush_tlb(vcpu, true);
+ }
+
+ /*
+@@ -12198,7 +12192,7 @@ static void nested_vmx_vmexit(struct kvm
+ } else if (!nested_cpu_has_ept(vmcs12) &&
+ nested_cpu_has2(vmcs12,
+ SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
+- vmx_flush_tlb_ept_only(vcpu);
++ vmx_flush_tlb(vcpu, true);
+ }
+
+ /* This is needed for same reason as it was needed in prepare_vmcs02 */
--- /dev/null
+From 8d860bbeedef97fe981d28fa7b71d77f3b29563f Mon Sep 17 00:00:00 2001
+From: Jim Mattson <jmattson@google.com>
+Date: Wed, 9 May 2018 16:56:05 -0400
+Subject: kvm: vmx: Basic APIC virtualization controls have three settings
+
+From: Jim Mattson <jmattson@google.com>
+
+commit 8d860bbeedef97fe981d28fa7b71d77f3b29563f upstream.
+
+Previously, we toggled between SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE
+and SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES, depending on whether or
+not the EXTD bit was set in MSR_IA32_APICBASE. However, if the local
+APIC is disabled, we should not set either of these APIC
+virtualization control bits.
+
+Signed-off-by: Jim Mattson <jmattson@google.com>
+Signed-off-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Cc: "Jitindar SIngh, Suraj" <surajjs@amazon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/kvm_host.h | 2 -
+ arch/x86/kvm/lapic.c | 12 ++++------
+ arch/x86/kvm/svm.c | 4 +--
+ arch/x86/kvm/vmx.c | 48 +++++++++++++++++++++++++---------------
+ 4 files changed, 38 insertions(+), 28 deletions(-)
+
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -998,7 +998,7 @@ struct kvm_x86_ops {
+ void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
+ void (*hwapic_isr_update)(struct kvm_vcpu *vcpu, int isr);
+ void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
+- void (*set_virtual_x2apic_mode)(struct kvm_vcpu *vcpu, bool set);
++ void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu);
+ void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu, hpa_t hpa);
+ void (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
+ int (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -1967,13 +1967,11 @@ void kvm_lapic_set_base(struct kvm_vcpu
+ }
+ }
+
+- if ((old_value ^ value) & X2APIC_ENABLE) {
+- if (value & X2APIC_ENABLE) {
+- kvm_apic_set_x2apic_id(apic, vcpu->vcpu_id);
+- kvm_x86_ops->set_virtual_x2apic_mode(vcpu, true);
+- } else
+- kvm_x86_ops->set_virtual_x2apic_mode(vcpu, false);
+- }
++ if (((old_value ^ value) & X2APIC_ENABLE) && (value & X2APIC_ENABLE))
++ kvm_apic_set_x2apic_id(apic, vcpu->vcpu_id);
++
++ if ((old_value ^ value) & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE))
++ kvm_x86_ops->set_virtual_apic_mode(vcpu);
+
+ apic->base_address = apic->vcpu->arch.apic_base &
+ MSR_IA32_APICBASE_BASE;
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -4589,7 +4589,7 @@ static void update_cr8_intercept(struct
+ set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
+ }
+
+-static void svm_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
++static void svm_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
+ {
+ return;
+ }
+@@ -5713,7 +5713,7 @@ static struct kvm_x86_ops svm_x86_ops __
+ .enable_nmi_window = enable_nmi_window,
+ .enable_irq_window = enable_irq_window,
+ .update_cr8_intercept = update_cr8_intercept,
+- .set_virtual_x2apic_mode = svm_set_virtual_x2apic_mode,
++ .set_virtual_apic_mode = svm_set_virtual_apic_mode,
+ .get_enable_apicv = svm_get_enable_apicv,
+ .refresh_apicv_exec_ctrl = svm_refresh_apicv_exec_ctrl,
+ .load_eoi_exitmap = svm_load_eoi_exitmap,
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -591,7 +591,8 @@ struct nested_vmx {
+ */
+ bool sync_shadow_vmcs;
+
+- bool change_vmcs01_virtual_x2apic_mode;
++ bool change_vmcs01_virtual_apic_mode;
++
+ /* L2 must run next, and mustn't decide to exit to L1. */
+ bool nested_run_pending;
+
+@@ -9290,31 +9291,43 @@ static void update_cr8_intercept(struct
+ vmcs_write32(TPR_THRESHOLD, irr);
+ }
+
+-static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
++static void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
+ {
+ u32 sec_exec_control;
+
++ if (!lapic_in_kernel(vcpu))
++ return;
++
+ /* Postpone execution until vmcs01 is the current VMCS. */
+ if (is_guest_mode(vcpu)) {
+- to_vmx(vcpu)->nested.change_vmcs01_virtual_x2apic_mode = true;
++ to_vmx(vcpu)->nested.change_vmcs01_virtual_apic_mode = true;
+ return;
+ }
+
+- if (!cpu_has_vmx_virtualize_x2apic_mode())
+- return;
+-
+ if (!cpu_need_tpr_shadow(vcpu))
+ return;
+
+ sec_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
++ sec_exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
++ SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
+
+- if (set) {
+- sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+- sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
+- } else {
+- sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
+- sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+- vmx_flush_tlb(vcpu, true);
++ switch (kvm_get_apic_mode(vcpu)) {
++ case LAPIC_MODE_INVALID:
++ WARN_ONCE(true, "Invalid local APIC state");
++ case LAPIC_MODE_DISABLED:
++ break;
++ case LAPIC_MODE_XAPIC:
++ if (flexpriority_enabled) {
++ sec_exec_control |=
++ SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
++ vmx_flush_tlb(vcpu, true);
++ }
++ break;
++ case LAPIC_MODE_X2APIC:
++ if (cpu_has_vmx_virtualize_x2apic_mode())
++ sec_exec_control |=
++ SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
++ break;
+ }
+ vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control);
+
+@@ -12185,10 +12198,9 @@ static void nested_vmx_vmexit(struct kvm
+ if (kvm_has_tsc_control)
+ decache_tsc_multiplier(vmx);
+
+- if (vmx->nested.change_vmcs01_virtual_x2apic_mode) {
+- vmx->nested.change_vmcs01_virtual_x2apic_mode = false;
+- vmx_set_virtual_x2apic_mode(vcpu,
+- vcpu->arch.apic_base & X2APIC_ENABLE);
++ if (vmx->nested.change_vmcs01_virtual_apic_mode) {
++ vmx->nested.change_vmcs01_virtual_apic_mode = false;
++ vmx_set_virtual_apic_mode(vcpu);
+ } else if (!nested_cpu_has_ept(vmcs12) &&
+ nested_cpu_has2(vmcs12,
+ SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
+@@ -12749,7 +12761,7 @@ static struct kvm_x86_ops vmx_x86_ops __
+ .enable_nmi_window = enable_nmi_window,
+ .enable_irq_window = enable_irq_window,
+ .update_cr8_intercept = update_cr8_intercept,
+- .set_virtual_x2apic_mode = vmx_set_virtual_x2apic_mode,
++ .set_virtual_apic_mode = vmx_set_virtual_apic_mode,
+ .set_apic_access_page_addr = vmx_set_apic_access_page_addr,
+ .get_enable_apicv = vmx_get_enable_apicv,
+ .refresh_apicv_exec_ctrl = vmx_refresh_apicv_exec_ctrl,
--- /dev/null
+From 588716494258899389206fa50426e78cc9df89b9 Mon Sep 17 00:00:00 2001
+From: Jim Mattson <jmattson@google.com>
+Date: Wed, 9 May 2018 16:56:04 -0400
+Subject: kvm: vmx: Introduce lapic_mode enumeration
+
+From: Jim Mattson <jmattson@google.com>
+
+commit 588716494258899389206fa50426e78cc9df89b9 upstream.
+
+The local APIC can be in one of three modes: disabled, xAPIC or
+x2APIC. (A fourth mode, "invalid," is included for completeness.)
+
+Using the new enumeration can make some of the APIC mode logic easier
+to read. In kvm_set_apic_base, for instance, it is clear that one
+cannot transition directly from x2APIC mode to xAPIC mode or directly
+from APIC disabled to x2APIC mode.
+
+Signed-off-by: Jim Mattson <jmattson@google.com>
+Signed-off-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
+[Check invalid bits even if msr_info->host_initiated. Reported by
+ Wanpeng Li. - Paolo]
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Cc: "Jitindar SIngh, Suraj" <surajjs@amazon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/lapic.h | 14 ++++++++++++++
+ arch/x86/kvm/x86.c | 26 +++++++++++++++-----------
+ 2 files changed, 29 insertions(+), 11 deletions(-)
+
+--- a/arch/x86/kvm/lapic.h
++++ b/arch/x86/kvm/lapic.h
+@@ -16,6 +16,13 @@
+ #define APIC_BUS_CYCLE_NS 1
+ #define APIC_BUS_FREQUENCY (1000000000ULL / APIC_BUS_CYCLE_NS)
+
++enum lapic_mode {
++ LAPIC_MODE_DISABLED = 0,
++ LAPIC_MODE_INVALID = X2APIC_ENABLE,
++ LAPIC_MODE_XAPIC = MSR_IA32_APICBASE_ENABLE,
++ LAPIC_MODE_X2APIC = MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE,
++};
++
+ struct kvm_timer {
+ struct hrtimer timer;
+ s64 period; /* unit: ns */
+@@ -89,6 +96,7 @@ u64 kvm_get_apic_base(struct kvm_vcpu *v
+ int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
+ int kvm_apic_get_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s);
+ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s);
++enum lapic_mode kvm_get_apic_mode(struct kvm_vcpu *vcpu);
+ int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu);
+
+ u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu);
+@@ -220,4 +228,10 @@ void kvm_lapic_switch_to_hv_timer(struct
+ void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu);
+ bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu);
+ void kvm_lapic_restart_hv_timer(struct kvm_vcpu *vcpu);
++
++static inline enum lapic_mode kvm_apic_mode(u64 apic_base)
++{
++ return apic_base & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE);
++}
++
+ #endif
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -306,23 +306,27 @@ u64 kvm_get_apic_base(struct kvm_vcpu *v
+ }
+ EXPORT_SYMBOL_GPL(kvm_get_apic_base);
+
++enum lapic_mode kvm_get_apic_mode(struct kvm_vcpu *vcpu)
++{
++ return kvm_apic_mode(kvm_get_apic_base(vcpu));
++}
++EXPORT_SYMBOL_GPL(kvm_get_apic_mode);
++
+ int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ {
+- u64 old_state = vcpu->arch.apic_base &
+- (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE);
+- u64 new_state = msr_info->data &
+- (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE);
++ enum lapic_mode old_mode = kvm_get_apic_mode(vcpu);
++ enum lapic_mode new_mode = kvm_apic_mode(msr_info->data);
+ u64 reserved_bits = ((~0ULL) << cpuid_maxphyaddr(vcpu)) | 0x2ff |
+ (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC) ? 0 : X2APIC_ENABLE);
+
+- if ((msr_info->data & reserved_bits) || new_state == X2APIC_ENABLE)
+- return 1;
+- if (!msr_info->host_initiated &&
+- ((new_state == MSR_IA32_APICBASE_ENABLE &&
+- old_state == (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE)) ||
+- (new_state == (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE) &&
+- old_state == 0)))
++ if ((msr_info->data & reserved_bits) != 0 || new_mode == LAPIC_MODE_INVALID)
+ return 1;
++ if (!msr_info->host_initiated) {
++ if (old_mode == LAPIC_MODE_X2APIC && new_mode == LAPIC_MODE_XAPIC)
++ return 1;
++ if (old_mode == LAPIC_MODE_DISABLED && new_mode == LAPIC_MODE_X2APIC)
++ return 1;
++ }
+
+ kvm_lapic_set_base(vcpu, msr_info->data);
+ return 0;
--- /dev/null
+From c2ba05ccfde2f069a66c0462e5b5ef8a517dcc9c Mon Sep 17 00:00:00 2001
+From: Wanpeng Li <wanpeng.li@hotmail.com>
+Date: Tue, 12 Dec 2017 17:33:03 -0800
+Subject: KVM: X86: introduce invalidate_gpa argument to tlb flush
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Wanpeng Li <wanpeng.li@hotmail.com>
+
+commit c2ba05ccfde2f069a66c0462e5b5ef8a517dcc9c upstream.
+
+Introduce a new bool invalidate_gpa argument to kvm_x86_ops->tlb_flush,
+it will be used by later patches to just flush guest tlb.
+
+For VMX, this will use INVVPID instead of INVEPT, which will invalidate
+combined mappings while keeping guest-physical mappings.
+
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: Radim Krčmář <rkrcmar@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: "Jitindar SIngh, Suraj" <surajjs@amazon.com>
+Signed-off-by: Wanpeng Li <wanpeng.li@hotmail.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/kvm_host.h | 2 +-
+ arch/x86/kvm/svm.c | 14 +++++++-------
+ arch/x86/kvm/vmx.c | 21 +++++++++++----------
+ arch/x86/kvm/x86.c | 6 +++---
+ 4 files changed, 22 insertions(+), 21 deletions(-)
+
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -973,7 +973,7 @@ struct kvm_x86_ops {
+ unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
+ void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
+
+- void (*tlb_flush)(struct kvm_vcpu *vcpu);
++ void (*tlb_flush)(struct kvm_vcpu *vcpu, bool invalidate_gpa);
+
+ void (*run)(struct kvm_vcpu *vcpu);
+ int (*handle_exit)(struct kvm_vcpu *vcpu);
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -299,7 +299,7 @@ static int vgif = true;
+ module_param(vgif, int, 0444);
+
+ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
+-static void svm_flush_tlb(struct kvm_vcpu *vcpu);
++static void svm_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa);
+ static void svm_complete_interrupts(struct vcpu_svm *svm);
+
+ static int nested_svm_exit_handled(struct vcpu_svm *svm);
+@@ -2097,7 +2097,7 @@ static int svm_set_cr4(struct kvm_vcpu *
+ return 1;
+
+ if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
+- svm_flush_tlb(vcpu);
++ svm_flush_tlb(vcpu, true);
+
+ vcpu->arch.cr4 = cr4;
+ if (!npt_enabled)
+@@ -2438,7 +2438,7 @@ static void nested_svm_set_tdp_cr3(struc
+
+ svm->vmcb->control.nested_cr3 = __sme_set(root);
+ mark_dirty(svm->vmcb, VMCB_NPT);
+- svm_flush_tlb(vcpu);
++ svm_flush_tlb(vcpu, true);
+ }
+
+ static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
+@@ -3111,7 +3111,7 @@ static bool nested_svm_vmrun(struct vcpu
+ svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions;
+ svm->nested.intercept = nested_vmcb->control.intercept;
+
+- svm_flush_tlb(&svm->vcpu);
++ svm_flush_tlb(&svm->vcpu, true);
+ svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
+ if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
+ svm->vcpu.arch.hflags |= HF_VINTR_MASK;
+@@ -4947,7 +4947,7 @@ static int svm_set_tss_addr(struct kvm *
+ return 0;
+ }
+
+-static void svm_flush_tlb(struct kvm_vcpu *vcpu)
++static void svm_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
+ {
+ struct vcpu_svm *svm = to_svm(vcpu);
+
+@@ -5288,7 +5288,7 @@ static void svm_set_cr3(struct kvm_vcpu
+
+ svm->vmcb->save.cr3 = __sme_set(root);
+ mark_dirty(svm->vmcb, VMCB_CR);
+- svm_flush_tlb(vcpu);
++ svm_flush_tlb(vcpu, true);
+ }
+
+ static void set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long root)
+@@ -5302,7 +5302,7 @@ static void set_tdp_cr3(struct kvm_vcpu
+ svm->vmcb->save.cr3 = kvm_read_cr3(vcpu);
+ mark_dirty(svm->vmcb, VMCB_CR);
+
+- svm_flush_tlb(vcpu);
++ svm_flush_tlb(vcpu, true);
+ }
+
+ static int is_disabled(void)
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -4427,9 +4427,10 @@ static void exit_lmode(struct kvm_vcpu *
+
+ #endif
+
+-static inline void __vmx_flush_tlb(struct kvm_vcpu *vcpu, int vpid)
++static inline void __vmx_flush_tlb(struct kvm_vcpu *vcpu, int vpid,
++ bool invalidate_gpa)
+ {
+- if (enable_ept) {
++ if (enable_ept && (invalidate_gpa || !enable_vpid)) {
+ if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+ return;
+ ept_sync_context(construct_eptp(vcpu, vcpu->arch.mmu.root_hpa));
+@@ -4438,15 +4439,15 @@ static inline void __vmx_flush_tlb(struc
+ }
+ }
+
+-static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
++static void vmx_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
+ {
+- __vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid);
++ __vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid, invalidate_gpa);
+ }
+
+ static void vmx_flush_tlb_ept_only(struct kvm_vcpu *vcpu)
+ {
+ if (enable_ept)
+- vmx_flush_tlb(vcpu);
++ vmx_flush_tlb(vcpu, true);
+ }
+
+ static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
+@@ -4644,7 +4645,7 @@ static void vmx_set_cr3(struct kvm_vcpu
+ ept_load_pdptrs(vcpu);
+ }
+
+- vmx_flush_tlb(vcpu);
++ vmx_flush_tlb(vcpu, true);
+ vmcs_writel(GUEST_CR3, guest_cr3);
+ }
+
+@@ -8314,7 +8315,7 @@ static int handle_invvpid(struct kvm_vcp
+ return kvm_skip_emulated_instruction(vcpu);
+ }
+
+- __vmx_flush_tlb(vcpu, vmx->nested.vpid02);
++ __vmx_flush_tlb(vcpu, vmx->nested.vpid02, true);
+ nested_vmx_succeed(vcpu);
+
+ return kvm_skip_emulated_instruction(vcpu);
+@@ -11214,11 +11215,11 @@ static int prepare_vmcs02(struct kvm_vcp
+ vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02);
+ if (vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
+ vmx->nested.last_vpid = vmcs12->virtual_processor_id;
+- __vmx_flush_tlb(vcpu, to_vmx(vcpu)->nested.vpid02);
++ __vmx_flush_tlb(vcpu, to_vmx(vcpu)->nested.vpid02, true);
+ }
+ } else {
+ vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
+- vmx_flush_tlb(vcpu);
++ vmx_flush_tlb(vcpu, true);
+ }
+
+ }
+@@ -11921,7 +11922,7 @@ static void load_vmcs12_host_state(struc
+ * L1's vpid. TODO: move to a more elaborate solution, giving
+ * each L2 its own vpid and exposing the vpid feature to L1.
+ */
+- vmx_flush_tlb(vcpu);
++ vmx_flush_tlb(vcpu, true);
+ }
+ /* Restore posted intr vector. */
+ if (nested_cpu_has_posted_intr(vmcs12))
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -6943,10 +6943,10 @@ static void vcpu_scan_ioapic(struct kvm_
+ kvm_x86_ops->load_eoi_exitmap(vcpu, eoi_exit_bitmap);
+ }
+
+-static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu)
++static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
+ {
+ ++vcpu->stat.tlb_flush;
+- kvm_x86_ops->tlb_flush(vcpu);
++ kvm_x86_ops->tlb_flush(vcpu, invalidate_gpa);
+ }
+
+ void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
+@@ -7017,7 +7017,7 @@ static int vcpu_enter_guest(struct kvm_v
+ if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
+ kvm_mmu_sync_roots(vcpu);
+ if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
+- kvm_vcpu_flush_tlb(vcpu);
++ kvm_vcpu_flush_tlb(vcpu, true);
+ if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
+ vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
+ r = 0;
--- /dev/null
+From 28c9fac09ab0147158db0baeec630407a5e9b892 Mon Sep 17 00:00:00 2001
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Date: Sat, 5 Oct 2019 13:21:01 +0200
+Subject: memstick: jmb38x_ms: Fix an error handling path in 'jmb38x_ms_probe()'
+
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+
+commit 28c9fac09ab0147158db0baeec630407a5e9b892 upstream.
+
+If 'jmb38x_ms_count_slots()' returns 0, we must undo the previous
+'pci_request_regions()' call.
+
+Goto 'err_out_int' to fix it.
+
+Fixes: 60fdd931d577 ("memstick: add support for JMicron jmb38x MemoryStick host controller")
+Cc: stable@vger.kernel.org
+Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/memstick/host/jmb38x_ms.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/memstick/host/jmb38x_ms.c
++++ b/drivers/memstick/host/jmb38x_ms.c
+@@ -947,7 +947,7 @@ static int jmb38x_ms_probe(struct pci_de
+ if (!cnt) {
+ rc = -ENODEV;
+ pci_dev_busy = 1;
+- goto err_out;
++ goto err_out_int;
+ }
+
+ jm = kzalloc(sizeof(struct jmb38x_ms)
--- /dev/null
+From 45144d42f299455911cc29366656c7324a3a7c97 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+Date: Mon, 14 Oct 2019 13:25:00 +0200
+Subject: PCI: PM: Fix pci_power_up()
+
+From: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+commit 45144d42f299455911cc29366656c7324a3a7c97 upstream.
+
+There is an arbitrary difference between the system resume and
+runtime resume code paths for PCI devices regarding the delay to
+apply when switching the devices from D3cold to D0.
+
+Namely, pci_restore_standard_config() used in the runtime resume
+code path calls pci_set_power_state() which in turn invokes
+__pci_start_power_transition() to power up the device through the
+platform firmware and that function applies the transition delay
+(as per PCI Express Base Specification Revision 2.0, Section 6.6.1).
+However, pci_pm_default_resume_early() used in the system resume
+code path calls pci_power_up() which doesn't apply the delay at
+all and that causes issues to occur during resume from
+suspend-to-idle on some systems where the delay is required.
+
+Since there is no reason for that difference to exist, modify
+pci_power_up() to follow pci_set_power_state() more closely and
+invoke __pci_start_power_transition() from there to call the
+platform firmware to power up the device (in case that's necessary).
+
+Fixes: db288c9c5f9d ("PCI / PM: restore the original behavior of pci_set_power_state()")
+Reported-by: Daniel Drake <drake@endlessm.com>
+Tested-by: Daniel Drake <drake@endlessm.com>
+Link: https://lore.kernel.org/linux-pm/CAD8Lp44TYxrMgPLkHCqF9hv6smEurMXvmmvmtyFhZ6Q4SE+dig@mail.gmail.com/T/#m21be74af263c6a34f36e0fc5c77c5449d9406925
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Acked-by: Bjorn Helgaas <bhelgaas@google.com>
+Cc: 3.10+ <stable@vger.kernel.org> # 3.10+
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/pci/pci.c | 24 +++++++++++-------------
+ 1 file changed, 11 insertions(+), 13 deletions(-)
+
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -749,19 +749,6 @@ void pci_update_current_state(struct pci
+ }
+
+ /**
+- * pci_power_up - Put the given device into D0 forcibly
+- * @dev: PCI device to power up
+- */
+-void pci_power_up(struct pci_dev *dev)
+-{
+- if (platform_pci_power_manageable(dev))
+- platform_pci_set_power_state(dev, PCI_D0);
+-
+- pci_raw_set_power_state(dev, PCI_D0);
+- pci_update_current_state(dev, PCI_D0);
+-}
+-
+-/**
+ * pci_platform_power_transition - Use platform to change device power state
+ * @dev: PCI device to handle.
+ * @state: State to put the device into.
+@@ -940,6 +927,17 @@ int pci_set_power_state(struct pci_dev *
+ EXPORT_SYMBOL(pci_set_power_state);
+
+ /**
++ * pci_power_up - Put the given device into D0 forcibly
++ * @dev: PCI device to power up
++ */
++void pci_power_up(struct pci_dev *dev)
++{
++ __pci_start_power_transition(dev, PCI_D0);
++ pci_raw_set_power_state(dev, PCI_D0);
++ pci_update_current_state(dev, PCI_D0);
++}
++
++/**
+ * pci_choose_state - Choose the power state of a PCI device
+ * @dev: PCI device to be suspended
+ * @state: target sleep state for the whole system. This is the value
--- /dev/null
+From 20504fa1d2ffd5d03cdd9dc9c9dd4ed4579b97ef Mon Sep 17 00:00:00 2001
+From: Patrick Williams <alpawi@amazon.com>
+Date: Tue, 1 Oct 2019 10:46:31 -0500
+Subject: pinctrl: armada-37xx: fix control of pins 32 and up
+
+From: Patrick Williams <alpawi@amazon.com>
+
+commit 20504fa1d2ffd5d03cdd9dc9c9dd4ed4579b97ef upstream.
+
+The 37xx configuration registers are only 32 bits long, so
+pins 32-35 spill over into the next register. The calculation
+for the register address was done, but the bitmask was not, so
+any configuration to pin 32 or above resulted in a bitmask that
+overflowed and performed no action.
+
+Fix the register / offset calculation to also adjust the offset.
+
+Fixes: 5715092a458c ("pinctrl: armada-37xx: Add gpio support")
+Signed-off-by: Patrick Williams <alpawi@amazon.com>
+Acked-by: Gregory CLEMENT <gregory.clement@bootlin.com>
+Cc: <stable@vger.kernel.org>
+Link: https://lore.kernel.org/r/20191001154634.96165-1-alpawi@amazon.com
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/pinctrl/mvebu/pinctrl-armada-37xx.c | 18 +++++++++---------
+ 1 file changed, 9 insertions(+), 9 deletions(-)
+
+--- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
++++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
+@@ -205,11 +205,11 @@ static const struct armada_37xx_pin_data
+ };
+
+ static inline void armada_37xx_update_reg(unsigned int *reg,
+- unsigned int offset)
++ unsigned int *offset)
+ {
+ /* We never have more than 2 registers */
+- if (offset >= GPIO_PER_REG) {
+- offset -= GPIO_PER_REG;
++ if (*offset >= GPIO_PER_REG) {
++ *offset -= GPIO_PER_REG;
+ *reg += sizeof(u32);
+ }
+ }
+@@ -373,7 +373,7 @@ static inline void armada_37xx_irq_updat
+ {
+ int offset = irqd_to_hwirq(d);
+
+- armada_37xx_update_reg(reg, offset);
++ armada_37xx_update_reg(reg, &offset);
+ }
+
+ static int armada_37xx_gpio_direction_input(struct gpio_chip *chip,
+@@ -383,7 +383,7 @@ static int armada_37xx_gpio_direction_in
+ unsigned int reg = OUTPUT_EN;
+ unsigned int mask;
+
+-	armada_37xx_update_reg(&reg, offset);
++	armada_37xx_update_reg(&reg, &offset);
+ mask = BIT(offset);
+
+ return regmap_update_bits(info->regmap, reg, mask, 0);
+@@ -396,7 +396,7 @@ static int armada_37xx_gpio_get_directio
+ unsigned int reg = OUTPUT_EN;
+ unsigned int val, mask;
+
+-	armada_37xx_update_reg(&reg, offset);
++	armada_37xx_update_reg(&reg, &offset);
+ mask = BIT(offset);
+ regmap_read(info->regmap, reg, &val);
+
+@@ -410,7 +410,7 @@ static int armada_37xx_gpio_direction_ou
+ unsigned int reg = OUTPUT_EN;
+ unsigned int mask, val, ret;
+
+-	armada_37xx_update_reg(&reg, offset);
++	armada_37xx_update_reg(&reg, &offset);
+ mask = BIT(offset);
+
+ ret = regmap_update_bits(info->regmap, reg, mask, mask);
+@@ -431,7 +431,7 @@ static int armada_37xx_gpio_get(struct g
+ unsigned int reg = INPUT_VAL;
+ unsigned int val, mask;
+
+-	armada_37xx_update_reg(&reg, offset);
++	armada_37xx_update_reg(&reg, &offset);
+ mask = BIT(offset);
+
+ regmap_read(info->regmap, reg, &val);
+@@ -446,7 +446,7 @@ static void armada_37xx_gpio_set(struct
+ unsigned int reg = OUTPUT_VAL;
+ unsigned int mask, val;
+
+-	armada_37xx_update_reg(&reg, offset);
++	armada_37xx_update_reg(&reg, &offset);
+ mask = BIT(offset);
+ val = value ? mask : 0;
+
--- /dev/null
+From b835d6953009dc350d61402a854b5a7178d8c615 Mon Sep 17 00:00:00 2001
+From: Patrick Williams <alpawi@amazon.com>
+Date: Tue, 1 Oct 2019 10:51:38 -0500
+Subject: pinctrl: armada-37xx: swap polarity on LED group
+
+From: Patrick Williams <alpawi@amazon.com>
+
+commit b835d6953009dc350d61402a854b5a7178d8c615 upstream.
+
+The configuration registers for the LED group have inverted
+polarity, which puts the GPIO into open-drain state when used in
+GPIO mode. Switch to '0' for GPIO and '1' for LED modes.
+
+Fixes: 87466ccd9401 ("pinctrl: armada-37xx: Add pin controller support for Armada 37xx")
+Signed-off-by: Patrick Williams <alpawi@amazon.com>
+Cc: <stable@vger.kernel.org>
+Link: https://lore.kernel.org/r/20191001155154.99710-1-alpawi@amazon.com
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/pinctrl/mvebu/pinctrl-armada-37xx.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
++++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
+@@ -170,10 +170,10 @@ static struct armada_37xx_pin_group arma
+ PIN_GRP_EXTRA("uart2", 9, 2, BIT(1) | BIT(13) | BIT(14) | BIT(19),
+ BIT(1) | BIT(13) | BIT(14), BIT(1) | BIT(19),
+ 18, 2, "gpio", "uart"),
+- PIN_GRP_GPIO("led0_od", 11, 1, BIT(20), "led"),
+- PIN_GRP_GPIO("led1_od", 12, 1, BIT(21), "led"),
+- PIN_GRP_GPIO("led2_od", 13, 1, BIT(22), "led"),
+- PIN_GRP_GPIO("led3_od", 14, 1, BIT(23), "led"),
++ PIN_GRP_GPIO_2("led0_od", 11, 1, BIT(20), BIT(20), 0, "led"),
++ PIN_GRP_GPIO_2("led1_od", 12, 1, BIT(21), BIT(21), 0, "led"),
++ PIN_GRP_GPIO_2("led2_od", 13, 1, BIT(22), BIT(22), 0, "led"),
++ PIN_GRP_GPIO_2("led3_od", 14, 1, BIT(23), BIT(23), 0, "led"),
+
+ };
+
hugetlbfs-don-t-access-uninitialized-memmaps-in-pfn_range_valid_gigantic.patch
xtensa-drop-export_symbol-for-outs-ins.patch
parisc-fix-vmap-memory-leak-in-ioremap-iounmap.patch
+cifs-avoid-using-mid-0xffff.patch
+x86-boot-64-make-level2_kernel_pgt-pages-invalid-outside-kernel-area.patch
+pinctrl-armada-37xx-fix-control-of-pins-32-and-up.patch
+pinctrl-armada-37xx-swap-polarity-on-led-group.patch
+btrfs-block-group-fix-a-memory-leak-due-to-missing-btrfs_put_block_group.patch
+memstick-jmb38x_ms-fix-an-error-handling-path-in-jmb38x_ms_probe.patch
+cpufreq-avoid-cpufreq_suspend-deadlock-on-system-shutdown.patch
+xen-netback-fix-error-path-of-xenvif_connect_data.patch
+pci-pm-fix-pci_power_up.patch
+kvm-x86-introduce-invalidate_gpa-argument-to-tlb-flush.patch
+kvm-vmx-introduce-lapic_mode-enumeration.patch
+kvm-apic-flush-tlb-after-apic-mode-address-change-if-vpids-are-in-use.patch
+kvm-vmx-basic-apic-virtualization-controls-have-three-settings.patch
--- /dev/null
+From 2aa85f246c181b1fa89f27e8e20c5636426be624 Mon Sep 17 00:00:00 2001
+From: Steve Wahl <steve.wahl@hpe.com>
+Date: Tue, 24 Sep 2019 16:03:55 -0500
+Subject: x86/boot/64: Make level2_kernel_pgt pages invalid outside kernel area
+
+From: Steve Wahl <steve.wahl@hpe.com>
+
+commit 2aa85f246c181b1fa89f27e8e20c5636426be624 upstream.
+
+Our hardware (UV aka Superdome Flex) has address ranges marked
+reserved by the BIOS. Access to these ranges is caught as an error,
+causing the BIOS to halt the system.
+
+Initial page tables mapped a large range of physical addresses that
+were not checked against the list of BIOS reserved addresses, and
+sometimes included reserved addresses in part of the mapped range.
+Including the reserved range in the map allowed processor speculative
+accesses to the reserved range, triggering a BIOS halt.
+
+Used early in booting, the page table level2_kernel_pgt addresses 1
+GiB divided into 2 MiB pages, and it was set up to linearly map a full
+ 1 GiB of physical addresses that included the physical address range
+of the kernel image, as chosen by KASLR. But this also included a
+large range of unused addresses on either side of the kernel image.
+And unlike the kernel image's physical address range, this extra
+mapped space was not checked against the BIOS tables of usable RAM
+addresses. So there were times when the addresses chosen by KASLR
+would result in processor accessible mappings of BIOS reserved
+physical addresses.
+
+The kernel code did not directly access any of this extra mapped
+space, but having it mapped allowed the processor to issue speculative
+accesses into reserved memory, causing system halts.
+
+This was encountered somewhat rarely on a normal system boot, and much
+more often when starting the crash kernel if "crashkernel=512M,high"
+was specified on the command line (this heavily restricts the physical
+address of the crash kernel, in our case usually within 1 GiB of
+reserved space).
+
+The solution is to invalidate the pages of this table outside the kernel
+image's space before the page table is activated. It fixes this problem
+on our hardware.
+
+ [ bp: Touchups. ]
+
+Signed-off-by: Steve Wahl <steve.wahl@hpe.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Acked-by: Dave Hansen <dave.hansen@linux.intel.com>
+Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Cc: Baoquan He <bhe@redhat.com>
+Cc: Brijesh Singh <brijesh.singh@amd.com>
+Cc: dimitri.sivanich@hpe.com
+Cc: Feng Tang <feng.tang@intel.com>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Jordan Borgner <mail@jordan-borgner.de>
+Cc: Juergen Gross <jgross@suse.com>
+Cc: mike.travis@hpe.com
+Cc: russ.anderson@hpe.com
+Cc: stable@vger.kernel.org
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: x86-ml <x86@kernel.org>
+Cc: Zhenzhong Duan <zhenzhong.duan@oracle.com>
+Link: https://lkml.kernel.org/r/9c011ee51b081534a7a15065b1681d200298b530.1569358539.git.steve.wahl@hpe.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/head64.c | 22 ++++++++++++++++++++--
+ 1 file changed, 20 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kernel/head64.c
++++ b/arch/x86/kernel/head64.c
+@@ -145,13 +145,31 @@ unsigned long __head __startup_64(unsign
+ * we might write invalid pmds, when the kernel is relocated
+ * cleanup_highmap() fixes this up along with the mappings
+ * beyond _end.
++ *
++ * Only the region occupied by the kernel image has so far
++ * been checked against the table of usable memory regions
++ * provided by the firmware, so invalidate pages outside that
++ * region. A page table entry that maps to a reserved area of
++ * memory would allow processor speculation into that area,
++ * and on some hardware (particularly the UV platform) even
++ * speculative access to some reserved areas is caught as an
++ * error, causing the BIOS to halt the system.
+ */
+
+ pmd = fixup_pointer(level2_kernel_pgt, physaddr);
+- for (i = 0; i < PTRS_PER_PMD; i++) {
++
++ /* invalidate pages before the kernel image */
++ for (i = 0; i < pmd_index((unsigned long)_text); i++)
++ pmd[i] &= ~_PAGE_PRESENT;
++
++ /* fixup pages that are part of the kernel image */
++ for (; i <= pmd_index((unsigned long)_end); i++)
+ if (pmd[i] & _PAGE_PRESENT)
+ pmd[i] += load_delta;
+- }
++
++ /* invalidate pages after the kernel image */
++ for (; i < PTRS_PER_PMD; i++)
++ pmd[i] &= ~_PAGE_PRESENT;
+
+ /*
+ * Fixup phys_base - remove the memory encryption mask to obtain
--- /dev/null
+From 3d5c1a037d37392a6859afbde49be5ba6a70a6b3 Mon Sep 17 00:00:00 2001
+From: Juergen Gross <jgross@suse.com>
+Date: Fri, 18 Oct 2019 09:45:49 +0200
+Subject: xen/netback: fix error path of xenvif_connect_data()
+
+From: Juergen Gross <jgross@suse.com>
+
+commit 3d5c1a037d37392a6859afbde49be5ba6a70a6b3 upstream.
+
+xenvif_connect_data() calls module_put() in case of error. This is
+wrong as there is no related module_get().
+
+Remove the superfluous module_put().
+
+Fixes: 279f438e36c0a7 ("xen-netback: Don't destroy the netdev until the vif is shut down")
+Cc: <stable@vger.kernel.org> # 3.12
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Reviewed-by: Paul Durrant <paul@xen.org>
+Reviewed-by: Wei Liu <wei.liu@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/xen-netback/interface.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/drivers/net/xen-netback/interface.c
++++ b/drivers/net/xen-netback/interface.c
+@@ -718,7 +718,6 @@ err_unmap:
+ xenvif_unmap_frontend_data_rings(queue);
+ netif_napi_del(&queue->napi);
+ err:
+- module_put(THIS_MODULE);
+ return err;
+ }
+