--- /dev/null
+From de3ccd26fafc707b09792d9b633c8b5b48865315 Mon Sep 17 00:00:00 2001
+From: Yu Zhang <yu.c.zhang@linux.intel.com>
+Date: Fri, 1 Feb 2019 00:09:23 +0800
+Subject: KVM: MMU: record maximum physical address width in kvm_mmu_extended_role
+
+From: Yu Zhang <yu.c.zhang@linux.intel.com>
+
+commit de3ccd26fafc707b09792d9b633c8b5b48865315 upstream.
+
+Previously, commit 7dcd57552008 ("x86/kvm/mmu: check if tdp/shadow
+MMU reconfiguration is needed") introduced an optimization to avoid
+unnecessary reconfiguration. Yet one scenario is broken: when CPUID
+changes the VM's maximum physical address width, reconfiguration is
+needed to reset the reserved bits. Also, the TDP code may need to
+reset its shadow_root_level when this value changes.
+
+To fix this, a new field, maxphyaddr, is introduced in the extended
+role structure to keep track of the configured guest physical address
+width.
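+
+A minimal sketch of the idea (simplified, not the verbatim kvm_mmu code;
+the role-comparison logic is paraphrased for illustration):
+
+  union kvm_mmu_role new_role;
+
+  /* recomputed from vCPU state; ext now also carries MAXPHYADDR */
+  new_role.ext = kvm_calc_mmu_role_ext(vcpu);
+
+  /* a CPUID-induced change of the guest physical address width now
+   * makes the role words differ, forcing a reconfiguration that
+   * resets the reserved-bit masks (and shadow_root_level for TDP)
+   */
+  if (new_role.as_u64 != vcpu->arch.mmu->mmu_role.as_u64)
+          /* reconfigure the MMU context */;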
+
+Signed-off-by: Yu Zhang <yu.c.zhang@linux.intel.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/kvm_host.h | 1 +
+ arch/x86/kvm/mmu.c | 1 +
+ 2 files changed, 2 insertions(+)
+
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -299,6 +299,7 @@ union kvm_mmu_extended_role {
+ unsigned int cr4_smap:1;
+ unsigned int cr4_smep:1;
+ unsigned int cr4_la57:1;
++ unsigned int maxphyaddr:6;
+ };
+ };
+
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -4731,6 +4731,7 @@ static union kvm_mmu_extended_role kvm_c
+ ext.cr4_pse = !!is_pse(vcpu);
+ ext.cr4_pke = !!kvm_read_cr4_bits(vcpu, X86_CR4_PKE);
+ ext.cr4_la57 = !!kvm_read_cr4_bits(vcpu, X86_CR4_LA57);
++ ext.maxphyaddr = cpuid_maxphyaddr(vcpu);
+
+ ext.valid = 1;
+
--- /dev/null
+From 511da98d207d5c0675a10351b01e37cbe50a79e5 Mon Sep 17 00:00:00 2001
+From: Yu Zhang <yu.c.zhang@linux.intel.com>
+Date: Fri, 1 Feb 2019 00:09:43 +0800
+Subject: kvm: x86: Return LA57 feature based on hardware capability
+
+From: Yu Zhang <yu.c.zhang@linux.intel.com>
+
+commit 511da98d207d5c0675a10351b01e37cbe50a79e5 upstream.
+
+Previously, commit 372fddf70904 ("x86/mm: Introduce the 'no5lvl' kernel
+parameter") cleared X86_FEATURE_LA57 in boot_cpu_data if Linux chooses
+not to run in 5-level paging mode. Yet boot_cpu_data is later queried
+by do_cpuid_ent() as the host capability when creating vCPUs, so Qemu
+cannot detect this feature and create VMs with LA57 enabled.
+
+As discussed earlier, VMs can still benefit from the extended linear
+address width, e.g. to enhance features like ASLR. So fix this by
+returning the true hardware capability when Qemu queries.
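+
+The effect in __do_cpuid_ent(), sketched (this mirrors the hunk below;
+the comments are illustrative):
+
+  f_la57 = entry->ecx & F(LA57);        /* raw hardware LA57 bit      */
+  cpuid_mask(&entry->ecx, CPUID_7_ECX); /* masks by boot_cpu_data,
+                                           which may have LA57 cleared
+                                           when booted with no5lvl    */
+  entry->ecx |= f_la57;                 /* report hardware capability */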
+
+Signed-off-by: Yu Zhang <yu.c.zhang@linux.intel.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/cpuid.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -337,6 +337,7 @@ static inline int __do_cpuid_ent(struct
+ unsigned f_mpx = kvm_mpx_supported() ? F(MPX) : 0;
+ unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0;
+ unsigned f_umip = kvm_x86_ops->umip_emulated() ? F(UMIP) : 0;
++ unsigned f_la57 = 0;
+
+ /* cpuid 1.edx */
+ const u32 kvm_cpuid_1_edx_x86_features =
+@@ -491,7 +492,10 @@ static inline int __do_cpuid_ent(struct
+ // TSC_ADJUST is emulated
+ entry->ebx |= F(TSC_ADJUST);
+ entry->ecx &= kvm_cpuid_7_0_ecx_x86_features;
++ f_la57 = entry->ecx & F(LA57);
+ cpuid_mask(&entry->ecx, CPUID_7_ECX);
++ /* Set LA57 based on hardware capability. */
++ entry->ecx |= f_la57;
+ entry->ecx |= f_umip;
+ /* PKU is not yet implemented for shadow paging. */
+ if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
--- /dev/null
+From 51d0af222f6fa43134c6187ab4f374630f6e0d96 Mon Sep 17 00:00:00 2001
+From: Felix Fietkau <nbd@nbd.name>
+Date: Fri, 22 Feb 2019 13:21:15 +0100
+Subject: mac80211: allocate tailroom for forwarded mesh packets
+
+From: Felix Fietkau <nbd@nbd.name>
+
+commit 51d0af222f6fa43134c6187ab4f374630f6e0d96 upstream.
+
+Forwarded packets enter the tx path through ieee80211_add_pending_skb(),
+which skips the ieee80211_skb_resize() call. This fixes a WARN_ON in
+ccmp_encrypt_skb() and the resulting packet loss.
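+
+The shape of the fix, sketched (it mirrors the hunk below): reserve the
+crypto tailroom already when copying the frame for forwarding, since the
+pending-skb path will not resize it later:
+
+  int tailroom = 0;
+
+  if (sdata->crypto_tx_tailroom_needed_cnt)
+          tailroom = IEEE80211_ENCRYPT_TAILROOM;
+
+  fwd_skb = skb_copy_expand(skb, local->tx_headroom +
+                            sdata->encrypt_headroom,
+                            tailroom, GFP_ATOMIC);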
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/mac80211/rx.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -2640,6 +2640,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80
+ struct ieee80211_sub_if_data *sdata = rx->sdata;
+ struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+ u16 ac, q, hdrlen;
++ int tailroom = 0;
+
+ hdr = (struct ieee80211_hdr *) skb->data;
+ hdrlen = ieee80211_hdrlen(hdr->frame_control);
+@@ -2726,8 +2727,12 @@ ieee80211_rx_h_mesh_fwding(struct ieee80
+ if (!ifmsh->mshcfg.dot11MeshForwarding)
+ goto out;
+
++ if (sdata->crypto_tx_tailroom_needed_cnt)
++ tailroom = IEEE80211_ENCRYPT_TAILROOM;
++
+ fwd_skb = skb_copy_expand(skb, local->tx_headroom +
+- sdata->encrypt_headroom, 0, GFP_ATOMIC);
++ sdata->encrypt_headroom,
++ tailroom, GFP_ATOMIC);
+ if (!fwd_skb)
+ goto out;
+
--- /dev/null
+From 5c14a4d05f68415af9e41a4e667d1748d41d1baf Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Toke=20H=C3=B8iland-J=C3=B8rgensen?= <toke@redhat.com>
+Date: Thu, 21 Feb 2019 18:29:36 +0100
+Subject: mac80211: Change default tx_sk_pacing_shift to 7
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Toke Høiland-Jørgensen <toke@redhat.com>
+
+commit 5c14a4d05f68415af9e41a4e667d1748d41d1baf upstream.
+
+When we did the original tests for the optimal value of sk_pacing_shift, we
+came up with 6 ms of buffering as the default. Sadly, 6 is not a power of
+two, so when picking the shift value I erred on the side of less buffering
+and picked 4 ms instead of 8. This was probably wrong; those 2 ms of extra
+buffering make a larger difference than I thought.
+
+So, change the default pacing shift to 7, which corresponds to 8 ms of
+buffering. The point of diminishing returns really kicks in after 8 ms, so
+having this as the default should cut down on the need for extensive
+per-device testing and overrides in the drivers.
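+
+To make the arithmetic concrete, the buffering window is one second
+shifted right by sk_pacing_shift:
+
+  1 s >> 8 = ~3.9 ms of queued data (old default, the "4 ms")
+  1 s >> 7 = ~7.8 ms of queued data (new default, the "8 ms")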
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/mac80211/main.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/mac80211/main.c
++++ b/net/mac80211/main.c
+@@ -615,13 +615,13 @@ struct ieee80211_hw *ieee80211_alloc_hw_
+ * We need a bit of data queued to build aggregates properly, so
+ * instruct the TCP stack to allow more than a single ms of data
+ * to be queued in the stack. The value is a bit-shift of 1
+- * second, so 8 is ~4ms of queued data. Only affects local TCP
++ * second, so 7 is ~8ms of queued data. Only affects local TCP
+ * sockets.
+ * This is the default, anyhow - drivers may need to override it
+ * for local reasons (longer buffers, longer completion time, or
+ * similar).
+ */
+- local->hw.tx_sk_pacing_shift = 8;
++ local->hw.tx_sk_pacing_shift = 7;
+
+ /* set up some defaults */
+ local->hw.queues = 1;
--- /dev/null
+From b10bd9a256aec504c14a7c9b6fccb6301ecf290a Mon Sep 17 00:00:00 2001
+From: Pierre Morel <pmorel@linux.ibm.com>
+Date: Mon, 11 Feb 2019 10:20:49 +0100
+Subject: s390: vsie: Use effective CRYCBD.31 to check CRYCBD validity
+
+From: Pierre Morel <pmorel@linux.ibm.com>
+
+commit b10bd9a256aec504c14a7c9b6fccb6301ecf290a upstream.
+
+When facility.76 (MSAX3) is present for the guest, we must issue a
+validity interception if the CRYCBD is not valid.
+
+The CRYCBD.31 bit is an effective field, tested at each guest level,
+and its effect is to mask facility.76.
+
+It follows that if CRYCBD.31 is clear and AP is not in use, we do not
+have to test the CRYCBD validity even if facility.76 is present in the
+host.
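+
+Expressed as a condition (a sketch of the check in shadow_crycb(); the
+variable names follow the hunk below):
+
+  /* Skip the CRYCB validity checks entirely when neither AP (apie_h)
+   * nor MSAX3 key wrapping (key_msk) is effectively in use; a clear
+   * effective CRYCBD.31 shows up here as CRYCB_FORMAT0.
+   */
+  if (!apie_h && (!key_msk || fmt_o == CRYCB_FORMAT0))
+          return 0;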
+
+Fixes: 6ee74098201b ("KVM: s390: vsie: allow CRYCB FORMAT-0")
+Cc: stable@vger.kernel.org
+
+Signed-off-by: Pierre Morel <pmorel@linux.ibm.com>
+Reported-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
+Acked-by: David Hildenbrand <david@redhat.com>
+Acked-by: Cornelia Huck <cohuck@redhat.com>
+Reviewed-by: Christian Borntraeger <borntraeger@de.ibm.com>
+Message-Id: <1549876849-32680-1-git-send-email-pmorel@linux.ibm.com>
+Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/kvm/vsie.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/s390/kvm/vsie.c
++++ b/arch/s390/kvm/vsie.c
+@@ -297,7 +297,7 @@ static int shadow_crycb(struct kvm_vcpu
+ scb_s->crycbd = 0;
+
+ apie_h = vcpu->arch.sie_block->eca & ECA_APIE;
+- if (!apie_h && !key_msk)
++ if (!apie_h && (!key_msk || fmt_o == CRYCB_FORMAT0))
+ return 0;
+
+ if (!crycb_addr)
drm-i915-fbdev-actually-configure-untiled-displays.patch
drm-amdgpu-disable-bulk-moves-for-now.patch
drm-amd-display-fix-mst-reboot-poweroff-sequence.patch
+mac80211-change-default-tx_sk_pacing_shift-to-7.patch
+mac80211-allocate-tailroom-for-forwarded-mesh-packets.patch
+kvm-mmu-record-maximum-physical-address-width-in-kvm_mmu_extended_role.patch
+x86-kvm-mmu-fix-switch-between-root-and-guest-mmus.patch
+kvm-x86-return-la57-feature-based-on-hardware-capability.patch
+s390-vsie-use-effective-crycbd.31-to-check-crycbd-validity.patch
--- /dev/null
+From ad7dc69aeb23138cc23c406cac25003b97e8ee17 Mon Sep 17 00:00:00 2001
+From: Vitaly Kuznetsov <vkuznets@redhat.com>
+Date: Fri, 22 Feb 2019 17:45:01 +0100
+Subject: x86/kvm/mmu: fix switch between root and guest MMUs
+
+From: Vitaly Kuznetsov <vkuznets@redhat.com>
+
+commit ad7dc69aeb23138cc23c406cac25003b97e8ee17 upstream.
+
+Commit 14c07ad89f4d ("x86/kvm/mmu: introduce guest_mmu") brought one subtle
+change: previously, when switching back from L2 to L1, we were resetting
+MMU hooks (like mmu->get_cr3()) in kvm_init_mmu() called from
+nested_vmx_load_cr3(); now we do that in nested_ept_uninit_mmu_context(),
+when we re-target the vcpu->arch.mmu pointer.
+The change itself looks logical: if nested_ept_init_mmu_context() changes
+something, then nested_ept_uninit_mmu_context() restores it. There is,
+however, one catch in the following call chain:
+
+ nested_vmx_load_cr3()
+ kvm_mmu_new_cr3()
+ __kvm_mmu_new_cr3()
+ fast_cr3_switch()
+ cached_root_available()
+
+now happens with the MMU hooks pointing to the new MMU (the root MMU in
+our case), while previously it happened with the old one.
+cached_root_available() tries to stash the current root, but it is
+incorrect to read the current CR3 with mmu->get_cr3(); we need to use
+old_mmu->get_cr3(), which when switching from L2 to L1 is guest_mmu. (In
+the shadow page table case this is a non-issue because we don't switch
+MMUs.)
+
+While we could have tried to guess that we're switching between MMUs and
+call the right ->get_cr3() from cached_root_available(), this seems
+overly complicated. Instead, just stash the corresponding CR3 when
+setting root_hpa and make cached_root_available() use the stashed value.
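+
+Sketched (names as in the hunks below; simplified):
+
+  /* at root allocation time, while mmu->get_cr3() still matches the
+   * MMU being set up, stash the CR3 backing root_hpa:
+   */
+  vcpu->arch.mmu->root_cr3 = vcpu->arch.mmu->get_cr3(vcpu);
+
+  /* later, cached_root_available() compares against the stash instead
+   * of re-reading CR3 through possibly re-targeted hooks:
+   */
+  root.cr3 = mmu->root_cr3;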
+
+Fixes: 14c07ad89f4d ("x86/kvm/mmu: introduce guest_mmu")
+Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/kvm_host.h | 1 +
+ arch/x86/kvm/mmu.c | 17 +++++++++++++----
+ 2 files changed, 14 insertions(+), 4 deletions(-)
+
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -398,6 +398,7 @@ struct kvm_mmu {
+ void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
+ u64 *spte, const void *pte);
+ hpa_t root_hpa;
++ gpa_t root_cr3;
+ union kvm_mmu_role mmu_role;
+ u8 root_level;
+ u8 shadow_root_level;
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -3517,6 +3517,7 @@ void kvm_mmu_free_roots(struct kvm_vcpu
+ &invalid_list);
+ mmu->root_hpa = INVALID_PAGE;
+ }
++ mmu->root_cr3 = 0;
+ }
+
+ kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
+@@ -3572,6 +3573,7 @@ static int mmu_alloc_direct_roots(struct
+ vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root);
+ } else
+ BUG();
++ vcpu->arch.mmu->root_cr3 = vcpu->arch.mmu->get_cr3(vcpu);
+
+ return 0;
+ }
+@@ -3580,10 +3582,11 @@ static int mmu_alloc_shadow_roots(struct
+ {
+ struct kvm_mmu_page *sp;
+ u64 pdptr, pm_mask;
+- gfn_t root_gfn;
++ gfn_t root_gfn, root_cr3;
+ int i;
+
+- root_gfn = vcpu->arch.mmu->get_cr3(vcpu) >> PAGE_SHIFT;
++ root_cr3 = vcpu->arch.mmu->get_cr3(vcpu);
++ root_gfn = root_cr3 >> PAGE_SHIFT;
+
+ if (mmu_check_root(vcpu, root_gfn))
+ return 1;
+@@ -3608,7 +3611,7 @@ static int mmu_alloc_shadow_roots(struct
+ ++sp->root_count;
+ spin_unlock(&vcpu->kvm->mmu_lock);
+ vcpu->arch.mmu->root_hpa = root;
+- return 0;
++ goto set_root_cr3;
+ }
+
+ /*
+@@ -3674,6 +3677,9 @@ static int mmu_alloc_shadow_roots(struct
+ vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->lm_root);
+ }
+
++set_root_cr3:
++ vcpu->arch.mmu->root_cr3 = root_cr3;
++
+ return 0;
+ }
+
+@@ -4125,7 +4131,7 @@ static bool cached_root_available(struct
+ struct kvm_mmu_root_info root;
+ struct kvm_mmu *mmu = vcpu->arch.mmu;
+
+- root.cr3 = mmu->get_cr3(vcpu);
++ root.cr3 = mmu->root_cr3;
+ root.hpa = mmu->root_hpa;
+
+ for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
+@@ -4138,6 +4144,7 @@ static bool cached_root_available(struct
+ }
+
+ mmu->root_hpa = root.hpa;
++ mmu->root_cr3 = root.cr3;
+
+ return i < KVM_MMU_NUM_PREV_ROOTS;
+ }
+@@ -5478,11 +5485,13 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu
+ vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
+
+ vcpu->arch.root_mmu.root_hpa = INVALID_PAGE;
++ vcpu->arch.root_mmu.root_cr3 = 0;
+ vcpu->arch.root_mmu.translate_gpa = translate_gpa;
+ for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
+ vcpu->arch.root_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
+
+ vcpu->arch.guest_mmu.root_hpa = INVALID_PAGE;
++ vcpu->arch.guest_mmu.root_cr3 = 0;
+ vcpu->arch.guest_mmu.translate_gpa = translate_gpa;
+ for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
+ vcpu->arch.guest_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;