--- /dev/null
+From f8be156be163a052a067306417cd0ff679068c97 Mon Sep 17 00:00:00 2001
+From: Nicholas Piggin <npiggin@gmail.com>
+Date: Thu, 24 Jun 2021 08:29:04 -0400
+Subject: KVM: do not allow mapping valid but non-reference-counted pages
+
+From: Nicholas Piggin <npiggin@gmail.com>
+
+commit f8be156be163a052a067306417cd0ff679068c97 upstream.
+
+It's possible to create a region which maps valid but non-refcounted
+pages (e.g., tail pages of non-compound higher order allocations). These
+host pages can then be returned by the gfn_to_page, gfn_to_pfn, etc. family
+of APIs, which take a reference to the page, raising its refcount from 0 to 1.
+When the reference is dropped, this will free the page incorrectly.
+
+Fix this by taking a reference on a valid page only if its refcount is
+already non-zero, which indicates that the page participates in normal
+refcounting (and can be released with put_page).
+
+This addresses CVE-2021-22543.
+
+Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
+Tested-by: Paolo Bonzini <pbonzini@redhat.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Ovidiu Panait <ovidiu.panait@windriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ virt/kvm/kvm_main.c | 19 +++++++++++++++++--
+ 1 file changed, 17 insertions(+), 2 deletions(-)
+
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -1485,6 +1485,13 @@ static bool vma_is_valid(struct vm_area_
+ return true;
+ }
+
++static int kvm_try_get_pfn(kvm_pfn_t pfn)
++{
++ if (kvm_is_reserved_pfn(pfn))
++ return 1;
++ return get_page_unless_zero(pfn_to_page(pfn));
++}
++
+ static int hva_to_pfn_remapped(struct vm_area_struct *vma,
+ unsigned long addr, bool *async,
+ bool write_fault, bool *writable,
+@@ -1534,13 +1541,21 @@ static int hva_to_pfn_remapped(struct vm
+ * Whoever called remap_pfn_range is also going to call e.g.
+ * unmap_mapping_range before the underlying pages are freed,
+ * causing a call to our MMU notifier.
++ *
++ * Certain IO or PFNMAP mappings can be backed with valid
++ * struct pages, but be allocated without refcounting e.g.,
++ * tail pages of non-compound higher order allocations, which
++ * would then underflow the refcount when the caller does the
++ * required put_page. Don't allow those pages here.
+ */
+- kvm_get_pfn(pfn);
++ if (!kvm_try_get_pfn(pfn))
++ r = -EFAULT;
+
+ out:
+ pte_unmap_unlock(ptep, ptl);
+ *p_pfn = pfn;
+- return 0;
++
++ return r;
+ }
+
+ /*
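
The fix above hinges on one rule: only take a reference when the refcount is
already non-zero, which is exactly what get_page_unless_zero() guarantees.
Below is a minimal userspace analogue (plain C11 atomics, names invented for
the sketch, not kernel code) contrasting a blind increment, as the old
kvm_get_pfn() path effectively did for these pages, with a tryget that
refuses a zero count.

/*
 * Illustrative userspace analogue, not kernel code: blind refcount
 * increment vs. the get_page_unless_zero() rule of taking a reference
 * only when the count is already non-zero.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct ref { atomic_uint count; };

/* Blind increment: turns a dead (count == 0) object into count == 1,
 * so the eventual put frees it a second time. */
static void ref_get(struct ref *r)
{
        atomic_fetch_add(&r->count, 1);
}

/* Increment only if the count is non-zero, like get_page_unless_zero().
 * Returns false for objects that do not participate in refcounting. */
static bool ref_tryget(struct ref *r)
{
        unsigned int old = atomic_load(&r->count);

        while (old != 0) {
                if (atomic_compare_exchange_weak(&r->count, &old, old + 1))
                        return true;
        }
        return false;
}

int main(void)
{
        struct ref dead = { .count = 0 };

        ref_get(&dead);                 /* count silently becomes 1 */
        printf("blind get: %u\n", atomic_load(&dead.count));

        dead.count = 0;
        printf("tryget on zero: %d\n", ref_tryget(&dead));  /* 0: refused */
        return 0;
}

In the patch, kvm_try_get_pfn() plays the role of ref_tryget(): reserved PFNs
are accepted outright, everything else must pass get_page_unless_zero().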
--- /dev/null
+From bd2fae8da794b55bf2ac02632da3a151b10e664c Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Mon, 1 Feb 2021 05:12:11 -0500
+Subject: KVM: do not assume PTE is writable after follow_pfn
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit bd2fae8da794b55bf2ac02632da3a151b10e664c upstream.
+
+In order to convert an HVA to a PFN, KVM usually tries to use
+the get_user_pages family of functions. This, however, is not
+possible for VM_IO vmas; in that case, KVM instead uses follow_pfn.
+
+In doing this, however, KVM loses the information on whether the
+PFN is writable. That is usually not a problem because the main
+use of VM_IO vmas with KVM is for BARs in PCI device assignment;
+however, it is still a bug. To fix it, use follow_pte and check pte_write
+while under the protection of the PTE lock. The information can
+be used to fail hva_to_pfn_remapped or passed back to the
+caller via *writable.
+
+Usage of follow_pfn was introduced in commit add6a0cd1c5b ("KVM: MMU: try to fix
+up page faults before giving up", 2016-07-05); however, even older versions
+have the same issue, all the way back to commit 2e2e3738af33 ("KVM:
+Handle vma regions with no backing page", 2008-07-20), as they also did
+not check whether the PFN was writable.
+
+Fixes: 2e2e3738af33 ("KVM: Handle vma regions with no backing page")
+Reported-by: David Stevens <stevensd@google.com>
+Cc: 3pvd@google.com
+Cc: Jann Horn <jannh@google.com>
+Cc: Jason Gunthorpe <jgg@ziepe.ca>
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+[OP: backport to 4.14, adjust follow_pte() -> follow_pte_pmd()]
+Signed-off-by: Ovidiu Panait <ovidiu.panait@windriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ virt/kvm/kvm_main.c | 15 ++++++++++++---
+ 1 file changed, 12 insertions(+), 3 deletions(-)
+
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -1491,9 +1491,11 @@ static int hva_to_pfn_remapped(struct vm
+ kvm_pfn_t *p_pfn)
+ {
+ unsigned long pfn;
++ pte_t *ptep;
++ spinlock_t *ptl;
+ int r;
+
+- r = follow_pfn(vma, addr, &pfn);
++ r = follow_pte_pmd(vma->vm_mm, addr, NULL, NULL, &ptep, NULL, &ptl);
+ if (r) {
+ /*
+ * get_user_pages fails for VM_IO and VM_PFNMAP vmas and does
+@@ -1508,14 +1510,19 @@ static int hva_to_pfn_remapped(struct vm
+ if (r)
+ return r;
+
+- r = follow_pfn(vma, addr, &pfn);
++ r = follow_pte_pmd(vma->vm_mm, addr, NULL, NULL, &ptep, NULL, &ptl);
+ if (r)
+ return r;
++ }
+
++ if (write_fault && !pte_write(*ptep)) {
++ pfn = KVM_PFN_ERR_RO_FAULT;
++ goto out;
+ }
+
+ if (writable)
+- *writable = true;
++ *writable = pte_write(*ptep);
++ pfn = pte_pfn(*ptep);
+
+ /*
+ * Get a reference here because callers of *hva_to_pfn* and
+@@ -1530,6 +1537,8 @@ static int hva_to_pfn_remapped(struct vm
+ */
+ kvm_get_pfn(pfn);
+
++out:
++ pte_unmap_unlock(ptep, ptl);
+ *p_pfn = pfn;
+ return 0;
+ }
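
The key change above is that writability is now read from the PTE itself via
pte_write() while the PTE lock is held, and the PFN comes from pte_pfn(). As
a rough illustration of what those accessors decode, here is a simplified
userspace sketch assuming the x86-64 PTE layout (bit 0 = present, bit 1 =
R/W, bits 12-51 = page frame number); the real kernel goes through
per-architecture helpers, and this is not the code the patch adds.

/*
 * Simplified decoding of an x86-64 page-table entry: present bit,
 * writable (R/W) bit, and the page frame number in bits 12-51.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PTE_PRESENT   (1ULL << 0)
#define PTE_RW        (1ULL << 1)
#define PTE_PFN_MASK  0x000ffffffffff000ULL   /* bits 12-51 */

static bool pte_present_x86(uint64_t pte) { return pte & PTE_PRESENT; }
static bool pte_write_x86(uint64_t pte)   { return pte & PTE_RW; }
static uint64_t pte_pfn_x86(uint64_t pte) { return (pte & PTE_PFN_MASK) >> 12; }

int main(void)
{
        /* Hypothetical read-only PTE mapping physical page 0x12345 */
        uint64_t pte = (0x12345ULL << 12) | PTE_PRESENT;

        printf("present=%d writable=%d pfn=0x%llx\n",
               pte_present_x86(pte), pte_write_x86(pte),
               (unsigned long long)pte_pfn_x86(pte));

        /* This is the check the fix performs under the PTE lock: a write
         * fault against a non-writable PTE must fail instead of being
         * reported back as writable. */
        if (!pte_write_x86(pte))
                printf("a write fault here would be rejected\n");
        return 0;
}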
--- /dev/null
+From a9545779ee9e9e103648f6f2552e73cfe808d0f4 Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Mon, 8 Feb 2021 12:19:40 -0800
+Subject: KVM: Use kvm_pfn_t for local PFN variable in hva_to_pfn_remapped()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Sean Christopherson <seanjc@google.com>
+
+commit a9545779ee9e9e103648f6f2552e73cfe808d0f4 upstream.
+
+Use kvm_pfn_t, a.k.a. u64, for the local 'pfn' variable when retrieving
+a so-called "remapped" hva/pfn pair. In theory, the hva could resolve to
+a pfn in high memory on a 32-bit kernel.
+
+This bug was inadvertently exposed by commit bd2fae8da794 ("KVM: do not
+assume PTE is writable after follow_pfn"), which added an error PFN value
+to the mix, causing gcc to complain about overflowing the unsigned long.
+
+ arch/x86/kvm/../../../virt/kvm/kvm_main.c: In function ‘hva_to_pfn_remapped’:
+ include/linux/kvm_host.h:89:30: error: conversion from ‘long long unsigned int’
+ to ‘long unsigned int’ changes value from
+ ‘9218868437227405314’ to ‘2’ [-Werror=overflow]
+ 89 | #define KVM_PFN_ERR_RO_FAULT (KVM_PFN_ERR_MASK + 2)
+ | ^
+virt/kvm/kvm_main.c:1935:9: note: in expansion of macro ‘KVM_PFN_ERR_RO_FAULT’
+
+Cc: stable@vger.kernel.org
+Fixes: add6a0cd1c5b ("KVM: MMU: try to fix up page faults before giving up")
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Message-Id: <20210208201940.1258328-1-seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Ovidiu Panait <ovidiu.panait@windriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ virt/kvm/kvm_main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -1497,7 +1497,7 @@ static int hva_to_pfn_remapped(struct vm
+ bool write_fault, bool *writable,
+ kvm_pfn_t *p_pfn)
+ {
+- unsigned long pfn;
++ kvm_pfn_t pfn;
+ pte_t *ptep;
+ spinlock_t *ptl;
+ int r;
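
The warning quoted above is plain truncation: the error-PFN constants need
all 64 bits, but a 32-bit kernel's unsigned long keeps only the low 32,
turning the error value into what looks like an ordinary PFN. A small
standalone C sketch of the same effect, using the value from the gcc
message and uint32_t standing in for a 32-bit unsigned long:

/*
 * Storing a 64-bit error-PFN constant in a 32-bit integer keeps only
 * the low bits, so the error becomes indistinguishable from PFN 2.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t err_pfn = 9218868437227405314ULL;  /* value from the warning */
        uint32_t narrow  = (uint32_t)err_pfn;       /* 32-bit unsigned long */

        printf("64-bit error pfn: %" PRIu64 "\n", err_pfn);
        printf("after truncation: %" PRIu32 "\n", narrow);  /* prints 2 */

        /* The fix keeps the local variable as kvm_pfn_t (u64) so the
         * error encoding survives on 32-bit kernels. */
        return 0;
}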
--- /dev/null
+From b173962fd25a6a3ddf10771bdd6d9697bff2d21f Mon Sep 17 00:00:00 2001
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Date: Fri, 6 Aug 2021 08:28:48 +0200
+Subject: Revert "watchdog: iTCO_wdt: Account for rebooting on second timeout"
+
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+This reverts commit cb1bdbfad648aa32c43bec6ef6d03e1c9d434393, which is
+commit cb011044e34c293e139570ce5c01aed66a34345c upstream.
+
+It is reported to cause problems on some systems and probably should not
+have been backported in the first place :(
+
+Link: https://lore.kernel.org/r/20210803165108.4154cd52@endymion
+Reported-by: Jean Delvare <jdelvare@suse.de>
+Cc: Jan Kiszka <jan.kiszka@siemens.com>
+Cc: Guenter Roeck <linux@roeck-us.net>
+Cc: Wim Van Sebroeck <wim@linux-watchdog.org>
+Cc: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/watchdog/iTCO_wdt.c | 12 +++---------
+ 1 file changed, 3 insertions(+), 9 deletions(-)
+
+--- a/drivers/watchdog/iTCO_wdt.c
++++ b/drivers/watchdog/iTCO_wdt.c
+@@ -75,8 +75,6 @@
+ #define TCOBASE(p) ((p)->tco_res->start)
+ /* SMI Control and Enable Register */
+ #define SMI_EN(p) ((p)->smi_res->start)
+-#define TCO_EN (1 << 13)
+-#define GBL_SMI_EN (1 << 0)
+
+ #define TCO_RLD(p) (TCOBASE(p) + 0x00) /* TCO Timer Reload/Curr. Value */
+ #define TCOv1_TMR(p) (TCOBASE(p) + 0x01) /* TCOv1 Timer Initial Value*/
+@@ -332,12 +330,8 @@ static int iTCO_wdt_set_timeout(struct w
+
+ tmrval = seconds_to_ticks(p, t);
+
+- /*
+- * If TCO SMIs are off, the timer counts down twice before rebooting.
+- * Otherwise, the BIOS generally reboots when the SMI triggers.
+- */
+- if (p->smi_res &&
+- (SMI_EN(p) & (TCO_EN | GBL_SMI_EN)) != (TCO_EN | GBL_SMI_EN))
++ /* For TCO v1 the timer counts down twice before rebooting */
++ if (p->iTCO_version == 1)
+ tmrval /= 2;
+
+ /* from the specs: */
+@@ -499,7 +493,7 @@ static int iTCO_wdt_probe(struct platfor
+ * Disables TCO logic generating an SMI#
+ */
+ val32 = inl(SMI_EN(p));
+- val32 &= ~TCO_EN; /* Turn off SMI clearing watchdog */
++ val32 &= 0xffffdfff; /* Turn off SMI clearing watchdog */
+ outl(val32, SMI_EN(p));
+ }
+
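
For reference, the literal mask the revert restores, 0xffffdfff, is just
~(1 << 13): the AND clears only bit 13 of SMI_EN (the bit the reverted
patch had named TCO_EN) and leaves every other bit alone. A quick
sanity-check program:

/* Confirm that 0xffffdfff clears exactly bit 13 and nothing else. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t smi_en = 0xffffffffu;          /* pretend all bits set */
        uint32_t val32  = smi_en & 0xffffdfff;  /* turn off SMI clearing watchdog */

        printf("mask equals ~(1<<13): %d\n", 0xffffdfffu == (uint32_t)~(1u << 13));
        printf("bit 13 after mask: %u\n", (val32 >> 13) & 1u);
        return 0;
}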
net-fix-zero-copy-head-len-calculation.patch
revert-spi-mediatek-fix-fifo-rx-mode.patch
revert-bluetooth-shutdown-controller-after-workqueues-are-flushed-or-cancelled.patch
+kvm-do-not-assume-pte-is-writable-after-follow_pfn.patch
+kvm-do-not-allow-mapping-valid-but-non-reference-counted-pages.patch
+kvm-use-kvm_pfn_t-for-local-pfn-variable-in-hva_to_pfn_remapped.patch
+revert-watchdog-itco_wdt-account-for-rebooting-on-second-timeout.patch