--- /dev/null
+From 291da9d4a9eb3a1cb0610b7f4480f5b52b1825e7 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Mon, 22 Mar 2021 09:46:13 +0100
+Subject: locking/mutex: Fix non debug version of mutex_lock_io_nested()
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 291da9d4a9eb3a1cb0610b7f4480f5b52b1825e7 upstream.
+
+If CONFIG_DEBUG_LOCK_ALLOC=n then mutex_lock_io_nested() maps to
+mutex_lock() which is clearly wrong because mutex_lock() lacks the
+io_schedule_prepare()/finish() invocations.
+
+Map it to mutex_lock_io().
+
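+For reference, the non-debug mutex_lock_io() in kernel/locking/mutex.c
+wraps the lock acquisition in the io_schedule_prepare()/finish() pair,
+roughly:
+
+	/* abridged sketch of the upstream helper, not part of this patch */
+	void __sched mutex_lock_io(struct mutex *lock)
+	{
+		int token;
+
+		token = io_schedule_prepare();
+		mutex_lock(lock);
+		io_schedule_finish(token);
+	}
+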
+Fixes: f21860bac05b ("locking/mutex, sched/wait: Fix the mutex_lock_io_nested() define")
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/878s6fshii.fsf@nanos.tec.linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/mutex.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/include/linux/mutex.h
++++ b/include/linux/mutex.h
+@@ -184,7 +184,7 @@ extern void mutex_lock_io(struct mutex *
+ # define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
+ # define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock)
+ # define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock)
+-# define mutex_lock_io_nested(lock, subclass) mutex_lock(lock)
++# define mutex_lock_io_nested(lock, subclass) mutex_lock_io(lock)
+ #endif
+
+ /*
--- /dev/null
+From 8249d17d3194eac064a8ca5bc5ca0abc86feecde Mon Sep 17 00:00:00 2001
+From: Isaku Yamahata <isaku.yamahata@intel.com>
+Date: Thu, 18 Mar 2021 13:26:57 -0700
+Subject: x86/mem_encrypt: Correct physical address calculation in __set_clr_pte_enc()
+
+From: Isaku Yamahata <isaku.yamahata@intel.com>
+
+commit 8249d17d3194eac064a8ca5bc5ca0abc86feecde upstream.
+
+The pfn variable contains the page frame number as returned by the
+pXX_pfn() functions, i.e. the physical address shifted to the right by
+PAGE_SHIFT to remove the page offset bits. After the page protection
+computations are done, it gets shifted back to a physical address using
+page_level_shift().
+
+That is wrong, of course, because page_level_shift() determines the
+shift length from the level of the page in the page table, whereas the
+pfn was shifted by PAGE_SHIFT in all cases.
+
+Therefore, shift it back using PAGE_SHIFT to get the correct physical
+address.
+
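+As a worked example (hypothetical 2MB mapping at physical address
+0x40200000, so pfn = 0x40200000 >> PAGE_SHIFT = 0x40200):
+
+	/* hypothetical numbers, for illustration only */
+	pa = pfn << page_level_shift(PG_LEVEL_2M);	/* 0x40200 << 21 = 0x8040000000, wrong   */
+	pa = pfn << PAGE_SHIFT;				/* 0x40200 << 12 = 0x40200000,   correct */
+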
+ [ bp: Rewrite commit message. ]
+
+Fixes: dfaaec9033b8 ("x86: Add support for changing memory encryption attribute in early boot")
+Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Reviewed-by: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: <stable@vger.kernel.org>
+Link: https://lkml.kernel.org/r/81abbae1657053eccc535c16151f63cd049dcb97.1616098294.git.isaku.yamahata@intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/mm/mem_encrypt.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/mm/mem_encrypt.c
++++ b/arch/x86/mm/mem_encrypt.c
+@@ -228,7 +228,7 @@ static void __init __set_clr_pte_enc(pte
+ if (pgprot_val(old_prot) == pgprot_val(new_prot))
+ return;
+
+- pa = pfn << page_level_shift(level);
++ pa = pfn << PAGE_SHIFT;
+ size = page_level_size(level);
+
+ /*