git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.19-stable patches
author: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 29 Mar 2021 05:32:24 +0000 (07:32 +0200)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 29 Mar 2021 05:32:24 +0000 (07:32 +0200)
added patches:
locking-mutex-fix-non-debug-version-of-mutex_lock_io_nested.patch
x86-mem_encrypt-correct-physical-address-calculation-in-__set_clr_pte_enc.patch

queue-4.19/locking-mutex-fix-non-debug-version-of-mutex_lock_io_nested.patch [new file with mode: 0644]
queue-4.19/series
queue-4.19/x86-mem_encrypt-correct-physical-address-calculation-in-__set_clr_pte_enc.patch [new file with mode: 0644]

diff --git a/queue-4.19/locking-mutex-fix-non-debug-version-of-mutex_lock_io_nested.patch b/queue-4.19/locking-mutex-fix-non-debug-version-of-mutex_lock_io_nested.patch
new file mode 100644 (file)
index 0000000..ba1226f
--- /dev/null
@@ -0,0 +1,37 @@
+From 291da9d4a9eb3a1cb0610b7f4480f5b52b1825e7 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Mon, 22 Mar 2021 09:46:13 +0100
+Subject: locking/mutex: Fix non debug version of mutex_lock_io_nested()
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 291da9d4a9eb3a1cb0610b7f4480f5b52b1825e7 upstream.
+
+If CONFIG_DEBUG_LOCK_ALLOC=n then mutex_lock_io_nested() maps to
+mutex_lock() which is clearly wrong because mutex_lock() lacks the
+io_schedule_prepare()/finish() invocations.
+
+Map it to mutex_lock_io().
+
+Fixes: f21860bac05b ("locking/mutex, sched/wait: Fix the mutex_lock_io_nested() define")
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/878s6fshii.fsf@nanos.tec.linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/mutex.h |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/include/linux/mutex.h
++++ b/include/linux/mutex.h
+@@ -184,7 +184,7 @@ extern void mutex_lock_io(struct mutex *
+ # define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
+ # define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock)
+ # define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock)
+-# define mutex_lock_io_nested(lock, subclass) mutex_lock(lock)
++# define mutex_lock_io_nested(lock, subclass) mutex_lock_io(lock)
+ #endif
+ /*
index 65bcecdc2353645f69177598bcfbe5d63c5f8dc1..da322310b648f9ed66da0aeda9bb74cb6e07c9d5 100644 (file)
@@ -62,3 +62,5 @@ dm-verity-add-root-hash-pkcs-7-signature-verificatio.patch
 perf-auxtrace-fix-auxtrace-queue-conflict.patch
 scsi-qedi-fix-error-return-code-of-qedi_alloc_global.patch
 scsi-mpt3sas-fix-error-return-code-of-mpt3sas_base_a.patch
+locking-mutex-fix-non-debug-version-of-mutex_lock_io_nested.patch
+x86-mem_encrypt-correct-physical-address-calculation-in-__set_clr_pte_enc.patch
diff --git a/queue-4.19/x86-mem_encrypt-correct-physical-address-calculation-in-__set_clr_pte_enc.patch b/queue-4.19/x86-mem_encrypt-correct-physical-address-calculation-in-__set_clr_pte_enc.patch
new file mode 100644 (file)
index 0000000..6fddbec
--- /dev/null
@@ -0,0 +1,46 @@
+From 8249d17d3194eac064a8ca5bc5ca0abc86feecde Mon Sep 17 00:00:00 2001
+From: Isaku Yamahata <isaku.yamahata@intel.com>
+Date: Thu, 18 Mar 2021 13:26:57 -0700
+Subject: x86/mem_encrypt: Correct physical address calculation in __set_clr_pte_enc()
+
+From: Isaku Yamahata <isaku.yamahata@intel.com>
+
+commit 8249d17d3194eac064a8ca5bc5ca0abc86feecde upstream.
+
+The pfn variable contains the page frame number as returned by the
+pXX_pfn() functions, shifted to the right by PAGE_SHIFT to remove the
+page bits. After page protection computations are done to it, it gets
+shifted back to the physical address using page_level_shift().
+
+That is wrong, of course, because that function determines the shift
+length based on the level of the page in the page table but in all the
+cases, it was shifted by PAGE_SHIFT before.
+
+Therefore, shift it back using PAGE_SHIFT to get the correct physical
+address.
+
+ [ bp: Rewrite commit message. ]
+
+Fixes: dfaaec9033b8 ("x86: Add support for changing memory encryption attribute in early boot")
+Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Reviewed-by: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: <stable@vger.kernel.org>
+Link: https://lkml.kernel.org/r/81abbae1657053eccc535c16151f63cd049dcb97.1616098294.git.isaku.yamahata@intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/mm/mem_encrypt.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/mm/mem_encrypt.c
++++ b/arch/x86/mm/mem_encrypt.c
+@@ -228,7 +228,7 @@ static void __init __set_clr_pte_enc(pte
+       if (pgprot_val(old_prot) == pgprot_val(new_prot))
+               return;
+-      pa = pfn << page_level_shift(level);
++      pa = pfn << PAGE_SHIFT;
+       size = page_level_size(level);
+       /*