git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.10-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 11 Apr 2024 09:35:46 +0000 (11:35 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 11 Apr 2024 09:35:46 +0000 (11:35 +0200)
added patches:
kbuild-dummy-tools-adjust-to-stricter-stackprotector-check.patch
scsi-sd-fix-wrong-zone_write_granularity-value-during-revalidate.patch
x86-head-64-re-enable-stack-protection.patch
x86-retpoline-add-noendbr-annotation-to-the-srso-dummy-return-thunk.patch

queue-5.10/kbuild-dummy-tools-adjust-to-stricter-stackprotector-check.patch [new file with mode: 0644]
queue-5.10/scsi-sd-fix-wrong-zone_write_granularity-value-during-revalidate.patch [new file with mode: 0644]
queue-5.10/series
queue-5.10/x86-head-64-re-enable-stack-protection.patch [new file with mode: 0644]
queue-5.10/x86-retpoline-add-noendbr-annotation-to-the-srso-dummy-return-thunk.patch [new file with mode: 0644]

diff --git a/queue-5.10/kbuild-dummy-tools-adjust-to-stricter-stackprotector-check.patch b/queue-5.10/kbuild-dummy-tools-adjust-to-stricter-stackprotector-check.patch
new file mode 100644 (file)
index 0000000..90fc39f
--- /dev/null
@@ -0,0 +1,38 @@
+From c93db682cfb213501881072a9200a48ce1dc3c3f Mon Sep 17 00:00:00 2001
+From: Michal Kubecek <mkubecek@suse.cz>
+Date: Sat, 15 May 2021 12:11:13 +0200
+Subject: kbuild: dummy-tools: adjust to stricter stackprotector check
+
+From: Michal Kubecek <mkubecek@suse.cz>
+
+commit c93db682cfb213501881072a9200a48ce1dc3c3f upstream.
+
+Commit 3fb0fdb3bbe7 ("x86/stackprotector/32: Make the canary into a regular
+percpu variable") modified the stackprotector check on 32-bit x86 to check
+if gcc supports using %fs as canary. Adjust dummy-tools gcc script to pass
+this new test by returning "%fs" rather than "%gs" if it detects
+-mstack-protector-guard-reg=fs on command line.
+
+Fixes: 3fb0fdb3bbe7 ("x86/stackprotector/32: Make the canary into a regular percpu variable")
+Signed-off-by: Michal Kubecek <mkubecek@suse.cz>
+Signed-off-by: Masahiro Yamada <masahiroy@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ scripts/dummy-tools/gcc |    6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/scripts/dummy-tools/gcc
++++ b/scripts/dummy-tools/gcc
+@@ -76,7 +76,11 @@ fi
+ if arg_contain -S "$@"; then
+       # For scripts/gcc-x86-*-has-stack-protector.sh
+       if arg_contain -fstack-protector "$@"; then
+-              echo "%gs"
++              if arg_contain -mstack-protector-guard-reg=fs "$@"; then
++                      echo "%fs"
++              else
++                      echo "%gs"
++              fi
+               exit 0
+       fi
+ fi
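
A note on what the probe expects: scripts/gcc-x86-*-has-stack-protector.sh feeds the compiler a tiny stack-protected function and checks whether the generated assembly references the expected segment register (%fs on 32-bit after 3fb0fdb3bbe7, %gs otherwise). The sketch below only illustrates that kind of test program and is not the exact in-tree source; the dummy gcc never compiles anything, so it merely has to print the matching register string.

/*
 * Illustrative only -- roughly the kind of input the stackprotector
 * probe compiles with -fstack-protector (and, on 32-bit,
 * -mstack-protector-guard-reg=fs).  A real compiler emits the canary
 * load/check through %fs or %gs, which the probe then looks for.
 */
int stackprotector_probe(void)
{
        char buf[200];          /* large enough to trigger the canary */

        buf[0] = 0;
        return buf[0];
}
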
diff --git a/queue-5.10/scsi-sd-fix-wrong-zone_write_granularity-value-during-revalidate.patch b/queue-5.10/scsi-sd-fix-wrong-zone_write_granularity-value-during-revalidate.patch
new file mode 100644 (file)
index 0000000..8d0527e
--- /dev/null
@@ -0,0 +1,70 @@
+From 288b3271d920c9ba949c3bab0f749f4cecc70e09 Mon Sep 17 00:00:00 2001
+From: Shin'ichiro Kawasaki <shinichiro.kawasaki@wdc.com>
+Date: Mon, 6 Mar 2023 15:30:24 +0900
+Subject: scsi: sd: Fix wrong zone_write_granularity value during revalidate
+
+From: Shin'ichiro Kawasaki <shinichiro.kawasaki@wdc.com>
+
+commit 288b3271d920c9ba949c3bab0f749f4cecc70e09 upstream.
+
+When the sd driver revalidates host-managed SMR disks, it calls
+disk_set_zoned() which changes the zone_write_granularity attribute value
+to the logical block size regardless of the device type. After that, the sd
+driver overwrites the value in sd_zbc_read_zone() with the physical block
+size, since ZBC/ZAC requires this for host-managed disks. Between the calls
+to disk_set_zoned() and sd_zbc_read_zone(), there exists a window where the
+attribute shows the logical block size as the zone_write_granularity value,
+which is wrong for host-managed disks. The duration of the window is from
+20ms to 200ms, depending on report zone command execution time.
+
+To avoid the wrong zone_write_granularity value between disk_set_zoned()
+and sd_zbc_read_zone(), modify the value not in sd_zbc_read_zone() but
+just after disk_set_zoned() call.
+
+Fixes: a805a4fa4fa3 ("block: introduce zone_write_granularity limit")
+Signed-off-by: Shin'ichiro Kawasaki <shinichiro.kawasaki@wdc.com>
+Link: https://lore.kernel.org/r/20230306063024.3376959-1-shinichiro.kawasaki@wdc.com
+Reviewed-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
+Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+Reviewed-by: Bart Van Assche <bvanassche@acm.org>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/scsi/sd.c     |    7 ++++++-
+ drivers/scsi/sd_zbc.c |    8 --------
+ 2 files changed, 6 insertions(+), 9 deletions(-)
+
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -3026,8 +3026,13 @@ static void sd_read_block_characteristic
+       }
+
+       if (sdkp->device->type == TYPE_ZBC) {
+-              /* Host-managed */
++              /*
++               * Host-managed: Per ZBC and ZAC specifications, writes in
++               * sequential write required zones of host-managed devices must
++               * be aligned to the device physical block size.
++               */
+               blk_queue_set_zoned(sdkp->disk, BLK_ZONED_HM);
++              blk_queue_zone_write_granularity(q, sdkp->physical_block_size);
+       } else {
+               sdkp->zoned = (buffer[8] >> 4) & 3;
+               if (sdkp->zoned == 1) {
+--- a/drivers/scsi/sd_zbc.c
++++ b/drivers/scsi/sd_zbc.c
+@@ -793,14 +793,6 @@ int sd_zbc_read_zones(struct scsi_disk *
+       blk_queue_max_active_zones(q, 0);
+       nr_zones = round_up(sdkp->capacity, zone_blocks) >> ilog2(zone_blocks);
+
+-      /*
+-       * Per ZBC and ZAC specifications, writes in sequential write required
+-       * zones of host-managed devices must be aligned to the device physical
+-       * block size.
+-       */
+-      if (blk_queue_zoned_model(q) == BLK_ZONED_HM)
+-              blk_queue_zone_write_granularity(q, sdkp->physical_block_size);
+-
+       /* READ16/WRITE16 is mandatory for ZBC disks */
+       sdkp->device->use_16_for_rw = 1;
+       sdkp->device->use_10_for_rw = 0;
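
The transient value described in the commit message is user-visible through the request queue's zone_write_granularity sysfs attribute (introduced by a805a4fa4fa3). A minimal observation sketch follows; the device name is a placeholder and the width of the window is not guaranteed.

/*
 * Minimal sketch, not part of the patch.  Read the sysfs attribute
 * while the SMR disk is being revalidated; on unpatched kernels the
 * value may briefly be the logical block size instead of the physical
 * block size.  "sdX" is a placeholder device name.
 */
#include <stdio.h>

int main(void)
{
        char buf[32];
        FILE *f = fopen("/sys/block/sdX/queue/zone_write_granularity", "r");

        if (!f)
                return 1;
        if (fgets(buf, sizeof(buf), f))
                printf("zone_write_granularity: %s", buf);
        fclose(f);
        return 0;
}
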
diff --git a/queue-5.10/series b/queue-5.10/series
index 526876f0a6b524d56e804107a1fcf3dcce05fbee..6d173101120754729ef37de193ba2f5be6c51e7a 100644 (file)
--- a/queue-5.10/series
@@ -288,3 +288,7 @@ x86-mm-pat-fix-vm_pat-handling-in-cow-mappings.patch
 drm-i915-gt-reset-queue_priority_hint-on-parking.patch
 bluetooth-btintel-fixe-build-regression.patch
 vmci-fix-possible-memcpy-run-time-warning-in-vmci_datagram_invoke_guest_handler.patch
+kbuild-dummy-tools-adjust-to-stricter-stackprotector-check.patch
+scsi-sd-fix-wrong-zone_write_granularity-value-during-revalidate.patch
+x86-retpoline-add-noendbr-annotation-to-the-srso-dummy-return-thunk.patch
+x86-head-64-re-enable-stack-protection.patch
diff --git a/queue-5.10/x86-head-64-re-enable-stack-protection.patch b/queue-5.10/x86-head-64-re-enable-stack-protection.patch
new file mode 100644 (file)
index 0000000..7572294
--- /dev/null
@@ -0,0 +1,151 @@
+From 469693d8f62299709e8ba56d8fb3da9ea990213c Mon Sep 17 00:00:00 2001
+From: Michael Roth <michael.roth@amd.com>
+Date: Wed, 9 Feb 2022 12:10:17 -0600
+Subject: x86/head/64: Re-enable stack protection
+
+From: Michael Roth <michael.roth@amd.com>
+
+commit 469693d8f62299709e8ba56d8fb3da9ea990213c upstream.
+
+Due to
+
+  103a4908ad4d ("x86/head/64: Disable stack protection for head$(BITS).o")
+
+kernel/head{32,64}.c are compiled with -fno-stack-protector to allow
+a call to set_bringup_idt_handler(), which would otherwise have stack
+protection enabled with CONFIG_STACKPROTECTOR_STRONG.
+
+While sufficient for that case, there may still be issues with calls to
+any external functions that were compiled with stack protection enabled
+that in-turn make stack-protected calls, or if the exception handlers
+set up by set_bringup_idt_handler() make calls to stack-protected
+functions.
+
+Subsequent patches for SEV-SNP CPUID validation support will introduce
+both such cases. Attempting to disable stack protection for everything
+in scope to address that is prohibitive since much of the code, like the
+SEV-ES #VC handler, is shared code that remains in use after boot and
+could benefit from having stack protection enabled. Attempting to inline
+calls is brittle and can quickly balloon out to library/helper code
+where that's not really an option.
+
+Instead, re-enable stack protection for head32.c/head64.c, and make the
+appropriate changes to ensure the segment used for the stack canary is
+initialized in advance of any stack-protected C calls.
+
+For head64.c:
+
+- The BSP will enter from startup_64() and call into C code
+  (startup_64_setup_env()) shortly after setting up the stack, which
+  may result in calls to stack-protected code. Set up %gs early to allow
+  for this safely.
+- APs will enter from secondary_startup_64*(), and %gs will be set up
+  soon after. There is one call to C code prior to %gs being setup
+  (__startup_secondary_64()), but it is only to fetch 'sme_me_mask'
+  global, so just load 'sme_me_mask' directly instead, and remove the
+  now-unused __startup_secondary_64() function.
+
+For head32.c:
+
+- BSPs/APs will set %fs to __BOOT_DS prior to any C calls. In recent
+  kernels, the compiler is configured to access the stack canary at
+  %fs:__stack_chk_guard [1], which overlaps with the initial per-cpu
+  '__stack_chk_guard' variable in the initial/"master" .data..percpu
+  area. This is sufficient to allow access to the canary for use
+  during initial startup, so no changes are needed there.
+
+[1] 3fb0fdb3bbe7 ("x86/stackprotector/32: Make the canary into a regular percpu variable")
+
+  [ bp: Massage commit message. ]
+
+Suggested-by: Joerg Roedel <jroedel@suse.de> #for 64-bit %gs set up
+Signed-off-by: Michael Roth <michael.roth@amd.com>
+Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Link: https://lore.kernel.org/r/20220307213356.2797205-24-brijesh.singh@amd.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/setup.h |    1 -
+ arch/x86/kernel/Makefile     |    1 -
+ arch/x86/kernel/head64.c     |    9 ---------
+ arch/x86/kernel/head_64.S    |   24 +++++++++++++++++++++---
+ 4 files changed, 21 insertions(+), 14 deletions(-)
+
+--- a/arch/x86/include/asm/setup.h
++++ b/arch/x86/include/asm/setup.h
+@@ -49,7 +49,6 @@ extern unsigned long saved_video_mode;
+ extern void reserve_standard_io_resources(void);
+ extern void i386_reserve_resources(void);
+ extern unsigned long __startup_64(unsigned long physaddr, struct boot_params *bp);
+-extern unsigned long __startup_secondary_64(void);
+ extern void startup_64_setup_env(unsigned long physbase);
+ extern void early_setup_idt(void);
+ extern void __init do_early_exception(struct pt_regs *regs, int trapnr);
+--- a/arch/x86/kernel/Makefile
++++ b/arch/x86/kernel/Makefile
+@@ -49,7 +49,6 @@ endif
+ # non-deterministic coverage.
+ KCOV_INSTRUMENT               := n
+
+-CFLAGS_head$(BITS).o  += -fno-stack-protector
+ CFLAGS_cc_platform.o  += -fno-stack-protector
+
+ CFLAGS_irq.o := -I $(srctree)/$(src)/../include/asm/trace
+--- a/arch/x86/kernel/head64.c
++++ b/arch/x86/kernel/head64.c
+@@ -302,15 +302,6 @@ unsigned long __head __startup_64(unsign
+       return sme_get_me_mask();
+ }
+
+-unsigned long __startup_secondary_64(void)
+-{
+-      /*
+-       * Return the SME encryption mask (if SME is active) to be used as a
+-       * modifier for the initial pgdir entry programmed into CR3.
+-       */
+-      return sme_get_me_mask();
+-}
+-
+ /* Wipe all early page tables except for the kernel symbol map */
+ static void __init reset_early_page_tables(void)
+ {
+--- a/arch/x86/kernel/head_64.S
++++ b/arch/x86/kernel/head_64.S
+@@ -74,6 +74,22 @@ SYM_CODE_START_NOALIGN(startup_64)
+       leaq    (__end_init_task - SIZEOF_PTREGS)(%rip), %rsp
+
+       leaq    _text(%rip), %rdi
++
++      /*
++       * initial_gs points to initial fixed_percpu_data struct with storage for
++       * the stack protector canary. Global pointer fixups are needed at this
++       * stage, so apply them as is done in fixup_pointer(), and initialize %gs
++       * such that the canary can be accessed at %gs:40 for subsequent C calls.
++       */
++      movl    $MSR_GS_BASE, %ecx
++      movq    initial_gs(%rip), %rax
++      movq    $_text, %rdx
++      subq    %rdx, %rax
++      addq    %rdi, %rax
++      movq    %rax, %rdx
++      shrq    $32,  %rdx
++      wrmsr
++
+       pushq   %rsi
+       call    startup_64_setup_env
+       popq    %rsi
+@@ -141,9 +157,11 @@ SYM_INNER_LABEL(secondary_startup_64_no_
+        * Retrieve the modifier (SME encryption mask if SME is active) to be
+        * added to the initial pgdir entry that will be programmed into CR3.
+        */
+-      pushq   %rsi
+-      call    __startup_secondary_64
+-      popq    %rsi
++#ifdef CONFIG_AMD_MEM_ENCRYPT
++      movq    sme_me_mask, %rax
++#else
++      xorq    %rax, %rax
++#endif
+
+       /* Form the CR3 value being sure to include the CR3 modifier */
+       addq    $(init_top_pgt - __START_KERNEL_map), %rax
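
The address computation added to startup_64 above is the assembly form of the fixup_pointer() idiom mentioned in the commit message: before page tables and relocations are in place, a link-time virtual address (here the fixed_percpu_data pointer stored in initial_gs) has to be rebased onto the address the kernel was actually loaded at. A rough C equivalent, with illustrative names:

/*
 * Rough C equivalent of the fixup done in the added assembly (and of
 * fixup_pointer() in head64.c): rebase a link-time address onto the
 * actual load address.  Parameter names are illustrative.
 */
static inline void *early_fixup_pointer(void *link_addr,
                                        unsigned long load_physaddr,
                                        unsigned long link_text_start)
{
        return (void *)((unsigned long)link_addr - link_text_start +
                        load_physaddr);
}
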
diff --git a/queue-5.10/x86-retpoline-add-noendbr-annotation-to-the-srso-dummy-return-thunk.patch b/queue-5.10/x86-retpoline-add-noendbr-annotation-to-the-srso-dummy-return-thunk.patch
new file mode 100644 (file)
index 0000000..afc9407
--- /dev/null
@@ -0,0 +1,35 @@
+From b377c66ae3509ccea596512d6afb4777711c4870 Mon Sep 17 00:00:00 2001
+From: "Borislav Petkov (AMD)" <bp@alien8.de>
+Date: Fri, 5 Apr 2024 16:46:37 +0200
+Subject: x86/retpoline: Add NOENDBR annotation to the SRSO dummy return thunk
+
+From: Borislav Petkov (AMD) <bp@alien8.de>
+
+commit b377c66ae3509ccea596512d6afb4777711c4870 upstream.
+
+srso_alias_untrain_ret() is special code, even if it is a dummy
+which is called in the !SRSO case, so annotate it like its real
+counterpart, to address the following objtool splat:
+
+  vmlinux.o: warning: objtool: .export_symbol+0x2b290: data relocation to !ENDBR: srso_alias_untrain_ret+0x0
+
+Fixes: 4535e1a4174c ("x86/bugs: Fix the SRSO mitigation on Zen3/4")
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Link: https://lore.kernel.org/r/20240405144637.17908-1-bp@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/lib/retpoline.S |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/x86/lib/retpoline.S
++++ b/arch/x86/lib/retpoline.S
+@@ -258,6 +258,7 @@ SYM_CODE_START(__x86_return_thunk)
+       UNWIND_HINT_FUNC
+       ANNOTATE_NOENDBR
+       ANNOTATE_UNRET_SAFE
++      ANNOTATE_NOENDBR
+       ret
+       int3
+ SYM_CODE_END(__x86_return_thunk)