6.1-stable patches
author	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
	Mon, 4 Mar 2024 07:38:20 +0000 (08:38 +0100)
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
	Mon, 4 Mar 2024 07:38:20 +0000 (08:38 +0100)
added patches:
efivarfs-request-at-most-512-bytes-for-variable-names.patch
fs-aio-make-io_cancel-generate-completions-again.patch
pmdomain-qcom-rpmhpd-fix-enabled_corner-aggregation.patch
x86-cpu-intel-detect-tme-keyid-bits-before-setting-mtrr-mask-registers.patch
x86-e820-don-t-reserve-setup_rng_seed-in-e820.patch

queue-6.1/efivarfs-request-at-most-512-bytes-for-variable-names.patch [new file with mode: 0644]
queue-6.1/fs-aio-make-io_cancel-generate-completions-again.patch [new file with mode: 0644]
queue-6.1/pmdomain-qcom-rpmhpd-fix-enabled_corner-aggregation.patch [new file with mode: 0644]
queue-6.1/series
queue-6.1/x86-cpu-intel-detect-tme-keyid-bits-before-setting-mtrr-mask-registers.patch [new file with mode: 0644]
queue-6.1/x86-e820-don-t-reserve-setup_rng_seed-in-e820.patch [new file with mode: 0644]

diff --git a/queue-6.1/efivarfs-request-at-most-512-bytes-for-variable-names.patch b/queue-6.1/efivarfs-request-at-most-512-bytes-for-variable-names.patch
new file mode 100644 (file)
index 0000000..3e53993
--- /dev/null
@@ -0,0 +1,74 @@
+From f45812cc23fb74bef62d4eb8a69fe7218f4b9f2a Mon Sep 17 00:00:00 2001
+From: Tim Schumacher <timschumi@gmx.de>
+Date: Fri, 26 Jan 2024 17:25:23 +0100
+Subject: efivarfs: Request at most 512 bytes for variable names
+
+From: Tim Schumacher <timschumi@gmx.de>
+
+commit f45812cc23fb74bef62d4eb8a69fe7218f4b9f2a upstream.
+
+Work around a quirk in a few old (2011-ish) UEFI implementations, where
+a call to `GetNextVariableName` with a buffer size larger than 512 bytes
+will always return EFI_INVALID_PARAMETER.
+
+There is some lore around EFI variable names being up to 1024 bytes in
+size, but this has no basis in the UEFI specification; the upper
+bounds are typically platform specific and apply to the entire variable
+(name plus payload).
+
+Given that Linux does not permit creating files with names longer than
+NAME_MAX (255) bytes, 512 bytes (== 256 UTF-16 characters) is a
+reasonable limit.
+
+Cc: <stable@vger.kernel.org> # 6.1+
+Signed-off-by: Tim Schumacher <timschumi@gmx.de>
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/efivarfs/vars.c |   17 +++++++++++------
+ 1 file changed, 11 insertions(+), 6 deletions(-)
+
+--- a/fs/efivarfs/vars.c
++++ b/fs/efivarfs/vars.c
+@@ -372,7 +372,7 @@ static void dup_variable_bug(efi_char16_
+ int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *),
+               void *data, bool duplicates, struct list_head *head)
+ {
+-      unsigned long variable_name_size = 1024;
++      unsigned long variable_name_size = 512;
+       efi_char16_t *variable_name;
+       efi_status_t status;
+       efi_guid_t vendor_guid;
+@@ -389,12 +389,13 @@ int efivar_init(int (*func)(efi_char16_t
+               goto free;
+       /*
+-       * Per EFI spec, the maximum storage allocated for both
+-       * the variable name and variable data is 1024 bytes.
++       * A small set of old UEFI implementations reject sizes
++       * above a certain threshold, the lowest seen in the wild
++       * is 512.
+        */
+       do {
+-              variable_name_size = 1024;
++              variable_name_size = 512;
+               status = efivar_get_next_variable(&variable_name_size,
+                                                 variable_name,
+@@ -431,9 +432,13 @@ int efivar_init(int (*func)(efi_char16_t
+                       break;
+               case EFI_NOT_FOUND:
+                       break;
++              case EFI_BUFFER_TOO_SMALL:
++                      pr_warn("efivars: Variable name size exceeds maximum (%lu > 512)\n",
++                              variable_name_size);
++                      status = EFI_NOT_FOUND;
++                      break;
+               default:
+-                      printk(KERN_WARNING "efivars: get_next_variable: status=%lx\n",
+-                              status);
++                      pr_warn("efivars: get_next_variable: status=%lx\n", status);
+                       status = EFI_NOT_FOUND;
+                       break;
+               }
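
The 512-byte figure follows directly from the filesystem's own naming limit. As a back-of-the-envelope check, here is a minimal user-space sketch of the buffer arithmetic only (not the kernel code):

#include <assert.h>
#include <limits.h>	/* NAME_MAX == 255 on Linux */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/*
	 * efivarfs exposes each variable as a file, so a name longer than
	 * NAME_MAX characters could never be represented anyway.  256
	 * UTF-16 code units (255 characters plus the NUL terminator) at
	 * 2 bytes each is exactly the 512 bytes the patch requests from
	 * GetNextVariableName.
	 */
	size_t buf_bytes = (NAME_MAX + 1) * sizeof(uint16_t);

	assert(buf_bytes == 512);
	printf("GetNextVariableName buffer: %zu bytes\n", buf_bytes);
	return 0;
}
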
diff --git a/queue-6.1/fs-aio-make-io_cancel-generate-completions-again.patch b/queue-6.1/fs-aio-make-io_cancel-generate-completions-again.patch
new file mode 100644 (file)
index 0000000..18eb258
--- /dev/null
@@ -0,0 +1,85 @@
+From 54cbc058d86beca3515c994039b5c0f0a34f53dd Mon Sep 17 00:00:00 2001
+From: Bart Van Assche <bvanassche@acm.org>
+Date: Thu, 15 Feb 2024 12:47:39 -0800
+Subject: fs/aio: Make io_cancel() generate completions again
+
+From: Bart Van Assche <bvanassche@acm.org>
+
+commit 54cbc058d86beca3515c994039b5c0f0a34f53dd upstream.
+
+The following patch accidentally removed the code for delivering
+completions for cancelled reads and writes to user space: "[PATCH 04/33]
+aio: remove retry-based AIO"
+(https://lore.kernel.org/all/1363883754-27966-5-git-send-email-koverstreet@google.com/)
+From that patch:
+
+-      if (kiocbIsCancelled(iocb)) {
+-              ret = -EINTR;
+-              aio_complete(iocb, ret, 0);
+-              /* must not access the iocb after this */
+-              goto out;
+-      }
+
+This leads to a leak of a struct iocb in user space. Hence this patch,
+which restores the code that reports a successful read or write
+cancellation to user space.
+
+Fixes: 41003a7bcfed ("aio: remove retry-based AIO")
+Cc: Christoph Hellwig <hch@lst.de>
+Cc: Avi Kivity <avi@scylladb.com>
+Cc: Sandeep Dhavale <dhavale@google.com>
+Cc: Jens Axboe <axboe@kernel.dk>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Kent Overstreet <kent.overstreet@linux.dev>
+Cc: stable@vger.kernel.org
+Signed-off-by: Bart Van Assche <bvanassche@acm.org>
+Link: https://lore.kernel.org/r/20240215204739.2677806-3-bvanassche@acm.org
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/aio.c |   27 +++++++++++----------------
+ 1 file changed, 11 insertions(+), 16 deletions(-)
+
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -2141,14 +2141,11 @@ COMPAT_SYSCALL_DEFINE3(io_submit, compat
+ #endif
+ /* sys_io_cancel:
+- *    Attempts to cancel an iocb previously passed to io_submit.  If
+- *    the operation is successfully cancelled, the resulting event is
+- *    copied into the memory pointed to by result without being placed
+- *    into the completion queue and 0 is returned.  May fail with
+- *    -EFAULT if any of the data structures pointed to are invalid.
+- *    May fail with -EINVAL if aio_context specified by ctx_id is
+- *    invalid.  May fail with -EAGAIN if the iocb specified was not
+- *    cancelled.  Will fail with -ENOSYS if not implemented.
++ *    Attempts to cancel an iocb previously passed to io_submit(). If the
++ *    operation is successfully cancelled 0 is returned. May fail with
++ *    -EFAULT if any of the data structures pointed to are invalid. May
++ *    fail with -EINVAL if aio_context specified by ctx_id is invalid. Will
++ *    fail with -ENOSYS if not implemented.
+  */
+ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
+               struct io_event __user *, result)
+@@ -2179,14 +2176,12 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t
+       }
+       spin_unlock_irq(&ctx->ctx_lock);
+-      if (!ret) {
+-              /*
+-               * The result argument is no longer used - the io_event is
+-               * always delivered via the ring buffer. -EINPROGRESS indicates
+-               * cancellation is progress:
+-               */
+-              ret = -EINPROGRESS;
+-      }
++      /*
++       * The result argument is no longer used - the io_event is always
++       * delivered via the ring buffer.
++       */
++      if (ret == 0 && kiocb->rw.ki_flags & IOCB_AIO_RW)
++              aio_complete_rw(&kiocb->rw, -EINTR);
+       percpu_ref_put(&ctx->users);
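
The user-visible contract after this fix: a successful io_cancel() returns 0 and the cancelled iocb still completes, with res == -EINTR, through the ring buffer like any other event. A hedged sketch of that call sequence using raw syscalls (whether cancellation actually succeeds depends on the backing file's driver implementing ki_cancel; regular-file I/O commonly just returns -EAGAIN here):

#include <linux/aio_abi.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

static long sys_io_setup(unsigned nr, aio_context_t *ctx)
{ return syscall(SYS_io_setup, nr, ctx); }
static long sys_io_submit(aio_context_t ctx, long n, struct iocb **iocbpp)
{ return syscall(SYS_io_submit, ctx, n, iocbpp); }
static long sys_io_cancel(aio_context_t ctx, struct iocb *iocb, struct io_event *unused)
{ return syscall(SYS_io_cancel, ctx, iocb, unused); }
static long sys_io_getevents(aio_context_t ctx, long min_nr, long nr, struct io_event *ev)
{ return syscall(SYS_io_getevents, ctx, min_nr, nr, ev, NULL); }

int main(void)
{
	aio_context_t ctx = 0;
	static char buf[4096];
	struct iocb cb = { 0 };
	struct iocb *cbs[1] = { &cb };
	struct io_event ev, dummy;

	cb.aio_lio_opcode = IOCB_CMD_PREAD;
	cb.aio_fildes = 0;	/* stdin; a fd that can stay in flight long enough to cancel */
	cb.aio_buf = (unsigned long)buf;
	cb.aio_nbytes = sizeof(buf);

	if (sys_io_setup(8, &ctx) < 0 || sys_io_submit(ctx, 1, cbs) != 1)
		return 1;

	/* 0 == cancelled; -EAGAIN is the common case when the driver
	 * has no ki_cancel implementation. */
	if (sys_io_cancel(ctx, &cb, &dummy) == 0) {
		/* With the fix, the cancelled iocb still completes: */
		sys_io_getevents(ctx, 1, 1, &ev);
		printf("completion res=%lld (expect -EINTR == -4)\n",
		       (long long)ev.res);
	}
	return 0;
}
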
diff --git a/queue-6.1/pmdomain-qcom-rpmhpd-fix-enabled_corner-aggregation.patch b/queue-6.1/pmdomain-qcom-rpmhpd-fix-enabled_corner-aggregation.patch
new file mode 100644 (file)
index 0000000..3b9e7f7
--- /dev/null
@@ -0,0 +1,66 @@
+From 2a93c6cbd5a703d44c414a3c3945a87ce11430ba Mon Sep 17 00:00:00 2001
+From: Bjorn Andersson <quic_bjorande@quicinc.com>
+Date: Mon, 26 Feb 2024 17:49:57 -0800
+Subject: pmdomain: qcom: rpmhpd: Fix enabled_corner aggregation
+
+From: Bjorn Andersson <quic_bjorande@quicinc.com>
+
+commit 2a93c6cbd5a703d44c414a3c3945a87ce11430ba upstream.
+
+Commit 'e3e56c050ab6 ("soc: qcom: rpmhpd: Make power_on actually enable
+the domain")' aimed to make sure that a power-domain that is being
+enabled without any particular performance-state requested will at least
+turn the rail on, to avoid filling DeviceTree with otherwise unnecessary
+required-opps properties.
+
+But when aggregation happens on a disabled power-domain whose enabled
+peer has no performance-state vote, both the local and the peer corner
+are 0. The peer's enabled_corner is not considered, so the underlying
+(shared) resource ends up disabled.
+
+One case where this can be observed is when the display stack keeps mmcx
+enabled (but without a particular performance-state vote) in order to
+access registers and sync_state happens in the rpmhpd driver. As mmcx_ao
+is flushed the state of the peer (mmcx) is not considered and mmcx_ao
+ends up turning off "mmcx.lvl" underneath mmcx. This has been observed
+several times, but has been painted over in DeviceTree by adding an
+explicit vote for the lowest non-disabled performance-state.
+
+Fixes: e3e56c050ab6 ("soc: qcom: rpmhpd: Make power_on actually enable the domain")
+Reported-by: Johan Hovold <johan@kernel.org>
+Closes: https://lore.kernel.org/linux-arm-msm/ZdMwZa98L23mu3u6@hovoldconsulting.com/
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Bjorn Andersson <quic_bjorande@quicinc.com>
+Reviewed-by: Konrad Dybcio <konrad.dybcio@linaro.org>
+Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+Tested-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+Reviewed-by: Abhinav Kumar <quic_abhinavk@quicinc.com>
+Reviewed-by: Stephen Boyd <swboyd@chromium.org>
+Tested-by: Johan Hovold <johan+linaro@kernel.org>
+Link: https://lore.kernel.org/r/20240226-rpmhpd-enable-corner-fix-v1-1-68c004cec48c@quicinc.com
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/soc/qcom/rpmhpd.c |    7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/drivers/soc/qcom/rpmhpd.c
++++ b/drivers/soc/qcom/rpmhpd.c
+@@ -492,12 +492,15 @@ static int rpmhpd_aggregate_corner(struc
+       unsigned int active_corner, sleep_corner;
+       unsigned int this_active_corner = 0, this_sleep_corner = 0;
+       unsigned int peer_active_corner = 0, peer_sleep_corner = 0;
++      unsigned int peer_enabled_corner;
+       to_active_sleep(pd, corner, &this_active_corner, &this_sleep_corner);
+-      if (peer && peer->enabled)
+-              to_active_sleep(peer, peer->corner, &peer_active_corner,
++      if (peer && peer->enabled) {
++              peer_enabled_corner = max(peer->corner, peer->enable_corner);
++              to_active_sleep(peer, peer_enabled_corner, &peer_active_corner,
+                               &peer_sleep_corner);
++      }
+       active_corner = max(this_active_corner, peer_active_corner);
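
The arithmetic of the bug is easy to model outside the driver. In the following hedged sketch (field names mirror the driver, but the model is deliberately simplified: no active/sleep split, no RPMh writes), a peer held enabled with no performance-state vote has corner == 0 but was powered on at enable_corner; ignoring the latter zeroes the shared rail:

#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))

struct pd {
	int enabled;
	unsigned int corner;		/* currently requested performance level */
	unsigned int enable_corner;	/* level the rail was turned on at */
};

static unsigned int aggregate(unsigned int this_corner, const struct pd *peer, int fixed)
{
	unsigned int peer_corner = 0;

	if (peer && peer->enabled)
		peer_corner = fixed ? MAX(peer->corner, peer->enable_corner)
				    : peer->corner;

	return MAX(this_corner, peer_corner);
}

int main(void)
{
	/* mmcx held enabled without a performance-state vote: corner stays 0,
	 * but power_on raised the rail to enable_corner (the lowest
	 * non-disabled level), per commit e3e56c050ab6. */
	const struct pd mmcx = { .enabled = 1, .corner = 0, .enable_corner = 1 };

	/* mmcx_ao is flushed while itself disabled (this_corner == 0): */
	printf("old aggregation   -> level %u (shared rail shut off)\n",
	       aggregate(0, &mmcx, 0));
	printf("fixed aggregation -> level %u (peer keeps the rail up)\n",
	       aggregate(0, &mmcx, 1));
	return 0;
}
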
diff --git a/queue-6.1/series b/queue-6.1/series
index 7353f3a83bc69174f19abb7ca4c1092ef1c2d91a..8a07620ffbb03b6376d1af9153df5f7ebfff4207 100644 (file)
@@ -92,3 +92,8 @@ mmc-core-fix-emmc-initialization-with-1-bit-bus-connection.patch
 mmc-sdhci-xenon-add-timeout-for-phy-init-complete.patch
 mmc-sdhci-xenon-fix-phy-init-clock-stability.patch
 riscv-add-caller_addrx-support.patch
+efivarfs-request-at-most-512-bytes-for-variable-names.patch
+pmdomain-qcom-rpmhpd-fix-enabled_corner-aggregation.patch
+fs-aio-make-io_cancel-generate-completions-again.patch
+x86-e820-don-t-reserve-setup_rng_seed-in-e820.patch
+x86-cpu-intel-detect-tme-keyid-bits-before-setting-mtrr-mask-registers.patch
diff --git a/queue-6.1/x86-cpu-intel-detect-tme-keyid-bits-before-setting-mtrr-mask-registers.patch b/queue-6.1/x86-cpu-intel-detect-tme-keyid-bits-before-setting-mtrr-mask-registers.patch
new file mode 100644 (file)
index 0000000..16dbeac
--- /dev/null
@@ -0,0 +1,243 @@
+From 6890cb1ace350b4386c8aee1343dc3b3ddd214da Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Thu, 1 Feb 2024 00:09:02 +0100
+Subject: x86/cpu/intel: Detect TME keyid bits before setting MTRR mask registers
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit 6890cb1ace350b4386c8aee1343dc3b3ddd214da upstream.
+
+MKTME repurposes the high bits of the physical address as the key ID of
+the encryption key, and even though MAXPHYADDR in CPUID[0x80000008]
+remains the same, the valid bits in the MTRR mask registers are based on
+the reduced number of physical address bits.
+
+detect_tme() in arch/x86/kernel/cpu/intel.c detects TME and subtracts
+it from the total usable physical bits, but it is called too late.
+Move the call to early_init_intel() so that it is called in setup_arch(),
+before MTRRs are setup.
+
+This fixes boot on TDX-enabled systems, which until now only worked with
+"disable_mtrr_cleanup".  Without the patch, the values written to the
+MTRRs mask registers were 52-bit wide (e.g. 0x000fffff_80000800) and
+the writes failed; with the patch, the values are 46-bit wide, which
+matches the reduced MAXPHYADDR that is shown in /proc/cpuinfo.
+
+Reported-by: Zixi Chen <zixchen@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/all/20240131230902.1867092-3-pbonzini%40redhat.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/intel.c |  178 ++++++++++++++++++++++----------------------
+ 1 file changed, 91 insertions(+), 87 deletions(-)
+
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -216,6 +216,90 @@ int intel_cpu_collect_info(struct ucode_
+ }
+ EXPORT_SYMBOL_GPL(intel_cpu_collect_info);
++#define MSR_IA32_TME_ACTIVATE         0x982
++
++/* Helpers to access TME_ACTIVATE MSR */
++#define TME_ACTIVATE_LOCKED(x)                (x & 0x1)
++#define TME_ACTIVATE_ENABLED(x)               (x & 0x2)
++
++#define TME_ACTIVATE_POLICY(x)                ((x >> 4) & 0xf)        /* Bits 7:4 */
++#define TME_ACTIVATE_POLICY_AES_XTS_128       0
++
++#define TME_ACTIVATE_KEYID_BITS(x)    ((x >> 32) & 0xf)       /* Bits 35:32 */
++
++#define TME_ACTIVATE_CRYPTO_ALGS(x)   ((x >> 48) & 0xffff)    /* Bits 63:48 */
++#define TME_ACTIVATE_CRYPTO_AES_XTS_128       1
++
++/* Values for mktme_status (SW only construct) */
++#define MKTME_ENABLED                 0
++#define MKTME_DISABLED                        1
++#define MKTME_UNINITIALIZED           2
++static int mktme_status = MKTME_UNINITIALIZED;
++
++static void detect_tme_early(struct cpuinfo_x86 *c)
++{
++      u64 tme_activate, tme_policy, tme_crypto_algs;
++      int keyid_bits = 0, nr_keyids = 0;
++      static u64 tme_activate_cpu0 = 0;
++
++      rdmsrl(MSR_IA32_TME_ACTIVATE, tme_activate);
++
++      if (mktme_status != MKTME_UNINITIALIZED) {
++              if (tme_activate != tme_activate_cpu0) {
++                      /* Broken BIOS? */
++                      pr_err_once("x86/tme: configuration is inconsistent between CPUs\n");
++                      pr_err_once("x86/tme: MKTME is not usable\n");
++                      mktme_status = MKTME_DISABLED;
++
++                      /* Proceed. We may need to exclude bits from x86_phys_bits. */
++              }
++      } else {
++              tme_activate_cpu0 = tme_activate;
++      }
++
++      if (!TME_ACTIVATE_LOCKED(tme_activate) || !TME_ACTIVATE_ENABLED(tme_activate)) {
++              pr_info_once("x86/tme: not enabled by BIOS\n");
++              mktme_status = MKTME_DISABLED;
++              return;
++      }
++
++      if (mktme_status != MKTME_UNINITIALIZED)
++              goto detect_keyid_bits;
++
++      pr_info("x86/tme: enabled by BIOS\n");
++
++      tme_policy = TME_ACTIVATE_POLICY(tme_activate);
++      if (tme_policy != TME_ACTIVATE_POLICY_AES_XTS_128)
++              pr_warn("x86/tme: Unknown policy is active: %#llx\n", tme_policy);
++
++      tme_crypto_algs = TME_ACTIVATE_CRYPTO_ALGS(tme_activate);
++      if (!(tme_crypto_algs & TME_ACTIVATE_CRYPTO_AES_XTS_128)) {
++              pr_err("x86/mktme: No known encryption algorithm is supported: %#llx\n",
++                              tme_crypto_algs);
++              mktme_status = MKTME_DISABLED;
++      }
++detect_keyid_bits:
++      keyid_bits = TME_ACTIVATE_KEYID_BITS(tme_activate);
++      nr_keyids = (1UL << keyid_bits) - 1;
++      if (nr_keyids) {
++              pr_info_once("x86/mktme: enabled by BIOS\n");
++              pr_info_once("x86/mktme: %d KeyIDs available\n", nr_keyids);
++      } else {
++              pr_info_once("x86/mktme: disabled by BIOS\n");
++      }
++
++      if (mktme_status == MKTME_UNINITIALIZED) {
++              /* MKTME is usable */
++              mktme_status = MKTME_ENABLED;
++      }
++
++      /*
++       * KeyID bits effectively lower the number of physical address
++       * bits.  Update cpuinfo_x86::x86_phys_bits accordingly.
++       */
++      c->x86_phys_bits -= keyid_bits;
++}
++
+ static void early_init_intel(struct cpuinfo_x86 *c)
+ {
+       u64 misc_enable;
+@@ -367,6 +451,13 @@ static void early_init_intel(struct cpui
+        */
+       if (detect_extended_topology_early(c) < 0)
+               detect_ht_early(c);
++
++      /*
++       * Adjust the number of physical bits early because it affects the
++       * valid bits of the MTRR mask registers.
++       */
++      if (cpu_has(c, X86_FEATURE_TME))
++              detect_tme_early(c);
+ }
+ static void bsp_init_intel(struct cpuinfo_x86 *c)
+@@ -527,90 +618,6 @@ static void srat_detect_node(struct cpui
+ #endif
+ }
+-#define MSR_IA32_TME_ACTIVATE         0x982
+-
+-/* Helpers to access TME_ACTIVATE MSR */
+-#define TME_ACTIVATE_LOCKED(x)                (x & 0x1)
+-#define TME_ACTIVATE_ENABLED(x)               (x & 0x2)
+-
+-#define TME_ACTIVATE_POLICY(x)                ((x >> 4) & 0xf)        /* Bits 7:4 */
+-#define TME_ACTIVATE_POLICY_AES_XTS_128       0
+-
+-#define TME_ACTIVATE_KEYID_BITS(x)    ((x >> 32) & 0xf)       /* Bits 35:32 */
+-
+-#define TME_ACTIVATE_CRYPTO_ALGS(x)   ((x >> 48) & 0xffff)    /* Bits 63:48 */
+-#define TME_ACTIVATE_CRYPTO_AES_XTS_128       1
+-
+-/* Values for mktme_status (SW only construct) */
+-#define MKTME_ENABLED                 0
+-#define MKTME_DISABLED                        1
+-#define MKTME_UNINITIALIZED           2
+-static int mktme_status = MKTME_UNINITIALIZED;
+-
+-static void detect_tme(struct cpuinfo_x86 *c)
+-{
+-      u64 tme_activate, tme_policy, tme_crypto_algs;
+-      int keyid_bits = 0, nr_keyids = 0;
+-      static u64 tme_activate_cpu0 = 0;
+-
+-      rdmsrl(MSR_IA32_TME_ACTIVATE, tme_activate);
+-
+-      if (mktme_status != MKTME_UNINITIALIZED) {
+-              if (tme_activate != tme_activate_cpu0) {
+-                      /* Broken BIOS? */
+-                      pr_err_once("x86/tme: configuration is inconsistent between CPUs\n");
+-                      pr_err_once("x86/tme: MKTME is not usable\n");
+-                      mktme_status = MKTME_DISABLED;
+-
+-                      /* Proceed. We may need to exclude bits from x86_phys_bits. */
+-              }
+-      } else {
+-              tme_activate_cpu0 = tme_activate;
+-      }
+-
+-      if (!TME_ACTIVATE_LOCKED(tme_activate) || !TME_ACTIVATE_ENABLED(tme_activate)) {
+-              pr_info_once("x86/tme: not enabled by BIOS\n");
+-              mktme_status = MKTME_DISABLED;
+-              return;
+-      }
+-
+-      if (mktme_status != MKTME_UNINITIALIZED)
+-              goto detect_keyid_bits;
+-
+-      pr_info("x86/tme: enabled by BIOS\n");
+-
+-      tme_policy = TME_ACTIVATE_POLICY(tme_activate);
+-      if (tme_policy != TME_ACTIVATE_POLICY_AES_XTS_128)
+-              pr_warn("x86/tme: Unknown policy is active: %#llx\n", tme_policy);
+-
+-      tme_crypto_algs = TME_ACTIVATE_CRYPTO_ALGS(tme_activate);
+-      if (!(tme_crypto_algs & TME_ACTIVATE_CRYPTO_AES_XTS_128)) {
+-              pr_err("x86/mktme: No known encryption algorithm is supported: %#llx\n",
+-                              tme_crypto_algs);
+-              mktme_status = MKTME_DISABLED;
+-      }
+-detect_keyid_bits:
+-      keyid_bits = TME_ACTIVATE_KEYID_BITS(tme_activate);
+-      nr_keyids = (1UL << keyid_bits) - 1;
+-      if (nr_keyids) {
+-              pr_info_once("x86/mktme: enabled by BIOS\n");
+-              pr_info_once("x86/mktme: %d KeyIDs available\n", nr_keyids);
+-      } else {
+-              pr_info_once("x86/mktme: disabled by BIOS\n");
+-      }
+-
+-      if (mktme_status == MKTME_UNINITIALIZED) {
+-              /* MKTME is usable */
+-              mktme_status = MKTME_ENABLED;
+-      }
+-
+-      /*
+-       * KeyID bits effectively lower the number of physical address
+-       * bits.  Update cpuinfo_x86::x86_phys_bits accordingly.
+-       */
+-      c->x86_phys_bits -= keyid_bits;
+-}
+-
+ static void init_cpuid_fault(struct cpuinfo_x86 *c)
+ {
+       u64 msr;
+@@ -747,9 +754,6 @@ static void init_intel(struct cpuinfo_x8
+       init_ia32_feat_ctl(c);
+-      if (cpu_has(c, X86_FEATURE_TME))
+-              detect_tme(c);
+-
+       init_intel_misc_features(c);
+       split_lock_init();
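
The failure mode is pure bit arithmetic: the PhysMask written to a variable-range MTRR may only set bits below the effective MAXPHYADDR, and TME's keyid bits lower that ceiling. A hedged sketch reproducing the widths quoted in the commit message (52 raw physical bits; the 6 keyid bits are assumed for illustration, TME_ACTIVATE reports the real count):

#include <stdint.h>
#include <stdio.h>

#define MTRR_PHYSMASK_V (1ULL << 11)	/* valid bit in IA32_MTRR_PHYSMASKn */

/* Mask covering an aligned range of 'size' bytes, clipped to the CPU's
 * usable physical-address width.  Bits set above that width make the
 * WRMSR fault, which is the boot failure the patch fixes. */
static uint64_t physmask(unsigned int phys_bits, uint64_t size)
{
	uint64_t limit = (1ULL << phys_bits) - 1;

	return (~(size - 1) & limit) | MTRR_PHYSMASK_V;
}

int main(void)
{
	uint64_t size = 1ULL << 31;	/* a 2 GiB variable-range MTRR */

	/* 52 raw bits vs. 52 - 6 keyid bits = 46 effective bits: */
	printf("raw MAXPHYADDR:  %#018llx  <- 52-bit wide, write faults\n",
	       (unsigned long long)physmask(52, size));
	printf("keyids excluded: %#018llx  <- 46-bit wide, matches cpuinfo\n",
	       (unsigned long long)physmask(46, size));
	return 0;
}

The first line prints 0x000fffff80000800, the exact value the commit message cites as the failing write.
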
diff --git a/queue-6.1/x86-e820-don-t-reserve-setup_rng_seed-in-e820.patch b/queue-6.1/x86-e820-don-t-reserve-setup_rng_seed-in-e820.patch
new file mode 100644 (file)
index 0000000..57f8c69
--- /dev/null
@@ -0,0 +1,56 @@
+From 7fd817c906503b6813ea3b41f5fdf4192449a707 Mon Sep 17 00:00:00 2001
+From: Jiri Bohac <jbohac@suse.cz>
+Date: Wed, 31 Jan 2024 01:04:28 +0100
+Subject: x86/e820: Don't reserve SETUP_RNG_SEED in e820
+
+From: Jiri Bohac <jbohac@suse.cz>
+
+commit 7fd817c906503b6813ea3b41f5fdf4192449a707 upstream.
+
+SETUP_RNG_SEED in setup_data is supplied by kexec and should
+not be reserved in the e820 map.
+
+Doing so reserves 16 bytes of RAM when booting with kexec.
+(16 bytes because data->len is zeroed by parse_setup_data so only
+sizeof(setup_data) is reserved.)
+
+When kexec is used repeatedly, each boot adds two entries to the
+kexec-provided e820 map as the 16-byte range splits a larger
+range of usable memory. Eventually all of the 128 available entries
+get used up. The next split will result in losing usable memory
+as the new entries cannot be added to the e820 map.
+
+Fixes: 68b8e9713c8e ("x86/setup: Use rng seeds from setup_data")
+Signed-off-by: Jiri Bohac <jbohac@suse.cz>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: <stable@kernel.org>
+Link: https://lore.kernel.org/r/ZbmOjKnARGiaYBd5@dwarf.suse.cz
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/e820.c | 8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
+index fb8cf953380d..b66f540de054 100644
+--- a/arch/x86/kernel/e820.c
++++ b/arch/x86/kernel/e820.c
+@@ -1017,10 +1017,12 @@ void __init e820__reserve_setup_data(void)
+               e820__range_update(pa_data, sizeof(*data)+data->len, E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
+               /*
+-               * SETUP_EFI and SETUP_IMA are supplied by kexec and do not need
+-               * to be reserved.
++               * SETUP_EFI, SETUP_IMA and SETUP_RNG_SEED are supplied by
++               * kexec and do not need to be reserved.
+                */
+-              if (data->type != SETUP_EFI && data->type != SETUP_IMA)
++              if (data->type != SETUP_EFI &&
++                  data->type != SETUP_IMA &&
++                  data->type != SETUP_RNG_SEED)
+                       e820__range_update_kexec(pa_data,
+                                                sizeof(*data) + data->len,
+                                                E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
+-- 
+2.44.0
+
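
The exhaustion the commit describes is mechanical: each kexec boot punches one 16-byte reserved hole into a usable range, splitting one map entry into three (usable | reserved | usable), a net gain of two entries, until the fixed 128-entry table is full. A hedged back-of-the-envelope sketch (the starting entry count is hypothetical; 128 matches the kernel's fixed-size boot-params table):

#include <stdio.h>

#define E820_MAX_ENTRIES 128	/* fixed table size in the boot protocol */

int main(void)
{
	int entries = 20;	/* hypothetical firmware-provided map size */
	int boots = 0;

	/* Reserving SETUP_RNG_SEED splits usable|reserved|usable:
	 * one entry becomes three, net +2 per kexec boot. */
	while (entries + 2 <= E820_MAX_ENTRIES) {
		entries += 2;
		boots++;
	}

	printf("map full after %d kexec boots (%d entries); further splits\n"
	       "start losing usable RAM instead\n", boots, entries);
	return 0;
}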