--- /dev/null
+From d51847acb018d83186e4af67bc93f9a00a8644f7 Mon Sep 17 00:00:00 2001
+From: Doug Smythies <dsmythies@telus.net>
+Date: Sun, 20 Aug 2023 13:46:49 -0700
+Subject: cpufreq: intel_pstate: set stale CPU frequency to minimum
+
+From: Doug Smythies <dsmythies@telus.net>
+
+commit d51847acb018d83186e4af67bc93f9a00a8644f7 upstream.
+
+The intel_pstate CPU frequency scaling driver does not
+use policy->cur, so it stays at 0.
+When the CPU frequency is outdated, arch_freq_get_on_cpu()
+falls back to the nominal clock frequency because its call to
+cpufreq_quick_getpolicy_cur returns the never-updated 0.
+Thus, the listed frequency might be outside of the currently
+set limits. Some users are complaining about the high
+reported frequency, albeit stale, when their system is
+idle and/or when it is above the reduced maximum they have set.
+
+This patch keeps policy->cur for the intel_pstate driver
+at the current minimum CPU frequency, so that the stale value
+stays within the configured limits.
+
+Reported-by: Yang Jie <yang.jie@linux.intel.com>
+Closes: https://bugzilla.kernel.org/show_bug.cgi?id=217597
+Signed-off-by: Doug Smythies <dsmythies@telus.net>
+[ rjw: White space damage fixes and comment adjustment ]
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Keyon Jie <yang.jie@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/cpufreq/intel_pstate.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
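+Not part of the patch: a minimal standalone sketch of the fallback
+behaviour described in the commit message above. The struct, field names
+and frequency values below are simplified stand-ins (not the kernel's
+cpufreq_policy), chosen only to illustrate why a never-updated
+policy->cur yields a reported frequency outside the configured limits,
+and why pinning it to the minimum avoids that.
+
+	/* illustration only, builds as plain userspace C */
+	#include <stdio.h>
+
+	struct policy { unsigned int min, max, cur; };	/* kHz */
+
+	/* model of the fallback: a stale/zero cur is replaced by the
+	 * nominal frequency, which may exceed the user-set maximum */
+	static unsigned int reported_khz(const struct policy *p,
+					 unsigned int nominal)
+	{
+		return p->cur ? p->cur : nominal;
+	}
+
+	int main(void)
+	{
+		struct policy p = { .min = 800000, .max = 2000000, .cur = 0 };
+
+		/* before: 3.4 GHz is reported despite the 2.0 GHz cap */
+		printf("stale: %u kHz\n", reported_khz(&p, 3400000));
+
+		/* after: cur tracks the minimum, so the value stays in range */
+		p.cur = p.min;
+		printf("fixed: %u kHz\n", reported_khz(&p, 3400000));
+		return 0;
+	}
+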
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -2570,6 +2570,11 @@ static int intel_pstate_set_policy(struc
+ intel_pstate_clear_update_util_hook(policy->cpu);
+ intel_pstate_hwp_set(policy->cpu);
+ }
++ /*
++ * policy->cur is never updated with the intel_pstate driver, but it
++ * is used as a stale frequency value. So, keep it within limits.
++ */
++ policy->cur = policy->min;
+
+ mutex_unlock(&intel_pstate_limits_lock);
+
--- /dev/null
+From 106397376c0369fcc01c58dd189ff925a2724a57 Mon Sep 17 00:00:00 2001
+From: David Jeffery <djeffery@redhat.com>
+Date: Fri, 21 Jul 2023 17:57:15 +0800
+Subject: sbitmap: fix batching wakeup
+
+From: David Jeffery <djeffery@redhat.com>
+
+commit 106397376c0369fcc01c58dd189ff925a2724a57 upstream.
+
+The current code assumes that it is enough to provide forward progress
+by waking up just one wait queue after one completion batch is done.
+
+Unfortunately that isn't enough, because a waiter can be added to a
+wait queue right after it has been woken up.
+
+Here is one example (depth 64, wake_batch 8):
+
+1) all 64 tags are active
+
+2) each wait queue holds only a single waiter
+
+3) each completion batch (8 completions) wakes up just one waiter from
+   one wait queue, and immediately afterwards one new sleeper is added
+   to that wait queue
+
+4) after 64 completions, 8 waiters have been woken up, and there is
+   still one waiter left in each wait queue
+
+5) after another 8 active tags complete, only one more waiter can be
+   woken up, and the other 7 can never be woken up again.
+
+It turns out that this is not easy to fix properly, so simply wake up
+enough waiters to cover a single batch (see the sketch after the
+diffstat below).
+
+Cc: Kemeng Shi <shikemeng@huaweicloud.com>
+Cc: Chengming Zhou <zhouchengming@bytedance.com>
+Cc: Jan Kara <jack@suse.cz>
+Signed-off-by: David Jeffery <djeffery@redhat.com>
+Signed-off-by: Ming Lei <ming.lei@redhat.com>
+Reviewed-by: Gabriel Krisman Bertazi <krisman@suse.de>
+Reviewed-by: Keith Busch <kbusch@kernel.org>
+Link: https://lore.kernel.org/r/20230721095715.232728-1-ming.lei@redhat.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ lib/sbitmap.c | 15 +++++++--------
+ 1 file changed, 7 insertions(+), 8 deletions(-)
+
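+Not part of the patch: a standalone model of the new wakeup loop, under
+the assumption of 8 wait queues with one waiter each (the scenario from
+the commit message). wake_up_nr() and the waiter counts below are
+simplified stand-ins for the kernel's wait queues, kept only to show
+that a batch of nr wakeups is now spread across queues instead of
+stopping at the first queue that woke anybody.
+
+	/* illustration only, builds as plain userspace C */
+	#include <stdio.h>
+
+	#define NR_WAIT_QUEUES 8
+
+	/* wake at most nr waiters on one queue, return how many woke */
+	static int wake_up_nr(int *waiters, int nr)
+	{
+		int woken = nr < *waiters ? nr : *waiters;
+
+		*waiters -= woken;
+		return woken;
+	}
+
+	/* new behaviour: keep handing out wakeups until the whole batch
+	 * of nr has been consumed */
+	static void wake_batch(int *ws, int nr)
+	{
+		for (int i = 0; i < NR_WAIT_QUEUES && nr; i++)
+			if (ws[i])
+				nr -= wake_up_nr(&ws[i], nr);
+	}
+
+	int main(void)
+	{
+		int ws[NR_WAIT_QUEUES] = { 1, 1, 1, 1, 1, 1, 1, 1 };
+
+		wake_batch(ws, 8);	/* one completion batch of 8 */
+		for (int i = 0; i < NR_WAIT_QUEUES; i++)
+			printf("queue %d: %d waiter(s) left\n", i, ws[i]);
+		return 0;
+	}
+
+With the old break-after-first-success behaviour the same batch would
+wake only the waiter on the first queue and leave the other 7 sleeping.
+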
+--- a/lib/sbitmap.c
++++ b/lib/sbitmap.c
+@@ -550,7 +550,7 @@ EXPORT_SYMBOL_GPL(sbitmap_queue_min_shal
+
+ static void __sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr)
+ {
+- int i, wake_index;
++	int i, wake_index, woken;
+
+ if (!atomic_read(&sbq->ws_active))
+ return;
+@@ -567,13 +567,12 @@ static void __sbitmap_queue_wake_up(stru
+ */
+ wake_index = sbq_index_inc(wake_index);
+
+- /*
+- * It is sufficient to wake up at least one waiter to
+- * guarantee forward progress.
+- */
+- if (waitqueue_active(&ws->wait) &&
+- wake_up_nr(&ws->wait, nr))
+- break;
++ if (waitqueue_active(&ws->wait)) {
++ woken = wake_up_nr(&ws->wait, nr);
++ if (woken == nr)
++ break;
++ nr -= woken;
++ }
+ }
+
+ if (wake_index != atomic_read(&sbq->wake_index))
net-sfp-handle-100g-25g-active-optical-cables-in-sfp.patch
tracing-introduce-pipe_cpumask-to-avoid-race-on-trac.patch
platform-mellanox-fix-mlxbf-tmfifo-not-handling-all-.patch
+sbitmap-fix-batching-wakeup.patch
+cpufreq-intel_pstate-set-stale-cpu-frequency-to-minimum.patch
+tpm-enable-hwrng-only-for-pluton-on-amd-cpus.patch
--- /dev/null
+From 8f7f35e5aa6f2182eabcfa3abef4d898a48e9aa8 Mon Sep 17 00:00:00 2001
+From: Jarkko Sakkinen <jarkko@kernel.org>
+Date: Mon, 4 Sep 2023 21:12:10 +0300
+Subject: tpm: Enable hwrng only for Pluton on AMD CPUs
+
+From: Jarkko Sakkinen <jarkko@kernel.org>
+
+commit 8f7f35e5aa6f2182eabcfa3abef4d898a48e9aa8 upstream.
+
+The vendor check introduced by commit 554b841d4703 ("tpm: Disable RNG for
+all AMD fTPMs") doesn't work properly on a number of Intel fTPMs. On the
+reported systems the TPM doesn't reply at bootup and just returns the
+command code back. This makes the TPM probe fail on the Lenovo Legion Y540
+laptop.
+
+Since Microsoft Pluton is the only known combination of an AMD CPU with an
+fTPM from another vendor, disable hwrng on AMD CPUs in every other case. To
+make the sysadmin aware of this, also print an info message to the klog.
+
+Cc: stable@vger.kernel.org
+Fixes: 554b841d4703 ("tpm: Disable RNG for all AMD fTPMs")
+Reported-by: Todd Brandt <todd.e.brandt@intel.com>
+Closes: https://bugzilla.kernel.org/show_bug.cgi?id=217804
+Reported-by: Patrick Steinhardt <ps@pks.im>
+Reported-by: Raymond Jay Golo <rjgolo@gmail.com>
+Reported-by: Ronan Pigott <ronan@rjp.ie>
+Reviewed-by: Jerry Snitselaar <jsnitsel@redhat.com>
+Signed-off-by: Jarkko Sakkinen <jarkko@kernel.org>
+Cc: Thorsten Leemhuis <regressions@leemhuis.info>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/char/tpm/tpm_crb.c | 33 ++++++++-------------------------
+ 1 file changed, 8 insertions(+), 25 deletions(-)
+
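+Not part of the patch: a standalone sketch of the new quirk decision
+described above, i.e. keep the fTPM hwrng only when an AMD CPU is paired
+with a Pluton command buffer. The enum and helper below are simplified
+stand-ins for the ACPI TPM2 start-method handling in tpm_crb, not kernel
+API.
+
+	/* illustration only, builds as plain userspace C */
+	#include <stdbool.h>
+	#include <stdio.h>
+
+	enum start_method { CMD_BUFFER, CMD_BUFFER_WITH_PLUTON };
+
+	/* mirror of the check added below: AMD CPU plus a non-Pluton
+	 * start method means the hwrng gets disabled */
+	static bool hwrng_disabled(bool amd_cpu, enum start_method sm)
+	{
+		return amd_cpu && sm != CMD_BUFFER_WITH_PLUTON;
+	}
+
+	int main(void)
+	{
+		printf("AMD fTPM:     %s\n",
+		       hwrng_disabled(true, CMD_BUFFER) ? "hwrng off" : "hwrng on");
+		printf("AMD + Pluton: %s\n",
+		       hwrng_disabled(true, CMD_BUFFER_WITH_PLUTON) ? "hwrng off" : "hwrng on");
+		printf("Intel fTPM:   %s\n",
+		       hwrng_disabled(false, CMD_BUFFER) ? "hwrng off" : "hwrng on");
+		return 0;
+	}
+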
+--- a/drivers/char/tpm/tpm_crb.c
++++ b/drivers/char/tpm/tpm_crb.c
+@@ -463,28 +463,6 @@ static bool crb_req_canceled(struct tpm_
+ return (cancel & CRB_CANCEL_INVOKE) == CRB_CANCEL_INVOKE;
+ }
+
+-static int crb_check_flags(struct tpm_chip *chip)
+-{
+- u32 val;
+- int ret;
+-
+- ret = crb_request_locality(chip, 0);
+- if (ret)
+- return ret;
+-
+- ret = tpm2_get_tpm_pt(chip, TPM2_PT_MANUFACTURER, &val, NULL);
+- if (ret)
+- goto release;
+-
+- if (val == 0x414D4400U /* AMD */)
+- chip->flags |= TPM_CHIP_FLAG_HWRNG_DISABLED;
+-
+-release:
+- crb_relinquish_locality(chip, 0);
+-
+- return ret;
+-}
+-
+ static const struct tpm_class_ops tpm_crb = {
+ .flags = TPM_OPS_AUTO_STARTUP,
+ .status = crb_status,
+@@ -826,9 +804,14 @@ static int crb_acpi_add(struct acpi_devi
+ if (rc)
+ goto out;
+
+- rc = crb_check_flags(chip);
+- if (rc)
+- goto out;
++#ifdef CONFIG_X86
++ /* A quirk for https://www.amd.com/en/support/kb/faq/pa-410 */
++ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
++ priv->sm != ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON) {
++ dev_info(dev, "Disabling hwrng\n");
++ chip->flags |= TPM_CHIP_FLAG_HWRNG_DISABLED;
++ }
++#endif /* CONFIG_X86 */
+
+ rc = tpm_chip_register(chip);
+