--- /dev/null
+From 38a8c4d1d45006841f0643f4cb29b5e50758837c Mon Sep 17 00:00:00 2001
+From: Keith Busch <kbusch@kernel.org>
+Date: Fri, 31 Mar 2023 11:00:56 -0700
+Subject: blk-mq: directly poll requests
+
+From: Keith Busch <kbusch@kernel.org>
+
+commit 38a8c4d1d45006841f0643f4cb29b5e50758837c upstream.
+
+Polling needs a bio with a valid bi_bdev, but neither of those are
+guaranteed for polled driver requests. Make request based polling
+directly use blk-mq's polling function instead.
+
+When executing a request from a polled hctx, we know the request's
+cookie, and that it's from a live blk-mq queue that supports polling, so
+we can safely skip everything that bio_poll provides.
+
+Cc: stable@kernel.org
+Reported-by: Martin Belanger <Martin.Belanger@dell.com>
+Reported-by: Daniel Wagner <dwagner@suse.de>
+Signed-off-by: Keith Busch <kbusch@kernel.org>
+Tested-by: Daniel Wagner <dwagner@suse.de>
+Reviewed-by: Daniel Wagner <dwagner@suse.de>
+Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
+Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Tested-by: Shin'ichiro Kawasaki <shinichiro.kawasaki@wdc.com>
+Link: https://lore.kernel.org/r/20230331180056.1155862-1-kbusch@meta.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ block/blk-mq.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -1327,8 +1327,6 @@ bool blk_rq_is_poll(struct request *rq)
+ return false;
+ if (rq->mq_hctx->type != HCTX_TYPE_POLL)
+ return false;
+- if (WARN_ON_ONCE(!rq->bio))
+- return false;
+ return true;
+ }
+ EXPORT_SYMBOL_GPL(blk_rq_is_poll);
+@@ -1336,7 +1334,7 @@ EXPORT_SYMBOL_GPL(blk_rq_is_poll);
+ static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
+ {
+ do {
+- bio_poll(rq->bio, NULL, 0);
++ blk_mq_poll(rq->q, blk_rq_to_qc(rq), NULL, 0);
+ cond_resched();
+ } while (!completion_done(wait));
+ }
--- /dev/null
+From bf84937e882009075f57fd213836256fc65d96bc Mon Sep 17 00:00:00 2001
+From: Steve Clevenger <scclevenger@os.amperecomputing.com>
+Date: Mon, 27 Feb 2023 16:54:32 -0700
+Subject: coresight-etm4: Fix for() loop drvdata->nr_addr_cmp range bug
+
+From: Steve Clevenger <scclevenger@os.amperecomputing.com>
+
+commit bf84937e882009075f57fd213836256fc65d96bc upstream.
+
+In etm4_enable_hw, fix for() loop range to represent address comparator pairs.
+
+Fixes: 2e1cdfe184b5 ("coresight-etm4x: Adding CoreSight ETM4x driver")
+Cc: stable@vger.kernel.org
+Signed-off-by: Steve Clevenger <scclevenger@os.amperecomputing.com>
+Reviewed-by: James Clark <james.clark@arm.com>
+Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Link: https://lore.kernel.org/r/4a4ee61ce8ef402615a4528b21a051de3444fb7b.1677540079.git.scclevenger@os.amperecomputing.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/hwtracing/coresight/coresight-etm4x-core.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c
++++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c
+@@ -451,7 +451,7 @@ static int etm4_enable_hw(struct etmv4_d
+ if (etm4x_sspcicrn_present(drvdata, i))
+ etm4x_relaxed_write32(csa, config->ss_pe_cmp[i], TRCSSPCICRn(i));
+ }
+- for (i = 0; i < drvdata->nr_addr_cmp; i++) {
++ for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
+ etm4x_relaxed_write64(csa, config->addr_val[i], TRCACVRn(i));
+ etm4x_relaxed_write64(csa, config->addr_acc[i], TRCACATRn(i));
+ }
--- /dev/null
+From 735e7b30a53a1679c050cddb73f5e5316105d2e3 Mon Sep 17 00:00:00 2001
+From: Suzuki K Poulose <suzuki.poulose@arm.com>
+Date: Tue, 21 Mar 2023 10:45:30 +0000
+Subject: coresight: etm4x: Do not access TRCIDR1 for identification
+
+From: Suzuki K Poulose <suzuki.poulose@arm.com>
+
+commit 735e7b30a53a1679c050cddb73f5e5316105d2e3 upstream.
+
+CoreSight ETM4x architecture clearly provides ways to identify a device
+via registers in the "Management" class, TRCDEVARCH and TRCDEVTYPE. These
+registers can be accessed without the Trace domain being powered on.
+We additionally added TRCIDR1 as fallback in order to cover for any
+ETMs that may not have implemented TRCDEVARCH. So far, nobody has
+reported hitting a WARNING we placed to catch such systems.
+
+Also, more importantly it is problematic to access TRCIDR1, which is a
+"Trace" register via MMIO access, without clearing the OSLK. But we cannot
+mess with the OSLK until we know for sure that this is an ETMv4 device.
+Thus, this kind of creates a chicken and egg problem unnecessarily for
+systems "which are compliant" to the ETMv4 architecture.
+
+Let us remove the TRCIDR1 fall back check and rely only on TRCDEVARCH.
+
+Fixes: 8b94db1edaee ("coresight: etm4x: Use TRCDEVARCH for component discovery")
+Cc: stable@vger.kernel.org
+Reported-by: Steve Clevenger <scclevenger@os.amperecomputing.com>
+Link: https://lore.kernel.org/all/143540e5623d4c7393d24833f2b80600d8d745d2.1677881753.git.scclevenger@os.amperecomputing.com/
+Cc: Mike Leach <mike.leach@linaro.org>
+Cc: James Clark <james.clark@arm.com>
+Reviewed-by: Mike Leach <mike.leach@linaro.org>
+Reviewed-by: Anshuman Khandual <anshuman.khandual@arm.com>
+Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Link: https://lore.kernel.org/r/20230321104530.1547136-1-suzuki.poulose@arm.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/hwtracing/coresight/coresight-etm4x-core.c | 22 ++++++++-------------
+ drivers/hwtracing/coresight/coresight-etm4x.h | 20 +++++--------------
+ 2 files changed, 15 insertions(+), 27 deletions(-)
+
+--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c
++++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c
+@@ -1010,25 +1010,21 @@ static bool etm4_init_iomem_access(struc
+ struct csdev_access *csa)
+ {
+ u32 devarch = readl_relaxed(drvdata->base + TRCDEVARCH);
+- u32 idr1 = readl_relaxed(drvdata->base + TRCIDR1);
+
+ /*
+ * All ETMs must implement TRCDEVARCH to indicate that
+- * the component is an ETMv4. To support any broken
+- * implementations we fall back to TRCIDR1 check, which
+- * is not really reliable.
++ * the component is an ETMv4. Even though TRCIDR1 also
++ * contains the information, it is part of the "Trace"
++ * register and must be accessed with the OSLK cleared,
++ * with MMIO. But we cannot touch the OSLK until we are
++ * sure this is an ETM. So rely only on the TRCDEVARCH.
+ */
+- if ((devarch & ETM_DEVARCH_ID_MASK) == ETM_DEVARCH_ETMv4x_ARCH) {
+- drvdata->arch = etm_devarch_to_arch(devarch);
+- } else {
+- pr_warn("CPU%d: ETM4x incompatible TRCDEVARCH: %x, falling back to TRCIDR1\n",
+- smp_processor_id(), devarch);
+-
+- if (ETM_TRCIDR1_ARCH_MAJOR(idr1) != ETM_TRCIDR1_ARCH_ETMv4)
+- return false;
+- drvdata->arch = etm_trcidr_to_arch(idr1);
++ if ((devarch & ETM_DEVARCH_ID_MASK) != ETM_DEVARCH_ETMv4x_ARCH) {
++ pr_warn_once("TRCDEVARCH doesn't match ETMv4 architecture\n");
++ return false;
+ }
+
++ drvdata->arch = etm_devarch_to_arch(devarch);
+ *csa = CSDEV_ACCESS_IOMEM(drvdata->base);
+ return true;
+ }
+--- a/drivers/hwtracing/coresight/coresight-etm4x.h
++++ b/drivers/hwtracing/coresight/coresight-etm4x.h
+@@ -753,14 +753,12 @@
+ * TRCDEVARCH - CoreSight architected register
+ * - Bits[15:12] - Major version
+ * - Bits[19:16] - Minor version
+- * TRCIDR1 - ETM architected register
+- * - Bits[11:8] - Major version
+- * - Bits[7:4] - Minor version
+- * We must rely on TRCDEVARCH for the version information,
+- * however we don't want to break the support for potential
+- * old implementations which might not implement it. Thus
+- * we fall back to TRCIDR1 if TRCDEVARCH is not implemented
+- * for memory mapped components.
++ *
++ * We must rely only on TRCDEVARCH for the version information. Even though,
++ * TRCIDR1 also provides the architecture version, it is a "Trace" register
++ * and as such must be accessed only with Trace power domain ON. This may
++ * not be available at probe time.
++ *
+ * Now to make certain decisions easier based on the version
+ * we use an internal representation of the version in the
+ * driver, as follows :
+@@ -786,12 +784,6 @@ static inline u8 etm_devarch_to_arch(u32
+ ETM_DEVARCH_REVISION(devarch));
+ }
+
+-static inline u8 etm_trcidr_to_arch(u32 trcidr1)
+-{
+- return ETM_ARCH_VERSION(ETM_TRCIDR1_ARCH_MAJOR(trcidr1),
+- ETM_TRCIDR1_ARCH_MINOR(trcidr1));
+-}
+-
+ enum etm_impdef_type {
+ ETM4_IMPDEF_HISI_CORE_COMMIT,
+ ETM4_IMPDEF_FEATURE_MAX,
--- /dev/null
+From 4aa3b75c74603c3374877d5fd18ad9cc3a9a62ed Mon Sep 17 00:00:00 2001
+From: William Breathitt Gray <william.gray@linaro.org>
+Date: Sun, 12 Mar 2023 19:15:49 -0400
+Subject: counter: 104-quad-8: Fix race condition between FLAG and CNTR reads
+
+From: William Breathitt Gray <william.gray@linaro.org>
+
+commit 4aa3b75c74603c3374877d5fd18ad9cc3a9a62ed upstream.
+
+The Counter (CNTR) register is 24 bits wide, but we can have an
+effective 25-bit count value by setting bit 24 to the XOR of the Borrow
+flag and Carry flag. The flags can be read from the FLAG register, but a
+race condition exists: the Borrow flag and Carry flag are instantaneous
+and could change by the time the count value is read from the CNTR
+register.
+
+Since the race condition could result in an incorrect 25-bit count
+value, remove support for 25-bit count values from this driver;
+hard-coded maximum count values are replaced by a LS7267_CNTR_MAX define
+for consistency and clarity.
+
+Fixes: 28e5d3bb0325 ("iio: 104-quad-8: Add IIO support for the ACCES 104-QUAD-8")
+Cc: <stable@vger.kernel.org> # 6.1.x
+Cc: <stable@vger.kernel.org> # 6.2.x
+Link: https://lore.kernel.org/r/20230312231554.134858-1-william.gray@linaro.org/
+Signed-off-by: William Breathitt Gray <william.gray@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/counter/104-quad-8.c | 29 ++++++++---------------------
+ 1 file changed, 8 insertions(+), 21 deletions(-)
+
+--- a/drivers/counter/104-quad-8.c
++++ b/drivers/counter/104-quad-8.c
+@@ -97,10 +97,6 @@ struct quad8 {
+ struct quad8_reg __iomem *reg;
+ };
+
+-/* Borrow Toggle flip-flop */
+-#define QUAD8_FLAG_BT BIT(0)
+-/* Carry Toggle flip-flop */
+-#define QUAD8_FLAG_CT BIT(1)
+ /* Error flag */
+ #define QUAD8_FLAG_E BIT(4)
+ /* Up/Down flag */
+@@ -133,6 +129,9 @@ struct quad8 {
+ #define QUAD8_CMR_QUADRATURE_X2 0x10
+ #define QUAD8_CMR_QUADRATURE_X4 0x18
+
++/* Each Counter is 24 bits wide */
++#define LS7267_CNTR_MAX GENMASK(23, 0)
++
+ static int quad8_signal_read(struct counter_device *counter,
+ struct counter_signal *signal,
+ enum counter_signal_level *level)
+@@ -156,18 +155,10 @@ static int quad8_count_read(struct count
+ {
+ struct quad8 *const priv = counter_priv(counter);
+ struct channel_reg __iomem *const chan = priv->reg->channel + count->id;
+- unsigned int flags;
+- unsigned int borrow;
+- unsigned int carry;
+ unsigned long irqflags;
+ int i;
+
+- flags = ioread8(&chan->control);
+- borrow = flags & QUAD8_FLAG_BT;
+- carry = !!(flags & QUAD8_FLAG_CT);
+-
+- /* Borrow XOR Carry effectively doubles count range */
+- *val = (unsigned long)(borrow ^ carry) << 24;
++ *val = 0;
+
+ spin_lock_irqsave(&priv->lock, irqflags);
+
+@@ -191,8 +182,7 @@ static int quad8_count_write(struct coun
+ unsigned long irqflags;
+ int i;
+
+- /* Only 24-bit values are supported */
+- if (val > 0xFFFFFF)
++ if (val > LS7267_CNTR_MAX)
+ return -ERANGE;
+
+ spin_lock_irqsave(&priv->lock, irqflags);
+@@ -806,8 +796,7 @@ static int quad8_count_preset_write(stru
+ struct quad8 *const priv = counter_priv(counter);
+ unsigned long irqflags;
+
+- /* Only 24-bit values are supported */
+- if (preset > 0xFFFFFF)
++ if (preset > LS7267_CNTR_MAX)
+ return -ERANGE;
+
+ spin_lock_irqsave(&priv->lock, irqflags);
+@@ -834,8 +823,7 @@ static int quad8_count_ceiling_read(stru
+ *ceiling = priv->preset[count->id];
+ break;
+ default:
+- /* By default 0x1FFFFFF (25 bits unsigned) is maximum count */
+- *ceiling = 0x1FFFFFF;
++ *ceiling = LS7267_CNTR_MAX;
+ break;
+ }
+
+@@ -850,8 +838,7 @@ static int quad8_count_ceiling_write(str
+ struct quad8 *const priv = counter_priv(counter);
+ unsigned long irqflags;
+
+- /* Only 24-bit values are supported */
+- if (ceiling > 0xFFFFFF)
++ if (ceiling > LS7267_CNTR_MAX)
+ return -ERANGE;
+
+ spin_lock_irqsave(&priv->lock, irqflags);
--- /dev/null
+From 00f4bc5184c19cb33f468f1ea409d70d19f8f502 Mon Sep 17 00:00:00 2001
+From: William Breathitt Gray <william.gray@linaro.org>
+Date: Thu, 16 Mar 2023 16:34:26 -0400
+Subject: counter: 104-quad-8: Fix Synapse action reported for Index signals
+
+From: William Breathitt Gray <william.gray@linaro.org>
+
+commit 00f4bc5184c19cb33f468f1ea409d70d19f8f502 upstream.
+
+Signal 16 and higher represent the device's Index lines. The
+priv->preset_enable array holds the device configuration for these Index
+lines. The preset_enable configuration is active low on the device, so
+invert the conditional check in quad8_action_read() to properly handle
+the logical state of preset_enable.
+
+Fixes: f1d8a071d45b ("counter: 104-quad-8: Add Generic Counter interface support")
+Cc: <stable@vger.kernel.org>
+Link: https://lore.kernel.org/r/20230316203426.224745-1-william.gray@linaro.org/
+Signed-off-by: William Breathitt Gray <william.gray@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/counter/104-quad-8.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/counter/104-quad-8.c
++++ b/drivers/counter/104-quad-8.c
+@@ -368,7 +368,7 @@ static int quad8_action_read(struct coun
+
+ /* Handle Index signals */
+ if (synapse->signal->id >= 16) {
+- if (priv->preset_enable[count->id])
++ if (!priv->preset_enable[count->id])
+ *action = COUNTER_SYNAPSE_ACTION_RISING_EDGE;
+ else
+ *action = COUNTER_SYNAPSE_ACTION_NONE;
--- /dev/null
+From 80962ec912db56d323883154efc2297473e692cb Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Wed, 22 Mar 2023 07:33:00 -0700
+Subject: KVM: nVMX: Do not report error code when synthesizing VM-Exit from Real Mode
+
+From: Sean Christopherson <seanjc@google.com>
+
+commit 80962ec912db56d323883154efc2297473e692cb upstream.
+
+Don't report an error code to L1 when synthesizing a nested VM-Exit and
+L2 is in Real Mode. Per Intel's SDM, regarding the error code valid bit:
+
+ This bit is always 0 if the VM exit occurred while the logical processor
+ was in real-address mode (CR0.PE=0).
+
+The bug was introduced by a recent fix for AMD's Paged Real Mode, which
+moved the error code suppression from the common "queue exception" path
+to the "inject exception" path, but missed VMX's "synthesize VM-Exit"
+path.
+
+Fixes: b97f07458373 ("KVM: x86: determine if an exception has an error code only when injecting it.")
+Cc: stable@vger.kernel.org
+Cc: Maxim Levitsky <mlevitsk@redhat.com>
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Message-Id: <20230322143300.2209476-3-seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/vmx/nested.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -3845,7 +3845,12 @@ static void nested_vmx_inject_exception_
+ exit_qual = 0;
+ }
+
+- if (ex->has_error_code) {
++ /*
++ * Unlike AMD's Paged Real Mode, which reports an error code on #PF
++ * VM-Exits even if the CPU is in Real Mode, Intel VMX never sets the
++ * "has error code" flags on VM-Exit if the CPU is in Real Mode.
++ */
++ if (ex->has_error_code && is_protmode(vcpu)) {
+ /*
+ * Intel CPUs do not generate error codes with bits 31:16 set,
+ * and more importantly VMX disallows setting bits 31:16 in the
--- /dev/null
+From 6c41468c7c12d74843bb414fc00307ea8a6318c3 Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Wed, 22 Mar 2023 07:32:59 -0700
+Subject: KVM: x86: Clear "has_error_code", not "error_code", for RM exception injection
+
+From: Sean Christopherson <seanjc@google.com>
+
+commit 6c41468c7c12d74843bb414fc00307ea8a6318c3 upstream.
+
+When injecting an exception into a vCPU in Real Mode, suppress the error
+code by clearing the flag that tracks whether the error code is valid, not
+by clearing the error code itself. The "typo" was introduced by recent
+fix for SVM's funky Paged Real Mode.
+
+Opportunistically hoist the logic above the tracepoint so that the trace
+is coherent with respect to what is actually injected (this was also the
+behavior prior to the buggy commit).
+
+Fixes: b97f07458373 ("KVM: x86: determine if an exception has an error code only when injecting it.")
+Cc: stable@vger.kernel.org
+Cc: Maxim Levitsky <mlevitsk@redhat.com>
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Message-Id: <20230322143300.2209476-2-seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/x86.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -9853,13 +9853,20 @@ int kvm_check_nested_events(struct kvm_v
+
+ static void kvm_inject_exception(struct kvm_vcpu *vcpu)
+ {
++ /*
++ * Suppress the error code if the vCPU is in Real Mode, as Real Mode
++ * exceptions don't report error codes. The presence of an error code
++ * is carried with the exception and only stripped when the exception
++ * is injected as intercepted #PF VM-Exits for AMD's Paged Real Mode do
++ * report an error code despite the CPU being in Real Mode.
++ */
++ vcpu->arch.exception.has_error_code &= is_protmode(vcpu);
++
+ trace_kvm_inj_exception(vcpu->arch.exception.vector,
+ vcpu->arch.exception.has_error_code,
+ vcpu->arch.exception.error_code,
+ vcpu->arch.exception.injected);
+
+- if (vcpu->arch.exception.error_code && !is_protmode(vcpu))
+- vcpu->arch.exception.error_code = false;
+ static_call(kvm_x86_inject_exception)(vcpu);
+ }
+
--- /dev/null
+From 1f2803b2660f4b04d48d065072c0ae0c9ca255fd Mon Sep 17 00:00:00 2001
+From: Muchun Song <songmuchun@bytedance.com>
+Date: Thu, 23 Mar 2023 10:50:03 +0800
+Subject: mm: kfence: fix handling discontiguous page
+
+From: Muchun Song <songmuchun@bytedance.com>
+
+commit 1f2803b2660f4b04d48d065072c0ae0c9ca255fd upstream.
+
+The struct pages could be discontiguous when the kfence pool is allocated
+via alloc_contig_pages() with CONFIG_SPARSEMEM and
+!CONFIG_SPARSEMEM_VMEMMAP.
+
+This may result in setting PG_slab and memcg_data to a arbitrary
+address (may be not used as a struct page), which in the worst case
+might corrupt the kernel.
+
+So the iteration should use nth_page().
+
+Link: https://lkml.kernel.org/r/20230323025003.94447-1-songmuchun@bytedance.com
+Fixes: 0ce20dd84089 ("mm: add Kernel Electric-Fence infrastructure")
+Signed-off-by: Muchun Song <songmuchun@bytedance.com>
+Reviewed-by: Marco Elver <elver@google.com>
+Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
+Cc: Alexander Potapenko <glider@google.com>
+Cc: Dmitry Vyukov <dvyukov@google.com>
+Cc: Jann Horn <jannh@google.com>
+Cc: SeongJae Park <sjpark@amazon.de>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/kfence/core.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/mm/kfence/core.c
++++ b/mm/kfence/core.c
+@@ -557,7 +557,7 @@ static unsigned long kfence_init_pool(vo
+ * enters __slab_free() slow-path.
+ */
+ for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
+- struct slab *slab = page_slab(&pages[i]);
++ struct slab *slab = page_slab(nth_page(pages, i));
+
+ if (!i || (i % 2))
+ continue;
+@@ -603,7 +603,7 @@ static unsigned long kfence_init_pool(vo
+
+ reset_slab:
+ for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
+- struct slab *slab = page_slab(&pages[i]);
++ struct slab *slab = page_slab(nth_page(pages, i));
+
+ if (!i || (i % 2))
+ continue;
--- /dev/null
+From 3ee2d7471fa4963a2ced0a84f0653ce88b43c5b2 Mon Sep 17 00:00:00 2001
+From: Muchun Song <songmuchun@bytedance.com>
+Date: Mon, 20 Mar 2023 11:00:59 +0800
+Subject: mm: kfence: fix PG_slab and memcg_data clearing
+
+From: Muchun Song <songmuchun@bytedance.com>
+
+commit 3ee2d7471fa4963a2ced0a84f0653ce88b43c5b2 upstream.
+
+It does not reset PG_slab and memcg_data when KFENCE fails to initialize
+kfence pool at runtime. It is reporting a "Bad page state" message when
+kfence pool is freed to buddy. The checking of whether it is a compound
+head page seems unnecessary since we already guarantee this when
+allocating kfence pool. Remove the check to simplify the code.
+
+Link: https://lkml.kernel.org/r/20230320030059.20189-1-songmuchun@bytedance.com
+Fixes: 0ce20dd84089 ("mm: add Kernel Electric-Fence infrastructure")
+Signed-off-by: Muchun Song <songmuchun@bytedance.com>
+Cc: Alexander Potapenko <glider@google.com>
+Cc: Dmitry Vyukov <dvyukov@google.com>
+Cc: Jann Horn <jannh@google.com>
+Cc: Marco Elver <elver@google.com>
+Cc: Roman Gushchin <roman.gushchin@linux.dev>
+Cc: SeongJae Park <sjpark@amazon.de>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/kfence/core.c | 30 +++++++++++++++---------------
+ 1 file changed, 15 insertions(+), 15 deletions(-)
+
+--- a/mm/kfence/core.c
++++ b/mm/kfence/core.c
+@@ -562,10 +562,6 @@ static unsigned long kfence_init_pool(vo
+ if (!i || (i % 2))
+ continue;
+
+- /* Verify we do not have a compound head page. */
+- if (WARN_ON(compound_head(&pages[i]) != &pages[i]))
+- return addr;
+-
+ __folio_set_slab(slab_folio(slab));
+ #ifdef CONFIG_MEMCG
+ slab->memcg_data = (unsigned long)&kfence_metadata[i / 2 - 1].objcg |
+@@ -598,12 +594,26 @@ static unsigned long kfence_init_pool(vo
+
+ /* Protect the right redzone. */
+ if (unlikely(!kfence_protect(addr + PAGE_SIZE)))
+- return addr;
++ goto reset_slab;
+
+ addr += 2 * PAGE_SIZE;
+ }
+
+ return 0;
++
++reset_slab:
++ for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
++ struct slab *slab = page_slab(&pages[i]);
++
++ if (!i || (i % 2))
++ continue;
++#ifdef CONFIG_MEMCG
++ slab->memcg_data = 0;
++#endif
++ __folio_clear_slab(slab_folio(slab));
++ }
++
++ return addr;
+ }
+
+ static bool __init kfence_init_pool_early(void)
+@@ -633,16 +643,6 @@ static bool __init kfence_init_pool_earl
+ * fails for the first page, and therefore expect addr==__kfence_pool in
+ * most failure cases.
+ */
+- for (char *p = (char *)addr; p < __kfence_pool + KFENCE_POOL_SIZE; p += PAGE_SIZE) {
+- struct slab *slab = virt_to_slab(p);
+-
+- if (!slab)
+- continue;
+-#ifdef CONFIG_MEMCG
+- slab->memcg_data = 0;
+-#endif
+- __folio_clear_slab(slab_folio(slab));
+- }
+ memblock_free_late(__pa(addr), KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool));
+ __kfence_pool = NULL;
+ return false;
ksmbd-fix-slab-out-of-bounds-in-init_smb2_rsp_hdr.patch
alsa-hda-realtek-add-quirk-for-clevo-x370snw.patch
alsa-hda-realtek-fix-mute-micmute-leds-for-a-hp-probook.patch
+x86-acpi-boot-correct-acpi_is_processor_usable-check.patch
+x86-acpi-boot-use-fadt-version-to-check-support-for-online-capable.patch
+kvm-x86-clear-has_error_code-not-error_code-for-rm-exception-injection.patch
+kvm-nvmx-do-not-report-error-code-when-synthesizing-vm-exit-from-real-mode.patch
+mm-kfence-fix-pg_slab-and-memcg_data-clearing.patch
+mm-kfence-fix-handling-discontiguous-page.patch
+coresight-etm4x-do-not-access-trcidr1-for-identification.patch
+coresight-etm4-fix-for-loop-drvdata-nr_addr_cmp-range-bug.patch
+counter-104-quad-8-fix-race-condition-between-flag-and-cntr-reads.patch
+counter-104-quad-8-fix-synapse-action-reported-for-index-signals.patch
+blk-mq-directly-poll-requests.patch
iio-adc-ad7791-fix-irq-flags.patch
io_uring-fix-return-value-when-removing-provided-buf.patch
io_uring-fix-memory-leak-when-removing-provided-buff.patch
--- /dev/null
+From fed8d8773b8ea68ad99d9eee8c8343bef9da2c2c Mon Sep 17 00:00:00 2001
+From: Eric DeVolder <eric.devolder@oracle.com>
+Date: Mon, 27 Mar 2023 15:10:26 -0400
+Subject: x86/acpi/boot: Correct acpi_is_processor_usable() check
+
+From: Eric DeVolder <eric.devolder@oracle.com>
+
+commit fed8d8773b8ea68ad99d9eee8c8343bef9da2c2c upstream.
+
+The logic in acpi_is_processor_usable() requires the online capable
+bit be set for hotpluggable CPUs. The online capable bit has been
+introduced in ACPI 6.3.
+
+However, for ACPI revisions < 6.3 which do not support that bit, CPUs
+should be reported as usable, not the other way around.
+
+Reverse the check.
+
+ [ bp: Rewrite commit message. ]
+
+Fixes: e2869bd7af60 ("x86/acpi/boot: Do not register processors that cannot be onlined for x2APIC")
+Suggested-by: Miguel Luis <miguel.luis@oracle.com>
+Suggested-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Signed-off-by: Eric DeVolder <eric.devolder@oracle.com>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Tested-by: David R <david@unsolicited.net>
+Cc: <stable@kernel.org>
+Link: https://lore.kernel.org/r/20230327191026.3454-2-eric.devolder@oracle.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/acpi/boot.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/acpi/boot.c
++++ b/arch/x86/kernel/acpi/boot.c
+@@ -193,7 +193,8 @@ static bool __init acpi_is_processor_usa
+ if (lapic_flags & ACPI_MADT_ENABLED)
+ return true;
+
+- if (acpi_support_online_capable && (lapic_flags & ACPI_MADT_ONLINE_CAPABLE))
++ if (!acpi_support_online_capable ||
++ (lapic_flags & ACPI_MADT_ONLINE_CAPABLE))
+ return true;
+
+ return false;
--- /dev/null
+From a74fabfbd1b7013045afc8cc541e6cab3360ccb5 Mon Sep 17 00:00:00 2001
+From: Mario Limonciello <mario.limonciello@amd.com>
+Date: Wed, 29 Mar 2023 12:45:35 -0500
+Subject: x86/ACPI/boot: Use FADT version to check support for online capable
+
+From: Mario Limonciello <mario.limonciello@amd.com>
+
+commit a74fabfbd1b7013045afc8cc541e6cab3360ccb5 upstream.
+
+ACPI 6.3 introduced the online capable bit, and also introduced MADT
+version 5.
+
+Latter was used to distinguish whether the offset storing online capable
+could be used. However ACPI 6.2b has MADT version "45" which is for
+an errata version of the ACPI 6.2 spec. This means that the Linux code
+for detecting availability of MADT will mistakenly flag ACPI 6.2b as
+supporting online capable which is inaccurate as it's an ACPI 6.3 feature.
+
+Instead use the FADT major and minor revision fields to distinguish this.
+
+ [ bp: Massage. ]
+
+Fixes: aa06e20f1be6 ("x86/ACPI: Don't add CPUs that are not online capable")
+Reported-by: Eric DeVolder <eric.devolder@oracle.com>
+Reported-by: Borislav Petkov <bp@alien8.de>
+Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Cc: <stable@kernel.org>
+Link: https://lore.kernel.org/r/943d2445-84df-d939-f578-5d8240d342cc@unsolicited.net
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/acpi/boot.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/acpi/boot.c
++++ b/arch/x86/kernel/acpi/boot.c
+@@ -146,7 +146,11 @@ static int __init acpi_parse_madt(struct
+
+ pr_debug("Local APIC address 0x%08x\n", madt->address);
+ }
+- if (madt->header.revision >= 5)
++
++ /* ACPI 6.3 and newer support the online capable bit. */
++ if (acpi_gbl_FADT.header.revision > 6 ||
++ (acpi_gbl_FADT.header.revision == 6 &&
++ acpi_gbl_FADT.minor_revision >= 3))
+ acpi_support_online_capable = true;
+
+ default_acpi_madt_oem_check(madt->header.oem_id,