4.19-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Thu, 30 Nov 2023 13:42:28 +0000 (13:42 +0000)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Thu, 30 Nov 2023 13:42:28 +0000 (13:42 +0000)
added patches:
arm64-cpufeature-extract-capped-perfmon-fields.patch
kvm-arm64-limit-pmu-version-to-pmuv3-for-armv8.1.patch
mtd-rawnand-brcmnand-fix-ecc-chunk-calculation-for-erased-page-bitfips.patch
tracing-kprobes-return-eaddrnotavail-when-func-matches-several-symbols.patch

queue-4.19/arm64-cpufeature-extract-capped-perfmon-fields.patch [new file with mode: 0644]
queue-4.19/kvm-arm64-limit-pmu-version-to-pmuv3-for-armv8.1.patch [new file with mode: 0644]
queue-4.19/mtd-rawnand-brcmnand-fix-ecc-chunk-calculation-for-erased-page-bitfips.patch [new file with mode: 0644]
queue-4.19/series
queue-4.19/tracing-kprobes-return-eaddrnotavail-when-func-matches-several-symbols.patch [new file with mode: 0644]

diff --git a/queue-4.19/arm64-cpufeature-extract-capped-perfmon-fields.patch b/queue-4.19/arm64-cpufeature-extract-capped-perfmon-fields.patch
new file mode 100644 (file)
index 0000000..390b82f
--- /dev/null
@@ -0,0 +1,70 @@
+From stable+bounces-2863-greg=kroah.com@vger.kernel.org Tue Nov 28 07:47:05 2023
+From: Zenghui Yu <yuzenghui@huawei.com>
+Date: Tue, 28 Nov 2023 15:46:32 +0800
+Subject: arm64: cpufeature: Extract capped perfmon fields
+To: <stable@vger.kernel.org>, <gregkh@linuxfoundation.org>, <sashal@kernel.org>
+Cc: <linux-arm-kernel@lists.infradead.org>, <kvmarm@lists.linux.dev>, <andrew.murray@arm.com>, <mark.rutland@arm.com>, <suzuki.poulose@arm.com>, <wanghaibin.wang@huawei.com>, <will@kernel.org>, Zenghui Yu <yuzenghui@huawei.com>
+Message-ID: <20231128074633.646-2-yuzenghui@huawei.com>
+
+From: Andrew Murray <andrew.murray@arm.com>
+
+commit 8e35aa642ee4dab01b16cc4b2df59d1936f3b3c2 upstream.
+
+When emulating ID registers there is often a need to cap the version
+bits of a feature such that the guest will not use features that the
+host is not aware of, for example when KVM mediates access to the PMU
+by emulating register accesses.
+
+Let's add a helper that extracts a performance monitors ID field and
+caps the version to a given value.
+
+Fields that identify the version of the Performance Monitors Extension
+do not follow the standard ID scheme, and instead follow the scheme
+described in ARM DDI 0487E.a page D13-2825 "Alternative ID scheme used
+for the Performance Monitors Extension version". The value 0xF means an
+IMPLEMENTATION DEFINED PMU is present, and values 0x0-0xE can be treated
+the same as an unsigned field with 0x0 meaning no PMU is present.
+
+Signed-off-by: Andrew Murray <andrew.murray@arm.com>
+Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+[Mark: rework to handle perfmon fields]
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Zenghui Yu <yuzenghui@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/cpufeature.h |   23 +++++++++++++++++++++++
+ 1 file changed, 23 insertions(+)
+
+--- a/arch/arm64/include/asm/cpufeature.h
++++ b/arch/arm64/include/asm/cpufeature.h
+@@ -422,6 +422,29 @@ cpuid_feature_extract_unsigned_field(u64
+       return cpuid_feature_extract_unsigned_field_width(features, field, 4);
+ }
++/*
++ * Fields that identify the version of the Performance Monitors Extension do
++ * not follow the standard ID scheme. See ARM DDI 0487E.a page D13-2825,
++ * "Alternative ID scheme used for the Performance Monitors Extension version".
++ */
++static inline u64 __attribute_const__
++cpuid_feature_cap_perfmon_field(u64 features, int field, u64 cap)
++{
++      u64 val = cpuid_feature_extract_unsigned_field(features, field);
++      u64 mask = GENMASK_ULL(field + 3, field);
++
++      /* Treat IMPLEMENTATION DEFINED functionality as unimplemented */
++      if (val == 0xf)
++              val = 0;
++
++      if (val > cap) {
++              features &= ~mask;
++              features |= (cap << field) & mask;
++      }
++
++      return features;
++}
++
+ static inline u64 arm64_ftr_mask(const struct arm64_ftr_bits *ftrp)
+ {
+       return (u64)GENMASK(ftrp->shift + ftrp->width - 1, ftrp->shift);
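For illustration, the capping semantics of cpuid_feature_cap_perfmon_field()
can be checked with a minimal userspace sketch. This is not kernel code:
GENMASK_ULL is redefined locally and the PMUVer shift of 8 (ID_AA64DFR0_EL1
bits [11:8]) is hard-coded for the example.

#include <stdio.h>
#include <stdint.h>

/* Local stand-in for the kernel's GENMASK_ULL(). */
#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

/* Mirrors cpuid_feature_cap_perfmon_field() from the patch above. */
static uint64_t cap_perfmon_field(uint64_t features, int field, uint64_t cap)
{
	uint64_t val = (features >> field) & 0xf;	/* 4-bit ID field */
	uint64_t mask = GENMASK_ULL(field + 3, field);

	/* 0xF (IMPLEMENTATION DEFINED) compares as "no PMU"... */
	if (val == 0xf)
		val = 0;

	if (val > cap) {
		features &= ~mask;
		features |= (cap << field) & mask;
	}
	return features;	/* ...so an 0xF field is left in place */
}

int main(void)
{
	int shift = 8;		/* ID_AA64DFR0_EL1.PMUVer, bits [11:8] */
	uint64_t vals[] = { 0x6, 0x4, 0x1, 0xf };

	for (int i = 0; i < 4; i++) {
		uint64_t reg = cap_perfmon_field(vals[i] << shift, shift, 0x4);
		printf("PMUVer 0x%llx -> 0x%llx\n",
		       (unsigned long long)vals[i],
		       (unsigned long long)((reg >> shift) & 0xf));
	}
	return 0;	/* prints 0x6->0x4, 0x4->0x4, 0x1->0x1, 0xf->0xf */
}

Values above the cap collapse to the cap, values at or below pass through,
and an IMPLEMENTATION DEFINED PMU (0xF) is deliberately not rewritten.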
diff --git a/queue-4.19/kvm-arm64-limit-pmu-version-to-pmuv3-for-armv8.1.patch b/queue-4.19/kvm-arm64-limit-pmu-version-to-pmuv3-for-armv8.1.patch
new file mode 100644 (file)
index 0000000..f832923
--- /dev/null
@@ -0,0 +1,78 @@
+From stable+bounces-2864-greg=kroah.com@vger.kernel.org Tue Nov 28 07:47:10 2023
+From: Zenghui Yu <yuzenghui@huawei.com>
+Date: Tue, 28 Nov 2023 15:46:33 +0800
+Subject: KVM: arm64: limit PMU version to PMUv3 for ARMv8.1
+To: <stable@vger.kernel.org>, <gregkh@linuxfoundation.org>, <sashal@kernel.org>
+Cc: <linux-arm-kernel@lists.infradead.org>, <kvmarm@lists.linux.dev>, <andrew.murray@arm.com>, <mark.rutland@arm.com>, <suzuki.poulose@arm.com>, <wanghaibin.wang@huawei.com>, <will@kernel.org>, Zenghui Yu <yuzenghui@huawei.com>
+Message-ID: <20231128074633.646-3-yuzenghui@huawei.com>
+
+From: Andrew Murray <andrew.murray@arm.com>
+
+commit c854188ea01062f5a5fd7f05658feb1863774eaa upstream.
+
+We currently expose the PMU version of the host to the guest via
+emulation of the DFR0_EL1 and AA64DFR0_EL1 debug feature registers.
+However, many of the features offered beyond PMUv3 for ARMv8.1 are not
+supported in KVM. Examples of this include support for the PMMIR
+registers (added in PMUv3 for ARMv8.4) and 64-bit event counters
+(added in PMUv3 for ARMv8.5).
+
+Let's trap guest accesses to the Debug Feature Registers in order to
+limit PMUVer/PerfMon to PMUv3 for ARMv8.1 and avoid unexpected
+behaviour.
+
+Both ID_AA64DFR0.PMUVer and ID_DFR0.PerfMon follow the "Alternative ID
+scheme used for the Performance Monitors Extension version" where 0xF
+means an IMPLEMENTATION DEFINED PMU is implemented, and values 0x0-0xE
+are treated the same as an unsigned field (with 0x0 meaning no PMU is
+present). As we don't expect to expose an IMPLEMENTATION DEFINED PMU,
+and our cap is below 0xF, we can treat these fields as unsigned when
+applying the cap.
+
+Signed-off-by: Andrew Murray <andrew.murray@arm.com>
+Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+[Mark: make field names consistent, use perfmon cap]
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Will Deacon <will@kernel.org>
+[yuzenghui@huawei.com: adjust the context in read_id_reg()]
+Signed-off-by: Zenghui Yu <yuzenghui@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/sysreg.h |    6 ++++++
+ arch/arm64/kvm/sys_regs.c       |   10 ++++++++++
+ 2 files changed, 16 insertions(+)
+
+--- a/arch/arm64/include/asm/sysreg.h
++++ b/arch/arm64/include/asm/sysreg.h
+@@ -622,6 +622,12 @@
+ #define ID_AA64DFR0_TRACEVER_SHIFT    4
+ #define ID_AA64DFR0_DEBUGVER_SHIFT    0
++#define ID_AA64DFR0_PMUVER_8_1                0x4
++
++#define ID_DFR0_PERFMON_SHIFT         24
++
++#define ID_DFR0_PERFMON_8_1           0x4
++
+ #define ID_ISAR5_RDM_SHIFT            24
+ #define ID_ISAR5_CRC32_SHIFT          16
+ #define ID_ISAR5_SHA2_SHIFT           12
+--- a/arch/arm64/kvm/sys_regs.c
++++ b/arch/arm64/kvm/sys_regs.c
+@@ -1049,6 +1049,16 @@ static u64 read_id_reg(struct sys_reg_de
+                       kvm_debug("LORegions unsupported for guests, suppressing\n");
+               val &= ~(0xfUL << ID_AA64MMFR1_LOR_SHIFT);
++      } else if (id == SYS_ID_AA64DFR0_EL1) {
++              /* Limit guests to PMUv3 for ARMv8.1 */
++              val = cpuid_feature_cap_perfmon_field(val,
++                                              ID_AA64DFR0_PMUVER_SHIFT,
++                                              ID_AA64DFR0_PMUVER_8_1);
++      } else if (id == SYS_ID_DFR0_EL1) {
++              /* Limit guests to PMUv3 for ARMv8.1 */
++              val = cpuid_feature_cap_perfmon_field(val,
++                                              ID_DFR0_PERFMON_SHIFT,
++                                              ID_DFR0_PERFMON_8_1);
+       }
+       return val;
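As a worked example of the capping (register values assumed for
illustration): a host whose ID_AA64DFR0_EL1.PMUVer field reads 0x5 (PMUv3
for ARMv8.4) now presents 0x4 (PMUv3 for ARMv8.1) to the guest, while a
host value of 0x4 or lower passes through unchanged. The same cap is
applied to the AArch32 view through ID_DFR0.PerfMon at bits [27:24].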
diff --git a/queue-4.19/mtd-rawnand-brcmnand-fix-ecc-chunk-calculation-for-erased-page-bitfips.patch b/queue-4.19/mtd-rawnand-brcmnand-fix-ecc-chunk-calculation-for-erased-page-bitfips.patch
new file mode 100644 (file)
index 0000000..ebb2255
--- /dev/null
@@ -0,0 +1,44 @@
+From 7f852cc1579297fd763789f8cd370639d0c654b6 Mon Sep 17 00:00:00 2001
+From: Claire Lin <claire.lin@broadcom.com>
+Date: Mon, 26 Aug 2019 15:57:56 -0400
+Subject: mtd: rawnand: brcmnand: Fix ecc chunk calculation for erased page bitfips
+
+From: Claire Lin <claire.lin@broadcom.com>
+
+commit 7f852cc1579297fd763789f8cd370639d0c654b6 upstream.
+
+In brcmstb_nand_verify_erased_page(), the ECC chunk pointer calculation
+while correcting erased page bitflips is wrong; fix it.
+
+Fixes: 02b88eea9f9c ("mtd: brcmnand: Add check for erased page bitflips")
+Signed-off-by: Claire Lin <claire.lin@broadcom.com>
+Reviewed-by: Ray Jui <ray.jui@broadcom.com>
+Signed-off-by: Kamal Dasu <kdasu.kdev@gmail.com>
+Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
+Signed-off-by: Yuta Hayama <hayama@lineo.co.jp>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mtd/nand/raw/brcmnand/brcmnand.c |    5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
++++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+@@ -1753,6 +1753,7 @@ static int brcmstb_nand_verify_erased_pa
+       int bitflips = 0;
+       int page = addr >> chip->page_shift;
+       int ret;
++      void *ecc_chunk;
+       if (!buf) {
+               buf = chip->data_buf;
+@@ -1768,7 +1769,9 @@ static int brcmstb_nand_verify_erased_pa
+               return ret;
+       for (i = 0; i < chip->ecc.steps; i++, oob += sas) {
+-              ret = nand_check_erased_ecc_chunk(buf, chip->ecc.size,
++              ecc_chunk = buf + chip->ecc.size * i;
++              ret = nand_check_erased_ecc_chunk(ecc_chunk,
++                                                chip->ecc.size,
+                                                 oob, sas, NULL, 0,
+                                                 chip->ecc.strength);
+               if (ret < 0)
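To see the effect of the fix, the standalone sketch below (with an assumed
4 KiB page and 512-byte ECC step; real geometry comes from chip->ecc)
prints the byte range each ECC step now checks. Before the fix, every
iteration passed buf itself, so all steps re-checked the first chunk even
though the oob pointer kept advancing.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t page[4096];		/* assumed page size */
	int ecc_size = 512;		/* stands in for chip->ecc.size */
	int steps = (int)sizeof(page) / ecc_size; /* chip->ecc.steps */

	for (int i = 0; i < steps; i++) {
		/* the fix: advance the chunk pointer each step */
		const uint8_t *ecc_chunk = page + ecc_size * i;

		printf("step %d checks bytes %td..%td\n", i,
		       ecc_chunk - page, ecc_chunk - page + ecc_size - 1);
	}
	return 0;
}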
diff --git a/queue-4.19/series b/queue-4.19/series
index 518062d95aca9284ab1729fbde40afbda5561a5e..64a7cb71e7c725d980f9d92f853aa5e9b1ec3dbf 100644 (file)
--- a/queue-4.19/series
@@ -12,3 +12,7 @@ amd-xgbe-handle-the-corner-case-during-tx-completion.patch
 amd-xgbe-propagate-the-correct-speed-and-duplex-stat.patch
 net-axienet-fix-check-for-partial-tx-checksum.patch
 mips-kvm-fix-a-build-warning-about-variable-set-but-.patch
+tracing-kprobes-return-eaddrnotavail-when-func-matches-several-symbols.patch
+arm64-cpufeature-extract-capped-perfmon-fields.patch
+kvm-arm64-limit-pmu-version-to-pmuv3-for-armv8.1.patch
+mtd-rawnand-brcmnand-fix-ecc-chunk-calculation-for-erased-page-bitfips.patch
diff --git a/queue-4.19/tracing-kprobes-return-eaddrnotavail-when-func-matches-several-symbols.patch b/queue-4.19/tracing-kprobes-return-eaddrnotavail-when-func-matches-several-symbols.patch
new file mode 100644 (file)
index 0000000..c28089d
--- /dev/null
@@ -0,0 +1,98 @@
+From b022f0c7e404887a7c5229788fc99eff9f9a80d5 Mon Sep 17 00:00:00 2001
+From: Francis Laniel <flaniel@linux.microsoft.com>
+Date: Fri, 20 Oct 2023 13:42:49 +0300
+Subject: tracing/kprobes: Return EADDRNOTAVAIL when func matches several symbols
+
+From: Francis Laniel <flaniel@linux.microsoft.com>
+
+commit b022f0c7e404887a7c5229788fc99eff9f9a80d5 upstream.
+
+When a kprobe is attached to a function whose name is not unique (it is
+static and shares its name with other functions in the kernel), the
+kprobe is attached to the first matching function it finds. This is a
+bug, as the function it attaches to is not necessarily the one that the
+user wants to attach to.
+
+Instead of blindly picking a function when the name is ambiguous, error
+with EADDRNOTAVAIL to let the user know that this function is not
+unique, and that they must use a unique symbol with an address offset
+to get to the function they want to attach to.
+
+Link: https://lore.kernel.org/all/20231020104250.9537-2-flaniel@linux.microsoft.com/
+
+Cc: stable@vger.kernel.org
+Fixes: 413d37d1eb69 ("tracing: Add kprobe-based event tracer")
+Suggested-by: Masami Hiramatsu <mhiramat@kernel.org>
+Signed-off-by: Francis Laniel <flaniel@linux.microsoft.com>
+Link: https://lore.kernel.org/lkml/20230819101105.b0c104ae4494a7d1f2eea742@kernel.org/
+Acked-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
+Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/trace_kprobe.c |   48 ++++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 48 insertions(+)
+
+--- a/kernel/trace/trace_kprobe.c
++++ b/kernel/trace/trace_kprobe.c
+@@ -715,6 +715,36 @@ static inline void sanitize_event_name(c
+                       *name = '_';
+ }
++struct count_symbols_struct {
++      const char *func_name;
++      unsigned int count;
++};
++
++static int count_symbols(void *data, const char *name, struct module *unused0,
++                       unsigned long unused1)
++{
++      struct count_symbols_struct *args = data;
++
++      if (strcmp(args->func_name, name))
++              return 0;
++
++      args->count++;
++
++      return 0;
++}
++
++static unsigned int number_of_same_symbols(char *func_name)
++{
++      struct count_symbols_struct args = {
++              .func_name = func_name,
++              .count = 0,
++      };
++
++      kallsyms_on_each_symbol(count_symbols, &args);
++
++      return args.count;
++}
++
+ static int create_trace_kprobe(int argc, char **argv)
+ {
+       /*
+@@ -845,6 +875,24 @@ static int create_trace_kprobe(int argc,
+       }
+       argc -= 2; argv += 2;
++      if (symbol && !strchr(symbol, ':')) {
++              unsigned int count;
++
++              count = number_of_same_symbols(symbol);
++              if (count > 1)
++                      /*
++                       * Users should use ADDR to remove the ambiguity of
++                       * using KSYM only.
++                       */
++                      return -EADDRNOTAVAIL;
++              else if (count == 0)
++                      /*
++                       * We can return ENOENT earlier than when register the
++                       * We can return ENOENT earlier than when registering the
++                       */
++                      return -ENOENT;
++      }
++
+       /* setup a probe */
+       if (!event) {
+               /* Make a new event name */
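The duplicate-symbol count the patch computes with kallsyms_on_each_symbol()
can be approximated from userspace by scanning /proc/kallsyms, as in the
sketch below. This is only an approximation of the kernel-side walk, and
"name_show" is merely an example of a commonly duplicated static function.
With the patch applied, probing such a bare name through kprobe_events
fails with EADDRNOTAVAIL until a unique symbol plus offset is given.

#include <stdio.h>
#include <string.h>

/* Count how many kallsyms entries share a given name; mirrors what
 * number_of_same_symbols() computes inside the kernel. */
static unsigned int count_symbol(const char *func_name)
{
	FILE *f = fopen("/proc/kallsyms", "r");
	char line[512], name[256];
	unsigned int count = 0;

	if (!f)
		return 0;

	while (fgets(line, sizeof(line), f)) {
		/* line format: "<addr> <type> <name> [module]" */
		if (sscanf(line, "%*s %*s %255s", name) == 1 &&
		    !strcmp(name, func_name))
			count++;
	}
	fclose(f);
	return count;
}

int main(void)
{
	const char *sym = "name_show";	/* example non-unique symbol */
	unsigned int n = count_symbol(sym);

	printf("%s: %u match(es)%s\n", sym, n,
	       n > 1 ? " -> bare-name kprobe now gets EADDRNOTAVAIL" : "");
	return 0;
}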