--- /dev/null
+From stable+bounces-2888-greg=kroah.com@vger.kernel.org Tue Nov 28 11:57:47 2023
+From: Zenghui Yu <yuzenghui@huawei.com>
+Date: Tue, 28 Nov 2023 19:57:24 +0800
+Subject: arm64: cpufeature: Extract capped perfmon fields
+To: <stable@vger.kernel.org>, <gregkh@linuxfoundation.org>, <sashal@kernel.org>
+Cc: <linux-arm-kernel@lists.infradead.org>, <kvmarm@lists.linux.dev>, <andrew.murray@arm.com>, <mark.rutland@arm.com>, <suzuki.poulose@arm.com>, <wanghaibin.wang@huawei.com>, <will@kernel.org>, Zenghui Yu <yuzenghui@huawei.com>
+Message-ID: <20231128115725.964-2-yuzenghui@huawei.com>
+
+From: Andrew Murray <andrew.murray@arm.com>
+
+commit 8e35aa642ee4dab01b16cc4b2df59d1936f3b3c2 upstream.
+
+When emulating ID registers there is often a need to cap the version
+bits of a feature such that the guest will not use features that the
+host is not aware of, for example when KVM mediates access to the PMU
+by emulating register accesses.
+
+Let's add a helper that extracts a Performance Monitors ID field and
+caps the version to a given value.
+
+Fields that identify the version of the Performance Monitors Extension
+do not follow the standard ID scheme, and instead follow the scheme
+described in ARM DDI 0487E.a page D13-2825 "Alternative ID scheme used
+for the Performance Monitors Extension version". The value 0xF means an
+IMPLEMENTATION DEFINED PMU is present, and values 0x0-0xE can be treated
+the same as an unsigned field with 0x0 meaning no PMU is present.
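+
+As a rough usage sketch (the caller below is hypothetical and not part
+of this patch; the follow-up patch in this series adds the real callers
+in KVM's read_id_reg()):
+
+	u64 val = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
+
+	/* Cap ID_AA64DFR0_EL1.PMUVer to 0x4, i.e. PMUv3 for ARMv8.1 */
+	val = cpuid_feature_cap_perfmon_field(val,
+					      ID_AA64DFR0_PMUVER_SHIFT,
+					      0x4);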
+
+Signed-off-by: Andrew Murray <andrew.murray@arm.com>
+Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+[Mark: rework to handle perfmon fields]
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Zenghui Yu <yuzenghui@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/cpufeature.h | 23 +++++++++++++++++++++++
+ 1 file changed, 23 insertions(+)
+
+--- a/arch/arm64/include/asm/cpufeature.h
++++ b/arch/arm64/include/asm/cpufeature.h
+@@ -449,6 +449,29 @@ cpuid_feature_extract_unsigned_field(u64
+ return cpuid_feature_extract_unsigned_field_width(features, field, 4);
+ }
+
++/*
++ * Fields that identify the version of the Performance Monitors Extension do
++ * not follow the standard ID scheme. See ARM DDI 0487E.a page D13-2825,
++ * "Alternative ID scheme used for the Performance Monitors Extension version".
++ */
++static inline u64 __attribute_const__
++cpuid_feature_cap_perfmon_field(u64 features, int field, u64 cap)
++{
++ u64 val = cpuid_feature_extract_unsigned_field(features, field);
++ u64 mask = GENMASK_ULL(field + 3, field);
++
++ /* Treat IMPLEMENTATION DEFINED functionality as unimplemented */
++ if (val == 0xf)
++ val = 0;
++
++ if (val > cap) {
++ features &= ~mask;
++ features |= (cap << field) & mask;
++ }
++
++ return features;
++}
++
+ static inline u64 arm64_ftr_mask(const struct arm64_ftr_bits *ftrp)
+ {
+ return (u64)GENMASK(ftrp->shift + ftrp->width - 1, ftrp->shift);
--- /dev/null
+From stable+bounces-2889-greg=kroah.com@vger.kernel.org Tue Nov 28 11:57:52 2023
+From: Zenghui Yu <yuzenghui@huawei.com>
+Date: Tue, 28 Nov 2023 19:57:25 +0800
+Subject: KVM: arm64: limit PMU version to PMUv3 for ARMv8.1
+To: <stable@vger.kernel.org>, <gregkh@linuxfoundation.org>, <sashal@kernel.org>
+Cc: <linux-arm-kernel@lists.infradead.org>, <kvmarm@lists.linux.dev>, <andrew.murray@arm.com>, <mark.rutland@arm.com>, <suzuki.poulose@arm.com>, <wanghaibin.wang@huawei.com>, <will@kernel.org>, Zenghui Yu <yuzenghui@huawei.com>
+Message-ID: <20231128115725.964-3-yuzenghui@huawei.com>
+
+From: Andrew Murray <andrew.murray@arm.com>
+
+commit c854188ea01062f5a5fd7f05658feb1863774eaa upstream.
+
+We currently expose the PMU version of the host to the guest via
+emulation of the DFR0_EL1 and AA64DFR0_EL1 debug feature registers.
+However, many of the features offered beyond PMUv3 for ARMv8.1 are not
+supported in KVM. Examples of this include support for the PMMIR
+registers (added in PMUv3 for ARMv8.4) and 64-bit event counters (added
+in PMUv3 for ARMv8.5).
+
+Let's trap accesses to the Debug Feature Registers in order to limit
+the PMUVer/PerfMon fields they report to PMUv3 for ARMv8.1, avoiding
+unexpected behaviour.
+
+Both ID_AA64DFR0.PMUVer and ID_DFR0.PerfMon follow the "Alternative ID
+scheme used for the Performance Monitors Extension version" where 0xF
+means an IMPLEMENTATION DEFINED PMU is implemented, and values 0x0-0xE
+are treated the same as an unsigned field (with 0x0 meaning no PMU is
+present). As we don't expect to expose an IMPLEMENTATION DEFINED PMU,
+and our cap is below 0xF, we can treat these fields as unsigned when
+applying the cap.
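+
+As an illustrative example (host values hypothetical): a host whose
+ID_AA64DFR0_EL1.PMUVer field reads 0x5 (PMUv3 for ARMv8.4) is capped so
+that the guest reads 0x4 (PMUv3 for ARMv8.1), while a host reading 0x4
+or lower already satisfies the cap and its value is passed through
+unchanged.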
+
+Signed-off-by: Andrew Murray <andrew.murray@arm.com>
+Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+[Mark: make field names consistent, use perfmon cap]
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Zenghui Yu <yuzenghui@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/sysreg.h | 6 ++++++
+ arch/arm64/kvm/sys_regs.c | 10 ++++++++++
+ 2 files changed, 16 insertions(+)
+
+--- a/arch/arm64/include/asm/sysreg.h
++++ b/arch/arm64/include/asm/sysreg.h
+@@ -697,6 +697,12 @@
+ #define ID_AA64DFR0_TRACEVER_SHIFT 4
+ #define ID_AA64DFR0_DEBUGVER_SHIFT 0
+
++#define ID_AA64DFR0_PMUVER_8_1 0x4
++
++#define ID_DFR0_PERFMON_SHIFT 24
++
++#define ID_DFR0_PERFMON_8_1 0x4
++
+ #define ID_ISAR5_RDM_SHIFT 24
+ #define ID_ISAR5_CRC32_SHIFT 16
+ #define ID_ISAR5_SHA2_SHIFT 12
+--- a/arch/arm64/kvm/sys_regs.c
++++ b/arch/arm64/kvm/sys_regs.c
+@@ -1089,6 +1089,16 @@ static u64 read_id_reg(const struct kvm_
+ (0xfUL << ID_AA64ISAR1_API_SHIFT) |
+ (0xfUL << ID_AA64ISAR1_GPA_SHIFT) |
+ (0xfUL << ID_AA64ISAR1_GPI_SHIFT));
++ } else if (id == SYS_ID_AA64DFR0_EL1) {
++ /* Limit guests to PMUv3 for ARMv8.1 */
++ val = cpuid_feature_cap_perfmon_field(val,
++ ID_AA64DFR0_PMUVER_SHIFT,
++ ID_AA64DFR0_PMUVER_8_1);
++ } else if (id == SYS_ID_DFR0_EL1) {
++ /* Limit guests to PMUv3 for ARMv8.1 */
++ val = cpuid_feature_cap_perfmon_field(val,
++ ID_DFR0_PERFMON_SHIFT,
++ ID_DFR0_PERFMON_8_1);
+ }
+
+ return val;
ext4-using-nofail-preallocation-in-ext4_es_insert_ex.patch
ext4-fix-slab-use-after-free-in-ext4_es_insert_exten.patch
ext4-make-sure-allocate-pending-entry-not-fail.patch
+tracing-kprobes-return-eaddrnotavail-when-func-matches-several-symbols.patch
+arm64-cpufeature-extract-capped-perfmon-fields.patch
+kvm-arm64-limit-pmu-version-to-pmuv3-for-armv8.1.patch
--- /dev/null
+From b022f0c7e404887a7c5229788fc99eff9f9a80d5 Mon Sep 17 00:00:00 2001
+From: Francis Laniel <flaniel@linux.microsoft.com>
+Date: Fri, 20 Oct 2023 13:42:49 +0300
+Subject: tracing/kprobes: Return EADDRNOTAVAIL when func matches several symbols
+
+From: Francis Laniel <flaniel@linux.microsoft.com>
+
+commit b022f0c7e404887a7c5229788fc99eff9f9a80d5 upstream.
+
+When a kprobe is attached to a function whose name is not unique (i.e.
+the function is static and shares its name with other functions in the
+kernel), the kprobe is attached to the first matching function found.
+This is a bug, as the function being attached to is not necessarily the
+one the user wants to attach to.
+
+Instead of blindly picking one of several ambiguous functions to attach
+to, error with EADDRNOTAVAIL to let the user know that this function is
+not unique, and that they must use another, unique function with an
+address offset to reach the function they actually want to attach to.
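+
+As a sketch of the user-visible effect (the symbol and event names
+below are hypothetical), probing a symbol that several compilation
+units define now fails instead of silently landing on the first match:
+
+	# echo 'p:my_probe name_show' >> /sys/kernel/tracing/kprobe_events
+	(write fails with -EADDRNOTAVAIL)
+
+The probe can instead be placed through a unique symbol plus an offset,
+e.g. 'p:my_probe unique_neighbour+0x10', or through a raw address taken
+from /proc/kallsyms.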
+
+Link: https://lore.kernel.org/all/20231020104250.9537-2-flaniel@linux.microsoft.com/
+
+Cc: stable@vger.kernel.org
+Fixes: 413d37d1eb69 ("tracing: Add kprobe-based event tracer")
+Suggested-by: Masami Hiramatsu <mhiramat@kernel.org>
+Signed-off-by: Francis Laniel <flaniel@linux.microsoft.com>
+Link: https://lore.kernel.org/lkml/20230819101105.b0c104ae4494a7d1f2eea742@kernel.org/
+Acked-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
+Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/trace_kprobe.c | 74 ++++++++++++++++++++++++++++++++++++++++++++
+ kernel/trace/trace_probe.h |  1 +
+ 2 files changed, 75 insertions(+)
+
+--- a/kernel/trace/trace_kprobe.c
++++ b/kernel/trace/trace_kprobe.c
+@@ -714,6 +714,36 @@ static inline void sanitize_event_name(c
+ *name = '_';
+ }
+
++struct count_symbols_struct {
++ const char *func_name;
++ unsigned int count;
++};
++
++static int count_symbols(void *data, const char *name, struct module *unused0,
++ unsigned long unused1)
++{
++ struct count_symbols_struct *args = data;
++
++ if (strcmp(args->func_name, name))
++ return 0;
++
++ args->count++;
++
++ return 0;
++}
++
++static unsigned int number_of_same_symbols(char *func_name)
++{
++ struct count_symbols_struct args = {
++ .func_name = func_name,
++ .count = 0,
++ };
++
++ kallsyms_on_each_symbol(count_symbols, &args);
++
++ return args.count;
++}
++
+ static int trace_kprobe_create(int argc, const char *argv[])
+ {
+ /*
+@@ -825,6 +855,31 @@ static int trace_kprobe_create(int argc,
+ }
+ }
+
++ if (symbol && !strchr(symbol, ':')) {
++ unsigned int count;
++
++ count = number_of_same_symbols(symbol);
++ if (count > 1) {
++ /*
++ * Users should use ADDR to remove the ambiguity of
++ * using KSYM only.
++ */
++ trace_probe_log_err(0, NON_UNIQ_SYMBOL);
++ ret = -EADDRNOTAVAIL;
++
++ goto error;
++ } else if (count == 0) {
++ /*
++			 * We can return ENOENT earlier than when registering
++			 * the kprobe.
++ */
++ trace_probe_log_err(0, BAD_PROBE_ADDR);
++ ret = -ENOENT;
++
++ goto error;
++ }
++ }
++
+ trace_probe_log_set_index(0);
+ if (event) {
+ ret = traceprobe_parse_event_name(&event, &group, buf,
+@@ -1596,6 +1651,7 @@ static int unregister_kprobe_event(struc
+ }
+
+ #ifdef CONFIG_PERF_EVENTS
++
+ /* create a trace_kprobe, but don't add it to global lists */
+ struct trace_event_call *
+ create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
+@@ -1605,6 +1661,24 @@ create_local_trace_kprobe(char *func, vo
+ int ret;
+ char *event;
+
++ if (func) {
++ unsigned int count;
++
++ count = number_of_same_symbols(func);
++ if (count > 1)
++ /*
++ * Users should use addr to remove the ambiguity of
++ * using func only.
++ */
++ return ERR_PTR(-EADDRNOTAVAIL);
++ else if (count == 0)
++ /*
++			 * We can return ENOENT earlier than when registering
++			 * the kprobe.
++ */
++ return ERR_PTR(-ENOENT);
++ }
++
+ /*
+ * local trace_kprobes are not added to dyn_event, so they are never
+ * searched in find_trace_kprobe(). Therefore, there is no concern of
+--- a/kernel/trace/trace_probe.h
++++ b/kernel/trace/trace_probe.h
+@@ -403,6 +403,7 @@ extern int traceprobe_define_arg_fields(
+ C(BAD_MAXACT, "Invalid maxactive number"), \
+ C(MAXACT_TOO_BIG, "Maxactive is too big"), \
+ C(BAD_PROBE_ADDR, "Invalid probed address or symbol"), \
++ C(NON_UNIQ_SYMBOL, "The symbol is not unique"), \
+ C(BAD_RETPROBE, "Retprobe address must be a function entry"), \
+ C(NO_GROUP_NAME, "Group name is not specified"), \
+ C(GROUP_TOO_LONG, "Group name is too long"), \