--- /dev/null
+From e9bd9c498cb0f5843996dbe5cbce7a1836a83c70 Mon Sep 17 00:00:00 2001
+From: Eduard Zingerman <eddyz87@gmail.com>
+Date: Tue, 24 Sep 2024 14:08:43 -0700
+Subject: bpf: sync_linked_regs() must preserve subreg_def
+
+From: Eduard Zingerman <eddyz87@gmail.com>
+
+commit e9bd9c498cb0f5843996dbe5cbce7a1836a83c70 upstream.
+
+Range propagation must not affect subreg_def marks, otherwise the
+following example is rewritten incorrectly by the verifier when the
+BPF_F_TEST_RND_HI32 flag is set:
+
+  0: call bpf_ktime_get_ns                 call bpf_ktime_get_ns
+  1: r0 &= 0x7fffffff      after verifier  r0 &= 0x7fffffff
+  2: w1 = w0                  rewrites     w1 = w0
+  3: if w0 < 10 goto +0    --------------> r11 = 0x2f5674a6      (r)
+  4: r1 >>= 32                             r11 <<= 32            (r)
+  5: r0 = r1                               r1 |= r11             (r)
+  6: exit;                                 if w0 < 0xa goto pc+0
+                                           r1 >>= 32
+                                           r0 = r1
+                                           exit
+
+(or, on architectures that require explicit zero extension of the
+ upper register half, the zero extension of w1 at (2) is omitted).
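+
+The three (r) instructions can be read as the following hedged C model
+of their semantics. This sketches only the effect on a register value,
+not the verifier's instruction-patching machinery, and the helper name
+is made up for illustration:
+
+  #include <stdint.h>
+
+  /* Poison the upper 32 bits of a register with a random constant,
+   * as BPF_F_TEST_RND_HI32 does after a 32-bit def that was not
+   * flagged for zero extension. */
+  static uint64_t rnd_hi32_poison(uint64_t reg, uint32_t rnd)
+  {
+          uint64_t r11 = rnd;   /* r11 = 0x2f5674a6 */
+
+          r11 <<= 32;           /* r11 <<= 32 */
+          return reg | r11;     /* r1 |= r11  */
+  }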
+
+The following happens w/o this patch:
+- r0 is marked as not a subreg at (0);
+- w1 is marked as subreg at (2);
+- w1 subreg_def is overridden at (3) by copy_register_state();
+- w1 is read at (5) but mark_insn_zext() does not mark (2)
+ for zero extension, because w1 subreg_def is not set;
+- because of BPF_F_TEST_RND_HI32 flag verifier inserts random
+ value for hi32 bits of (2) (marked (r));
+- this random value is read at (5); see the C sketch below.
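+
+A hedged, self-contained C model of that subreg_def bookkeeping; the
+names follow kernel/bpf/verifier.c, but the structures are trimmed to
+the relevant members and the logic is simplified:
+
+  #include <stdbool.h>
+
+  #define DEF_NOT_SUBREG 0
+
+  struct bpf_reg_state {
+          /* insn index (+1) of the last write that defined only the
+           * low 32 bits, or DEF_NOT_SUBREG for a full 64-bit def. */
+          int subreg_def;
+  };
+
+  /* Called on a 64-bit read: flag the defining 32-bit write for zero
+   * extension. If an unguarded copy_register_state() clobbered
+   * subreg_def -- the bug fixed here -- nothing is flagged and the
+   * randomized upper half is kept. */
+  static void mark_insn_zext(bool *zext_dst, struct bpf_reg_state *reg)
+  {
+          if (reg->subreg_def != DEF_NOT_SUBREG)
+                  zext_dst[reg->subreg_def - 1] = true;
+          reg->subreg_def = DEF_NOT_SUBREG;
+  }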
+
+Fixes: 75748837b7e5 ("bpf: Propagate scalar ranges through register assignments.")
+Reported-by: Lonial Con <kongln9170@gmail.com>
+Signed-off-by: Lonial Con <kongln9170@gmail.com>
+Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
+Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Daniel Borkmann <daniel@iogearbox.net>
+Closes: https://lore.kernel.org/bpf/7e2aa30a62d740db182c170fdd8f81c596df280d.camel@gmail.com
+Link: https://lore.kernel.org/bpf/20240924210844.1758441-1-eddyz87@gmail.com
+[ shung-hsi.yu: sync_linked_regs() was called find_equal_scalars() before commit
+ 4bf79f9be434 ("bpf: Track equal scalars history on per-instruction level"), and
+ the modification is made at a single site because there is only one call to
+ copy_register_state() before commit 98d7ca374ba4 ("bpf: Track delta between
+ "linked" registers."). ]
+Signed-off-by: Shung-Hsi Yu <shung-hsi.yu@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/bpf/verifier.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -14497,8 +14497,11 @@ static void find_equal_scalars(struct bp
+ struct bpf_reg_state *reg;
+
+ bpf_for_each_reg_in_vstate(vstate, state, reg, ({
+- if (reg->type == SCALAR_VALUE && reg->id == known_reg->id)
++ if (reg->type == SCALAR_VALUE && reg->id == known_reg->id) {
++ s32 saved_subreg_def = reg->subreg_def;
+ copy_register_state(reg, known_reg);
++ reg->subreg_def = saved_subreg_def;
++ }
+ }));
+ }
+
--- /dev/null
+From 6685f5d572c22e1003e7c0d089afe1c64340ab1f Mon Sep 17 00:00:00 2001
+From: James Morse <james.morse@arm.com>
+Date: Wed, 30 Oct 2024 16:03:16 +0000
+Subject: KVM: arm64: Disable MPAM visibility by default and ignore VMM writes
+
+From: James Morse <james.morse@arm.com>
+
+commit 6685f5d572c22e1003e7c0d089afe1c64340ab1f upstream.
+
+commit 011e5f5bf529f ("arm64/cpufeature: Add remaining feature bits in
+ID_AA64PFR0 register") exposed the MPAM field of AA64PFR0_EL1 to guests,
+but didn't add trap handling. A previous patch supplied the missing trap
+handling.
+
+Existing VMs that have the MPAM field of ID_AA64PFR0_EL1 set need to
+be migratable, but there is little point enabling the MPAM CPU
+interface on new VMs until there is something a guest can do with it.
+
+Clear the MPAM field from the guest's ID_AA64PFR0_EL1 and on hardware
+that supports MPAM, politely ignore the VMM's attempts to set this field.
+
+Guests exposed to this bug have the sanitised value of the MPAM field,
+so only the correct value needs to be ignored. This means the field
+can continue to be used to block migration to incompatible hardware
+(between MPAM=1 and MPAM=5), and the VMM can't rely on the field
+being ignored.
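+
+As a hedged illustration of the resulting behaviour for a VMM's restore
+path on an arm64 host (standard KVM_SET_ONE_REG usage; error handling
+is trimmed, and restore_pfr0()/vcpu_fd are illustrative names, not
+kernel code):
+
+  #include <stdint.h>
+  #include <sys/ioctl.h>
+  #include <linux/kvm.h>
+
+  /* ID_AA64PFR0_EL1 is op0=3, op1=0, CRn=0, CRm=4, op2=0 */
+  #define PFR0_ID ARM64_SYS_REG(3, 0, 0, 4, 0)
+
+  static int restore_pfr0(int vcpu_fd, uint64_t saved)
+  {
+          struct kvm_one_reg reg = {
+                  .id   = PFR0_ID,
+                  .addr = (uint64_t)&saved,
+          };
+
+          /* On MPAM-capable hosts a saved value carrying the sanitised
+           * MPAM field is accepted (KVM quietly ignores that field);
+           * any other non-zero MPAM value is rejected as before. */
+          return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
+  }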
+
+Signed-off-by: James Morse <james.morse@arm.com>
+Co-developed-by: Joey Gouly <joey.gouly@arm.com>
+Signed-off-by: Joey Gouly <joey.gouly@arm.com>
+Reviewed-by: Gavin Shan <gshan@redhat.com>
+Tested-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
+Reviewed-by: Marc Zyngier <maz@kernel.org>
+Link: https://lore.kernel.org/r/20241030160317.2528209-7-joey.gouly@arm.com
+Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
+Acked-by: Marc Zyngier <maz@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kvm/sys_regs.c | 52 ++++++++++++++++++++++++++++++++++++++++++++--
+ 1 file changed, 50 insertions(+), 2 deletions(-)
+
+--- a/arch/arm64/kvm/sys_regs.c
++++ b/arch/arm64/kvm/sys_regs.c
+@@ -1330,6 +1330,7 @@ static u64 __kvm_read_sanitised_id_reg(c
+ val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE);
+
+ val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SME);
++ val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MPAM_frac);
+ break;
+ case SYS_ID_AA64ISAR1_EL1:
+ if (!vcpu_has_ptrauth(vcpu))
+@@ -1472,6 +1473,13 @@ static u64 read_sanitised_id_aa64pfr0_el
+
+ val &= ~ID_AA64PFR0_EL1_AMU_MASK;
+
++ /*
++ * MPAM is disabled by default as KVM also needs a set of PARTID to
++ * program the MPAMVPMx_EL2 PARTID remapping registers with. But some
++ * older kernels let the guest see the ID bit.
++ */
++ val &= ~ID_AA64PFR0_EL1_MPAM_MASK;
++
+ return val;
+ }
+
+@@ -1560,6 +1568,42 @@ static int set_id_dfr0_el1(struct kvm_vc
+ return set_id_reg(vcpu, rd, val);
+ }
+
++static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
++ const struct sys_reg_desc *rd, u64 user_val)
++{
++ u64 hw_val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
++ u64 mpam_mask = ID_AA64PFR0_EL1_MPAM_MASK;
++
++ /*
++ * Commit 011e5f5bf529f ("arm64/cpufeature: Add remaining feature bits
++ * in ID_AA64PFR0 register") exposed the MPAM field of AA64PFR0_EL1 to
++ * guests, but didn't add trap handling. KVM doesn't support MPAM and
++ * always returns an UNDEF for these registers. The guest must see 0
++ * for this field.
++ *
++ * But KVM must also accept values from user-space that were provided
++ * by KVM. On CPUs that support MPAM, permit user-space to write
++ * the sanitised value to ID_AA64PFR0_EL1.MPAM, but ignore this field.
++ */
++ if ((hw_val & mpam_mask) == (user_val & mpam_mask))
++ user_val &= ~ID_AA64PFR0_EL1_MPAM_MASK;
++
++ return set_id_reg(vcpu, rd, user_val);
++}
++
++static int set_id_aa64pfr1_el1(struct kvm_vcpu *vcpu,
++ const struct sys_reg_desc *rd, u64 user_val)
++{
++ u64 hw_val = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1);
++ u64 mpam_mask = ID_AA64PFR1_EL1_MPAM_frac_MASK;
++
++ /* See set_id_aa64pfr0_el1 for comment about MPAM */
++ if ((hw_val & mpam_mask) == (user_val & mpam_mask))
++ user_val &= ~ID_AA64PFR1_EL1_MPAM_frac_MASK;
++
++ return set_id_reg(vcpu, rd, user_val);
++}
++
+ /*
+ * cpufeature ID register user accessors
+ *
+@@ -2018,10 +2062,14 @@ static const struct sys_reg_desc sys_reg
+ { SYS_DESC(SYS_ID_AA64PFR0_EL1),
+ .access = access_id_reg,
+ .get_user = get_id_reg,
+- .set_user = set_id_reg,
++ .set_user = set_id_aa64pfr0_el1,
+ .reset = read_sanitised_id_aa64pfr0_el1,
+ .val = ID_AA64PFR0_EL1_CSV2_MASK | ID_AA64PFR0_EL1_CSV3_MASK, },
+- ID_SANITISED(ID_AA64PFR1_EL1),
++ { SYS_DESC(SYS_ID_AA64PFR1_EL1),
++ .access = access_id_reg,
++ .get_user = get_id_reg,
++ .set_user = set_id_aa64pfr1_el1,
++ .reset = kvm_read_sanitised_id_reg, },
+ ID_UNALLOCATED(4,2),
+ ID_UNALLOCATED(4,3),
+ ID_SANITISED(ID_AA64ZFR0_EL1),