--- /dev/null
+From e88b2c6e5a4d9ce30d75391e4d950da74bb2bd90 Mon Sep 17 00:00:00 2001
+From: Daniel Borkmann <daniel@iogearbox.net>
+Date: Tue, 9 Feb 2021 18:46:10 +0000
+Subject: bpf: Fix 32 bit src register truncation on div/mod
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+commit e88b2c6e5a4d9ce30d75391e4d950da74bb2bd90 upstream.
+
+While reviewing a different fix, John and I noticed an oddity in one of
+the BPF program dumps, for example:
+
+ # bpftool p d x i 13
+ 0: (b7) r0 = 808464450
+ 1: (b4) w4 = 808464432
+ 2: (bc) w0 = w0
+ 3: (15) if r0 == 0x0 goto pc+1
+ 4: (9c) w4 %= w0
+ [...]
+
+In line 2 we noticed that the mov32 32-bit truncates the original src
+register of the div/mod operation. While the dst register of the two
+operations is typically marked unknown, e.g. from
+adjust_scalar_min_max_vals(), the src register is not, and thus the
+verifier keeps tracking its original bounds. Simplified:
+
+ 0: R1=ctx(id=0,off=0,imm=0) R10=fp0
+ 0: (b7) r0 = -1
+ 1: R0_w=invP-1 R1=ctx(id=0,off=0,imm=0) R10=fp0
+ 1: (b7) r1 = -1
+ 2: R0_w=invP-1 R1_w=invP-1 R10=fp0
+ 2: (3c) w0 /= w1
+ 3: R0_w=invP(id=0,umax_value=4294967295,var_off=(0x0; 0xffffffff)) R1_w=invP-1 R10=fp0
+ 3: (77) r1 >>= 32
+ 4: R0_w=invP(id=0,umax_value=4294967295,var_off=(0x0; 0xffffffff)) R1_w=invP4294967295 R10=fp0
+ 4: (bf) r0 = r1
+ 5: R0_w=invP4294967295 R1_w=invP4294967295 R10=fp0
+ 5: (95) exit
+ processed 6 insns (limit 1000000) max_states_per_insn 0 total_states 0 peak_states 0 mark_read 0
+
+The runtime result of r0 at exit is 0 instead of the expected -1: the
+inserted mov32 clobbers the upper 32 bits of r1 before the right shift,
+while the verifier still tracks r1's full 64-bit value. Remove the
+verifier's mov32 src rewrite in div/mod and replace it with a jmp32 test
+instead. After the fix, the following code is generated for dividend r1
+and divisor r6:
+
+ div, 64 bit: div, 32 bit:
+
+ 0: (b7) r6 = 8 0: (b7) r6 = 8
+ 1: (b7) r1 = 8 1: (b7) r1 = 8
+ 2: (55) if r6 != 0x0 goto pc+2 2: (56) if w6 != 0x0 goto pc+2
+ 3: (ac) w1 ^= w1 3: (ac) w1 ^= w1
+ 4: (05) goto pc+1 4: (05) goto pc+1
+ 5: (3f) r1 /= r6 5: (3c) w1 /= w6
+ 6: (b7) r0 = 0 6: (b7) r0 = 0
+ 7: (95) exit 7: (95) exit
+
+ mod, 64 bit: mod, 32 bit:
+
+ 0: (b7) r6 = 8 0: (b7) r6 = 8
+ 1: (b7) r1 = 8 1: (b7) r1 = 8
+ 2: (15) if r6 == 0x0 goto pc+1 2: (16) if w6 == 0x0 goto pc+1
+ 3: (9f) r1 %= r6 3: (9c) w1 %= w6
+ 4: (b7) r0 = 0 4: (b7) r0 = 0
+ 5: (95) exit 5: (95) exit
+
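+The semantics that the new patchlets enforce can be modelled in plain C.
+The snippet below is a user-space sketch only (the helper names are made
+up, not kernel code), based on the div/mod-by-zero behaviour stated in
+the patch comments (Rx div 0 -> 0, Rx mod 0 -> Rx):
+
+  /* User-space model of the runtime semantics guaranteed by the rewrite.
+   * Note the old mov32-based rewrite additionally clobbered the upper
+   * half of the 64-bit src register, which this model does not do.
+   */
+  #include <assert.h>
+  #include <stdint.h>
+
+  static uint64_t bpf_div64(uint64_t dst, uint64_t src)
+  {
+          return src ? dst / src : 0;   /* Rx div 0 -> 0  */
+  }
+
+  static uint64_t bpf_mod64(uint64_t dst, uint64_t src)
+  {
+          return src ? dst % src : dst; /* Rx mod 0 -> Rx */
+  }
+
+  static uint32_t bpf_div32(uint32_t dst, uint32_t src)
+  {
+          return src ? dst / src : 0;   /* jmp32 tests w_src only */
+  }
+
+  static uint32_t bpf_mod32(uint32_t dst, uint32_t src)
+  {
+          return src ? dst % src : dst;
+  }
+
+  int main(void)
+  {
+          /* The case from the log above: w0 /= w1 with both regs -1. */
+          assert(bpf_div32((uint32_t)-1, (uint32_t)-1) == 1);
+          assert(bpf_div64((uint64_t)-1, (uint64_t)-1) == 1);
+          assert(bpf_div64(8, 0) == 0);
+          assert(bpf_mod64(8, 0) == 8);
+          assert(bpf_mod32(8, 0) == 8);
+          return 0;
+  }
+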
+x86 in particular can throw a 'divide error' exception for the div
+instruction not only when the divisor is zero, but also when the
+quotient is too large for the designated register. For the edx:eax and
+rdx:rax dividend pairs this is not an issue in the x86 BPF JIT since we
+always zero edx (rdx). Hence the only protection really needed is
+against the divisor being zero.
+
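+As a quick sanity check of that argument (again a user-space sketch, not
+JIT code): once the upper half of the dividend is zeroed, the quotient of
+an unsigned divide can never exceed the dividend itself, so it always
+fits into eax (rax) and the only remaining #DE source is a zero divisor:
+
+  #include <assert.h>
+  #include <stdint.h>
+
+  int main(void)
+  {
+          /* edx = 0, eax = 0xffffffff -> dividend < 2^32 */
+          uint64_t dividend = UINT32_MAX;
+
+          for (uint64_t divisor = 1; divisor <= 1000; divisor++)
+                  assert(dividend / divisor <= UINT32_MAX);
+          return 0;
+  }
+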
+Fixes: 68fda450a7df ("bpf: fix 32-bit divide by zero")
+Co-developed-by: John Fastabend <john.fastabend@gmail.com>
+Signed-off-by: John Fastabend <john.fastabend@gmail.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/bpf/verifier.c | 28 +++++++++++++---------------
+ 1 file changed, 13 insertions(+), 15 deletions(-)
+
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -9002,30 +9002,28 @@ static int fixup_bpf_calls(struct bpf_ve
+ insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
+ insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
+ bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
+- struct bpf_insn mask_and_div[] = {
+- BPF_MOV32_REG(insn->src_reg, insn->src_reg),
++ bool isdiv = BPF_OP(insn->code) == BPF_DIV;
++ struct bpf_insn *patchlet;
++ struct bpf_insn chk_and_div[] = {
+ /* Rx div 0 -> 0 */
+- BPF_JMP_IMM(BPF_JNE, insn->src_reg, 0, 2),
++ BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
++ BPF_JNE | BPF_K, insn->src_reg,
++ 0, 2, 0),
+ BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ *insn,
+ };
+- struct bpf_insn mask_and_mod[] = {
+- BPF_MOV32_REG(insn->src_reg, insn->src_reg),
++ struct bpf_insn chk_and_mod[] = {
+ /* Rx mod 0 -> Rx */
+- BPF_JMP_IMM(BPF_JEQ, insn->src_reg, 0, 1),
++ BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
++ BPF_JEQ | BPF_K, insn->src_reg,
++ 0, 1, 0),
+ *insn,
+ };
+- struct bpf_insn *patchlet;
+
+- if (insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
+- insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
+- patchlet = mask_and_div + (is64 ? 1 : 0);
+- cnt = ARRAY_SIZE(mask_and_div) - (is64 ? 1 : 0);
+- } else {
+- patchlet = mask_and_mod + (is64 ? 1 : 0);
+- cnt = ARRAY_SIZE(mask_and_mod) - (is64 ? 1 : 0);
+- }
++ patchlet = isdiv ? chk_and_div : chk_and_mod;
++ cnt = isdiv ? ARRAY_SIZE(chk_and_div) :
++ ARRAY_SIZE(chk_and_mod);
+
+ new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
+ if (!new_prog)
--- /dev/null
+From 19a23da53932bc8011220bd8c410cb76012de004 Mon Sep 17 00:00:00 2001
+From: Peter Gonda <pgonda@google.com>
+Date: Wed, 27 Jan 2021 08:15:24 -0800
+Subject: Fix unsynchronized access to sev members through svm_register_enc_region
+
+From: Peter Gonda <pgonda@google.com>
+
+commit 19a23da53932bc8011220bd8c410cb76012de004 upstream.
+
+Grab kvm->lock before pinning memory when registering an encrypted
+region; sev_pin_memory() relies on kvm->lock being held to ensure
+correctness when checking and updating the number of pinned pages.
+
+Add a lockdep assertion to help prevent future regressions.
+
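+The problem is a plain check-then-update race on the pinned-page count.
+A user-space model of the fixed pattern (pthreads; names such as
+pages_locked and locked_max are simplified stand-ins for the SEV
+accounting, not the kernel code) looks like this, with the check and the
+update done under a single lock the way svm_register_enc_region() now
+holds kvm->lock:
+
+  #include <assert.h>
+  #include <pthread.h>
+  #include <stdio.h>
+
+  static pthread_mutex_t kvm_lock = PTHREAD_MUTEX_INITIALIZER;
+  static unsigned long pages_locked;          /* pinned-page count     */
+  static const unsigned long locked_max = 64; /* models the rlimit cap */
+
+  /* Models sev_pin_memory(): only correct with kvm_lock held, which the
+   * in-kernel lockdep_assert_held(&kvm->lock) now documents (and checks
+   * when lockdep is enabled). */
+  static int pin_pages(unsigned long npages)
+  {
+          if (pages_locked + npages > locked_max)
+                  return -1;
+          pages_locked += npages;
+          return 0;
+  }
+
+  /* Models svm_register_enc_region() after the fix: the lock is taken
+   * before pinning and dropped only once the region is fully accounted. */
+  static void *register_region(void *arg)
+  {
+          unsigned long npages = *(unsigned long *)arg;
+
+          pthread_mutex_lock(&kvm_lock);
+          if (pin_pages(npages) == 0) {
+                  /* ...add the region to the list, still under the lock */
+          }
+          pthread_mutex_unlock(&kvm_lock);
+          return NULL;
+  }
+
+  int main(void)
+  {
+          pthread_t t[8];
+          unsigned long npages = 16;
+          int i;
+
+          for (i = 0; i < 8; i++)
+                  pthread_create(&t[i], NULL, register_region, &npages);
+          for (i = 0; i < 8; i++)
+                  pthread_join(t[i], NULL);
+
+          /* With the lock held across check and update, the cap holds. */
+          assert(pages_locked <= locked_max);
+          printf("pages_locked = %lu (max %lu)\n", pages_locked, locked_max);
+          return 0;
+  }
+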
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: Joerg Roedel <joro@8bytes.org>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: Brijesh Singh <brijesh.singh@amd.com>
+Cc: Sean Christopherson <seanjc@google.com>
+Cc: x86@kernel.org
+Cc: kvm@vger.kernel.org
+Cc: stable@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org
+Fixes: 1e80fdc09d12 ("KVM: SVM: Pin guest memory when SEV is active")
+Signed-off-by: Peter Gonda <pgonda@google.com>
+
+V2
+ - Fix up patch description
+ - Correct file paths svm.c -> sev.c
+ - Add unlock of kvm->lock on sev_pin_memory error
+
+V1
+ - https://lore.kernel.org/kvm/20210126185431.1824530-1-pgonda@google.com/
+
+Message-Id: <20210127161524.2832400-1-pgonda@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/svm.c | 17 ++++++++++-------
+ 1 file changed, 10 insertions(+), 7 deletions(-)
+
+
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -1835,6 +1835,8 @@ static struct page **sev_pin_memory(stru
+ struct page **pages;
+ unsigned long first, last;
+
++ lockdep_assert_held(&kvm->lock);
++
+ if (ulen == 0 || uaddr + ulen < uaddr)
+ return NULL;
+
+@@ -7091,12 +7093,20 @@ static int svm_register_enc_region(struc
+ if (!region)
+ return -ENOMEM;
+
++ mutex_lock(&kvm->lock);
+ 	region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1);
+ if (!region->pages) {
+ ret = -ENOMEM;
++ mutex_unlock(&kvm->lock);
+ goto e_free;
+ }
+
++	region->uaddr = range->addr;
++	region->size = range->size;
++
++	list_add_tail(&region->list, &sev->regions_list);
++	mutex_unlock(&kvm->lock);
++
+ /*
+ * The guest may change the memory encryption attribute from C=0 -> C=1
+ * or vice versa for this memory range. Lets make sure caches are
+@@ -7105,13 +7115,6 @@ static int svm_register_enc_region(struc
+ */
+ sev_clflush_pages(region->pages, region->npages);
+
+- region->uaddr = range->addr;
+- region->size = range->size;
+-
+- mutex_lock(&kvm->lock);
+-	list_add_tail(&region->list, &sev->regions_list);
+- mutex_unlock(&kvm->lock);
+-
+ return ret;
+
+ e_free: