From 129a72a0d3c8e139a04512325384fe5ac119e74d Mon Sep 17 00:00:00 2001
From: Steve Rutherford <srutherford@google.com>
Date: Wed, 11 Jan 2017 18:28:29 -0800
Subject: KVM: x86: Introduce segmented_write_std

From: Steve Rutherford <srutherford@google.com>

commit 129a72a0d3c8e139a04512325384fe5ac119e74d upstream.

Introduces segmented_write_std.

Switches from emulated reads/writes to standard read/writes in fxsave,
fxrstor, sgdt, and sidt. This fixes CVE-2017-2584, a longstanding
kernel memory leak.

Since commit 283c95d0e389 ("KVM: x86: emulate FXSAVE and FXRSTOR",
2016-11-09), which is luckily not yet in any final release, this would
also be an exploitable kernel memory *write*!
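
[ Illustrative sketch, not part of the upstream commit: the difference
between the two paths can be modeled in plain userspace C. The emulated
path may defer the access (as real MMIO emulation does), capturing a
pointer to the caller's buffer and completing the copy after the
emulator's stack frame is dead; the *_std path copies immediately,
which is why on-stack buffers such as em_fxsave()'s fx_state are only
safe with it. All names below (write_emulated, write_std, pending,
emulate_fxsave) are hypothetical stand-ins, not the kernel's API. ]

#include <stdio.h>
#include <string.h>

/* Deferred-completion state, standing in for the pointer the real
 * emulator keeps across an MMIO exit. */
static struct {
	const void *src;	/* pointer captured for later completion */
	size_t len;
} pending;

/* "Emulated" path: may defer the copy, capturing only a pointer to
 * the caller's buffer. */
static void write_emulated(void *dst, const void *src, size_t len)
{
	(void)dst;
	pending.src = src;	/* dangles once the caller returns */
	pending.len = len;
}

/* "Standard" path: copies immediately, so stack buffers are safe. */
static void write_std(void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);
}

static void emulate_fxsave(void *guest_mem, int use_std)
{
	char fx_state[512];	/* on-stack buffer, as in em_fxsave() */

	memset(fx_state, 0x41, sizeof(fx_state));
	if (use_std)
		write_std(guest_mem, fx_state, sizeof(fx_state));
	else
		write_emulated(guest_mem, fx_state, sizeof(fx_state));
}	/* fx_state is dead here; a deferred copy would read stale stack */

int main(void)
{
	char guest_mem[512] = { 0 };

	emulate_fxsave(guest_mem, 0);
	/*
	 * Completing the deferred write now would read whatever occupies
	 * the dead stack slot -- the disclosure behind CVE-2017-2584:
	 *
	 *	memcpy(guest_mem, pending.src, pending.len);
	 */
	emulate_fxsave(guest_mem, 1);	/* std path copied before returning */
	printf("guest_mem[0] after std write: 0x%02x\n",
	       (unsigned char)guest_mem[0]);
	return 0;
}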

Reported-by: Dmitry Vyukov <dvyukov@google.com>
Fixes: 96051572c819194c37a8367624b285be10297eca
Fixes: 283c95d0e3891b64087706b344a4b545d04a6e62
Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Steve Rutherford <srutherford@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 arch/x86/kvm/emulate.c | 22 ++++++++++++++++++----
 1 file changed, 18 insertions(+), 4 deletions(-)

--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -803,6 +803,20 @@ static int segmented_read_std(struct x86
 	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
 }
 
+static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
+			       struct segmented_address addr,
+			       void *data,
+			       unsigned int size)
+{
+	int rc;
+	ulong linear;
+
+	rc = linearize(ctxt, addr, size, true, &linear);
+	if (rc != X86EMUL_CONTINUE)
+		return rc;
+	return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception);
+}
+
 /*
  * Prefetch the remaining bytes of the instruction without crossing page
  * boundary if they are not in fetch_cache yet.
@@ -3698,8 +3712,8 @@ static int emulate_store_desc_ptr(struct
 	}
 	/* Disable writeback. */
 	ctxt->dst.type = OP_NONE;
-	return segmented_write(ctxt, ctxt->dst.addr.mem,
-			       &desc_ptr, 2 + ctxt->op_bytes);
+	return segmented_write_std(ctxt, ctxt->dst.addr.mem,
+				   &desc_ptr, 2 + ctxt->op_bytes);
 }
 
 static int em_sgdt(struct x86_emulate_ctxt *ctxt)
@@ -3945,7 +3959,7 @@ static int em_fxsave(struct x86_emulate_
 	else
 		size = offsetof(struct fxregs_state, xmm_space[0]);
 
-	return segmented_write(ctxt, ctxt->memop.addr.mem, &fx_state, size);
+	return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
 }
 
 static int fxrstor_fixup(struct x86_emulate_ctxt *ctxt,
@@ -3987,7 +4001,7 @@ static int em_fxrstor(struct x86_emulate
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 
-	rc = segmented_read(ctxt, ctxt->memop.addr.mem, &fx_state, 512);
+	rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, 512);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 