git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
Drop kvm-svm-name-and-check-reserved-fields-with-structs-.patch
author    Sasha Levin <sashal@kernel.org>
          Fri, 15 Sep 2023 01:21:17 +0000 (21:21 -0400)
committer Sasha Levin <sashal@kernel.org>
          Fri, 15 Sep 2023 01:21:17 +0000 (21:21 -0400)
Signed-off-by: Sasha Levin <sashal@kernel.org>
queue-6.1/kvm-svm-name-and-check-reserved-fields-with-structs-.patch [deleted file]
queue-6.1/series

diff --git a/queue-6.1/kvm-svm-name-and-check-reserved-fields-with-structs-.patch b/queue-6.1/kvm-svm-name-and-check-reserved-fields-with-structs-.patch
deleted file mode 100644
index 6c79093..0000000
--- a/queue-6.1/kvm-svm-name-and-check-reserved-fields-with-structs-.patch
+++ /dev/null
@@ -1,267 +0,0 @@
-From 033671fc511439f6fde88640d0bff031ad9bb80a Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Mon, 24 Oct 2022 11:44:48 -0500
-Subject: KVM: SVM: Name and check reserved fields with structs offset
-
-From: Carlos Bilbao <carlos.bilbao@amd.com>
-
-[ Upstream commit d08b48585309247d4d28051dd7a315eef5d1db26 ]
-
-Rename reserved fields on all structs in arch/x86/include/asm/svm.h
-following their offset within the structs. Include compile time checks for
-this in the same place where other BUILD_BUG_ON for the structs are.
-
-This also fixes that the fields of struct sev_es_save_area, though named by
-their order of appearance, currently jump from reserved_5 to reserved_7.
-
-Link: https://lkml.org/lkml/2022/10/22/376
-Signed-off-by: Carlos Bilbao <carlos.bilbao@amd.com>
-Message-Id: <20221024164448.203351-1-carlos.bilbao@amd.com>
-[Use ASSERT_STRUCT_OFFSET + fix a couple wrong offsets. - Paolo]
-Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-Stable-dep-of: f67063414c0e ("KVM: SVM: correct the size of spec_ctrl field in VMCB save area")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- arch/x86/include/asm/svm.h | 93 ++++++++++++++++++++++++++------------
- arch/x86/kvm/svm/sev.c     |  2 +-
- 2 files changed, 66 insertions(+), 29 deletions(-)
-
-diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
-index 02aac78cb21d4..770dcf75eaa97 100644
---- a/arch/x86/include/asm/svm.h
-+++ b/arch/x86/include/asm/svm.h
-@@ -300,12 +300,13 @@ struct vmcb_save_area {
-       struct vmcb_seg ldtr;
-       struct vmcb_seg idtr;
-       struct vmcb_seg tr;
--      u8 reserved_1[42];
-+      /* Reserved fields are named following their struct offset */
-+      u8 reserved_0xa0[42];
-       u8 vmpl;
-       u8 cpl;
--      u8 reserved_2[4];
-+      u8 reserved_0xcc[4];
-       u64 efer;
--      u8 reserved_3[112];
-+      u8 reserved_0xd8[112];
-       u64 cr4;
-       u64 cr3;
-       u64 cr0;
-@@ -313,7 +314,7 @@ struct vmcb_save_area {
-       u64 dr6;
-       u64 rflags;
-       u64 rip;
--      u8 reserved_4[88];
-+      u8 reserved_0x180[88];
-       u64 rsp;
-       u64 s_cet;
-       u64 ssp;
-@@ -328,14 +329,14 @@ struct vmcb_save_area {
-       u64 sysenter_esp;
-       u64 sysenter_eip;
-       u64 cr2;
--      u8 reserved_5[32];
-+      u8 reserved_0x248[32];
-       u64 g_pat;
-       u64 dbgctl;
-       u64 br_from;
-       u64 br_to;
-       u64 last_excp_from;
-       u64 last_excp_to;
--      u8 reserved_6[72];
-+      u8 reserved_0x298[72];
-       u32 spec_ctrl;          /* Guest version of SPEC_CTRL at 0x2E0 */
- } __packed;
-@@ -356,12 +357,12 @@ struct sev_es_save_area {
-       u64 vmpl2_ssp;
-       u64 vmpl3_ssp;
-       u64 u_cet;
--      u8 reserved_1[2];
-+      u8 reserved_0xc8[2];
-       u8 vmpl;
-       u8 cpl;
--      u8 reserved_2[4];
-+      u8 reserved_0xcc[4];
-       u64 efer;
--      u8 reserved_3[104];
-+      u8 reserved_0xd8[104];
-       u64 xss;
-       u64 cr4;
-       u64 cr3;
-@@ -378,7 +379,7 @@ struct sev_es_save_area {
-       u64 dr1_addr_mask;
-       u64 dr2_addr_mask;
-       u64 dr3_addr_mask;
--      u8 reserved_4[24];
-+      u8 reserved_0x1c0[24];
-       u64 rsp;
-       u64 s_cet;
-       u64 ssp;
-@@ -393,21 +394,21 @@ struct sev_es_save_area {
-       u64 sysenter_esp;
-       u64 sysenter_eip;
-       u64 cr2;
--      u8 reserved_5[32];
-+      u8 reserved_0x248[32];
-       u64 g_pat;
-       u64 dbgctl;
-       u64 br_from;
-       u64 br_to;
-       u64 last_excp_from;
-       u64 last_excp_to;
--      u8 reserved_7[80];
-+      u8 reserved_0x298[80];
-       u32 pkru;
--      u8 reserved_8[20];
--      u64 reserved_9;         /* rax already available at 0x01f8 */
-+      u32 tsc_aux;
-+      u8 reserved_0x2f0[24];
-       u64 rcx;
-       u64 rdx;
-       u64 rbx;
--      u64 reserved_10;        /* rsp already available at 0x01d8 */
-+      u64 reserved_0x320;     /* rsp already available at 0x01d8 */
-       u64 rbp;
-       u64 rsi;
-       u64 rdi;
-@@ -419,7 +420,7 @@ struct sev_es_save_area {
-       u64 r13;
-       u64 r14;
-       u64 r15;
--      u8 reserved_11[16];
-+      u8 reserved_0x380[16];
-       u64 guest_exit_info_1;
-       u64 guest_exit_info_2;
-       u64 guest_exit_int_info;
-@@ -432,7 +433,7 @@ struct sev_es_save_area {
-       u64 pcpu_id;
-       u64 event_inj;
-       u64 xcr0;
--      u8 reserved_12[16];
-+      u8 reserved_0x3f0[16];
-       /* Floating point area */
-       u64 x87_dp;
-@@ -450,23 +451,23 @@ struct sev_es_save_area {
- } __packed;
- struct ghcb_save_area {
--      u8 reserved_1[203];
-+      u8 reserved_0x0[203];
-       u8 cpl;
--      u8 reserved_2[116];
-+      u8 reserved_0xcc[116];
-       u64 xss;
--      u8 reserved_3[24];
-+      u8 reserved_0x148[24];
-       u64 dr7;
--      u8 reserved_4[16];
-+      u8 reserved_0x168[16];
-       u64 rip;
--      u8 reserved_5[88];
-+      u8 reserved_0x180[88];
-       u64 rsp;
--      u8 reserved_6[24];
-+      u8 reserved_0x1e0[24];
-       u64 rax;
--      u8 reserved_7[264];
-+      u8 reserved_0x200[264];
-       u64 rcx;
-       u64 rdx;
-       u64 rbx;
--      u8 reserved_8[8];
-+      u8 reserved_0x320[8];
-       u64 rbp;
-       u64 rsi;
-       u64 rdi;
-@@ -478,12 +479,12 @@ struct ghcb_save_area {
-       u64 r13;
-       u64 r14;
-       u64 r15;
--      u8 reserved_9[16];
-+      u8 reserved_0x380[16];
-       u64 sw_exit_code;
-       u64 sw_exit_info_1;
-       u64 sw_exit_info_2;
-       u64 sw_scratch;
--      u8 reserved_10[56];
-+      u8 reserved_0x3b0[56];
-       u64 xcr0;
-       u8 valid_bitmap[16];
-       u64 x87_state_gpa;
-@@ -497,7 +498,7 @@ struct ghcb {
-       u8 shared_buffer[GHCB_SHARED_BUF_SIZE];
--      u8 reserved_1[10];
-+      u8 reserved_0xff0[10];
-       u16 protocol_version;   /* negotiated SEV-ES/GHCB protocol version */
-       u32 ghcb_usage;
- } __packed;
-@@ -509,6 +510,9 @@ struct ghcb {
- #define EXPECTED_VMCB_CONTROL_AREA_SIZE               1024
- #define EXPECTED_GHCB_SIZE                    PAGE_SIZE
-+#define BUILD_BUG_RESERVED_OFFSET(x, y) \
-+      ASSERT_STRUCT_OFFSET(struct x, reserved ## _ ## y, y)
-+
- static inline void __unused_size_checks(void)
- {
-       BUILD_BUG_ON(sizeof(struct vmcb_save_area)      != EXPECTED_VMCB_SAVE_AREA_SIZE);
-@@ -516,6 +520,39 @@ static inline void __unused_size_checks(void)
-       BUILD_BUG_ON(sizeof(struct sev_es_save_area)    != EXPECTED_SEV_ES_SAVE_AREA_SIZE);
-       BUILD_BUG_ON(sizeof(struct vmcb_control_area)   != EXPECTED_VMCB_CONTROL_AREA_SIZE);
-       BUILD_BUG_ON(sizeof(struct ghcb)                != EXPECTED_GHCB_SIZE);
-+
-+      /* Check offsets of reserved fields */
-+
-+      BUILD_BUG_RESERVED_OFFSET(vmcb_save_area, 0xa0);
-+      BUILD_BUG_RESERVED_OFFSET(vmcb_save_area, 0xcc);
-+      BUILD_BUG_RESERVED_OFFSET(vmcb_save_area, 0xd8);
-+      BUILD_BUG_RESERVED_OFFSET(vmcb_save_area, 0x180);
-+      BUILD_BUG_RESERVED_OFFSET(vmcb_save_area, 0x248);
-+      BUILD_BUG_RESERVED_OFFSET(vmcb_save_area, 0x298);
-+
-+      BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0xc8);
-+      BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0xcc);
-+      BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0xd8);
-+      BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x1c0);
-+      BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x248);
-+      BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x298);
-+      BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x2f0);
-+      BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x320);
-+      BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x380);
-+      BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x3f0);
-+
-+      BUILD_BUG_RESERVED_OFFSET(ghcb_save_area, 0x0);
-+      BUILD_BUG_RESERVED_OFFSET(ghcb_save_area, 0xcc);
-+      BUILD_BUG_RESERVED_OFFSET(ghcb_save_area, 0x148);
-+      BUILD_BUG_RESERVED_OFFSET(ghcb_save_area, 0x168);
-+      BUILD_BUG_RESERVED_OFFSET(ghcb_save_area, 0x180);
-+      BUILD_BUG_RESERVED_OFFSET(ghcb_save_area, 0x1e0);
-+      BUILD_BUG_RESERVED_OFFSET(ghcb_save_area, 0x200);
-+      BUILD_BUG_RESERVED_OFFSET(ghcb_save_area, 0x320);
-+      BUILD_BUG_RESERVED_OFFSET(ghcb_save_area, 0x380);
-+      BUILD_BUG_RESERVED_OFFSET(ghcb_save_area, 0x3b0);
-+
-+      BUILD_BUG_RESERVED_OFFSET(ghcb, 0xff0);
- }
- struct vmcb {
-diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
-index e0437acb5cf75..06caee08b7285 100644
---- a/arch/x86/kvm/svm/sev.c
-+++ b/arch/x86/kvm/svm/sev.c
-@@ -2653,7 +2653,7 @@ static int setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
-               ghcb_scratch_beg = control->ghcb_gpa +
-                                  offsetof(struct ghcb, shared_buffer);
-               ghcb_scratch_end = control->ghcb_gpa +
--                                 offsetof(struct ghcb, reserved_1);
-+                                 offsetof(struct ghcb, reserved_0xff0);
-               /*
-                * If the scratch area begins within the GHCB, it must be
--- 
-2.40.1
-
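For reference, the convention the dropped patch introduced can be shown outside the kernel tree: each reserved field is named after the byte offset at which it starts, and a compile-time assertion verifies that name against the actual layout. Below is a minimal standalone C sketch of that pattern; struct toy_save_area and CHECK_RESERVED_OFFSET are illustrative stand-ins for the svm.h structs and the patch's BUILD_BUG_RESERVED_OFFSET()/ASSERT_STRUCT_OFFSET() helpers, not kernel code.

#include <stddef.h>
#include <stdint.h>

/*
 * Toy layout following the dropped patch's convention: every reserved
 * field is named after the hex offset at which it starts, e.g. the
 * reserved bytes at offset 0x08 become reserved_0x08.
 */
struct toy_save_area {
	uint64_t cr0;               /* 0x00 */
	uint8_t  reserved_0x08[8];  /* 0x08 */
	uint64_t rip;               /* 0x10 */
	uint8_t  reserved_0x18[16]; /* 0x18 */
} __attribute__((packed));

/*
 * Compile-time check that reserved_<off> really starts at <off>; this
 * mirrors the BUILD_BUG_RESERVED_OFFSET()/ASSERT_STRUCT_OFFSET() idea
 * from the patch, but uses plain C11 _Static_assert.
 */
#define CHECK_RESERVED_OFFSET(type, off) \
	_Static_assert(offsetof(struct type, reserved_ ## off) == (off), \
		       "reserved field is not at the offset its name claims")

CHECK_RESERVED_OFFSET(toy_save_area, 0x08);
CHECK_RESERVED_OFFSET(toy_save_area, 0x18);

int main(void)
{
	/* Nothing to do at run time; the checks above fail the build if
	 * the layout and the field names ever disagree. */
	return 0;
}

If a field is later moved or resized, the assertion fires at build time instead of letting the reserved_0x... names silently drift out of step with the layout, which is the same protection the BUILD_BUG_RESERVED_OFFSET() calls in the patch provide for the real VMCB/GHCB structures.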
diff --git a/queue-6.1/series b/queue-6.1/series
index 639cf72efac7e1f5ec8b0cfac70d418875fdce62..ea009c2a34c2de2cc0c6e0216a906b16ff50fe91 100644
--- a/queue-6.1/series
+++ b/queue-6.1/series
@@ -71,7 +71,6 @@ perf-vendor-events-update-the-json-events-descriptio.patch
 perf-vendor-events-drop-some-of-the-json-events-for-.patch
 perf-vendor-events-drop-stores_per_inst-metric-event.patch
 perf-top-don-t-pass-an-err_ptr-directly-to-perf_sess.patch
-kvm-svm-name-and-check-reserved-fields-with-structs-.patch
 kvm-svm-correct-the-size-of-spec_ctrl-field-in-vmcb-.patch
 watchdog-intel-mid_wdt-add-module_alias-to-allow-aut.patch
 pwm-lpc32xx-remove-handling-of-pwm-channels.patch