From e3f78b189733d90a8fce2178a9f1b14b93dc8d17 Mon Sep 17 00:00:00 2001
From: Greg Kroah-Hartman
Date: Tue, 16 Jun 2020 15:25:37 +0200
Subject: [PATCH] drop the kvm patches from older kernels, I went too fast...

---
 ...te-on-injecting-an-aarch32-exception.patch | 92 --------------
 queue-4.19/series | 1 -
 ...auth-keys-in-non-preemptible-context.patch | 116 ------------------
 ...te-on-injecting-an-aarch32-exception.patch | 92 --------------
 queue-5.4/series | 2 -
 ...auth-keys-in-non-preemptible-context.patch | 116 ------------------
 ...te-on-injecting-an-aarch32-exception.patch | 92 --------------
 queue-5.6/series | 2 -
 8 files changed, 513 deletions(-)
 delete mode 100644 queue-4.19/kvm-arm64-synchronize-sysreg-state-on-injecting-an-aarch32-exception.patch
 delete mode 100644 queue-5.4/kvm-arm64-save-the-host-s-ptrauth-keys-in-non-preemptible-context.patch
 delete mode 100644 queue-5.4/kvm-arm64-synchronize-sysreg-state-on-injecting-an-aarch32-exception.patch
 delete mode 100644 queue-5.6/kvm-arm64-save-the-host-s-ptrauth-keys-in-non-preemptible-context.patch
 delete mode 100644 queue-5.6/kvm-arm64-synchronize-sysreg-state-on-injecting-an-aarch32-exception.patch

diff --git a/queue-4.19/kvm-arm64-synchronize-sysreg-state-on-injecting-an-aarch32-exception.patch b/queue-4.19/kvm-arm64-synchronize-sysreg-state-on-injecting-an-aarch32-exception.patch
deleted file mode 100644
index e086445dd1a..00000000000
--- a/queue-4.19/kvm-arm64-synchronize-sysreg-state-on-injecting-an-aarch32-exception.patch
+++ /dev/null
@@ -1,92 +0,0 @@
-From 0370964dd3ff7d3d406f292cb443a927952cbd05 Mon Sep 17 00:00:00 2001
-From: Marc Zyngier
-Date: Tue, 9 Jun 2020 08:50:29 +0100
-Subject: KVM: arm64: Synchronize sysreg state on injecting an AArch32 exception
-
-From: Marc Zyngier
-
-commit 0370964dd3ff7d3d406f292cb443a927952cbd05 upstream.
-
-On a VHE system, the EL1 state is left in the CPU most of the time,
-and only syncronized back to memory when vcpu_put() is called (most
-of the time on preemption).
-
-Which means that when injecting an exception, we'd better have a way
-to either:
-(1) write directly to the EL1 sysregs
-(2) synchronize the state back to memory, and do the changes there
-
-For an AArch64, we already do (1), so we are safe. Unfortunately,
-doing the same thing for AArch32 would be pretty invasive. Instead,
-we can easily implement (2) by calling the put/load architectural
-backends, and keep preemption disabled. We can then reload the
-state back into EL1.
-
-Cc: stable@vger.kernel.org
-Reported-by: James Morse
-Signed-off-by: Marc Zyngier
-Signed-off-by: Greg Kroah-Hartman
-
----
- virt/kvm/arm/aarch32.c | 28 ++++++++++++++++++++++++++++
- 1 file changed, 28 insertions(+)
-
---- a/virt/kvm/arm/aarch32.c
-+++ b/virt/kvm/arm/aarch32.c
-@@ -44,6 +44,26 @@ static const u8 return_offsets[8][2] = {
- [7] = { 4, 4 }, /* FIQ, unused */
- };
- 
-+static bool pre_fault_synchronize(struct kvm_vcpu *vcpu)
-+{
-+ preempt_disable();
-+ if (vcpu->arch.sysregs_loaded_on_cpu) {
-+ kvm_arch_vcpu_put(vcpu);
-+ return true;
-+ }
-+
-+ preempt_enable();
-+ return false;
-+}
-+
-+static void post_fault_synchronize(struct kvm_vcpu *vcpu, bool loaded)
-+{
-+ if (loaded) {
-+ kvm_arch_vcpu_load(vcpu, smp_processor_id());
-+ preempt_enable();
-+ }
-+}
-+
- /*
- * When an exception is taken, most CPSR fields are left unchanged in the
- * handler. However, some are explicitly overridden (e.g. M[4:0]).
-@@ -166,7 +186,10 @@ static void prepare_fault32(struct kvm_v - - void kvm_inject_undef32(struct kvm_vcpu *vcpu) - { -+ bool loaded = pre_fault_synchronize(vcpu); -+ - prepare_fault32(vcpu, PSR_AA32_MODE_UND, 4); -+ post_fault_synchronize(vcpu, loaded); - } - - /* -@@ -179,6 +202,9 @@ static void inject_abt32(struct kvm_vcpu - u32 vect_offset; - u32 *far, *fsr; - bool is_lpae; -+ bool loaded; -+ -+ loaded = pre_fault_synchronize(vcpu); - - if (is_pabt) { - vect_offset = 12; -@@ -202,6 +228,8 @@ static void inject_abt32(struct kvm_vcpu - /* no need to shuffle FS[4] into DFSR[10] as its 0 */ - *fsr = DFSR_FSC_EXTABT_nLPAE; - } -+ -+ post_fault_synchronize(vcpu, loaded); - } - - void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr) diff --git a/queue-4.19/series b/queue-4.19/series index 18e74124600..d43acab5cbc 100644 --- a/queue-4.19/series +++ b/queue-4.19/series @@ -89,4 +89,3 @@ mmc-sdhci-msm-clear-tuning-done-flag-while-hs400-tuning.patch arm-dts-at91-sama5d2_ptc_ek-fix-sdmmc0-node-description.patch mmc-sdio-fix-potential-null-pointer-error-in-mmc_sdio_init_card.patch xen-pvcalls-back-test-for-errors-when-calling-backend_connect.patch -kvm-arm64-synchronize-sysreg-state-on-injecting-an-aarch32-exception.patch diff --git a/queue-5.4/kvm-arm64-save-the-host-s-ptrauth-keys-in-non-preemptible-context.patch b/queue-5.4/kvm-arm64-save-the-host-s-ptrauth-keys-in-non-preemptible-context.patch deleted file mode 100644 index 5d89434c784..00000000000 --- a/queue-5.4/kvm-arm64-save-the-host-s-ptrauth-keys-in-non-preemptible-context.patch +++ /dev/null @@ -1,116 +0,0 @@ -From ef3e40a7ea8dbe2abd0a345032cd7d5023b9684f Mon Sep 17 00:00:00 2001 -From: Marc Zyngier -Date: Wed, 3 Jun 2020 18:24:01 +0100 -Subject: KVM: arm64: Save the host's PtrAuth keys in non-preemptible context - -From: Marc Zyngier - -commit ef3e40a7ea8dbe2abd0a345032cd7d5023b9684f upstream. - -When using the PtrAuth feature in a guest, we need to save the host's -keys before allowing the guest to program them. For that, we dump -them in a per-CPU data structure (the so called host context). - -But both call sites that do this are in preemptible context, -which may end up in disaster should the vcpu thread get preempted -before reentering the guest. - -Instead, save the keys eagerly on each vcpu_load(). This has an -increased overhead, but is at least safe. 
- -Cc: stable@vger.kernel.org -Reviewed-by: Mark Rutland -Signed-off-by: Marc Zyngier -Signed-off-by: Greg Kroah-Hartman - ---- - arch/arm64/include/asm/kvm_emulate.h | 6 ------ - arch/arm64/kvm/handle_exit.c | 19 ++----------------- - virt/kvm/arm/arm.c | 18 +++++++++++++++++- - 3 files changed, 19 insertions(+), 24 deletions(-) - ---- a/arch/arm64/include/asm/kvm_emulate.h -+++ b/arch/arm64/include/asm/kvm_emulate.h -@@ -97,12 +97,6 @@ static inline void vcpu_ptrauth_disable( - vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK); - } - --static inline void vcpu_ptrauth_setup_lazy(struct kvm_vcpu *vcpu) --{ -- if (vcpu_has_ptrauth(vcpu)) -- vcpu_ptrauth_disable(vcpu); --} -- - static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu) - { - return vcpu->arch.vsesr_el2; ---- a/arch/arm64/kvm/handle_exit.c -+++ b/arch/arm64/kvm/handle_exit.c -@@ -162,31 +162,16 @@ static int handle_sve(struct kvm_vcpu *v - return 1; - } - --#define __ptrauth_save_key(regs, key) \ --({ \ -- regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1); \ -- regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1); \ --}) -- - /* - * Handle the guest trying to use a ptrauth instruction, or trying to access a - * ptrauth register. - */ - void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu) - { -- struct kvm_cpu_context *ctxt; -- -- if (vcpu_has_ptrauth(vcpu)) { -+ if (vcpu_has_ptrauth(vcpu)) - vcpu_ptrauth_enable(vcpu); -- ctxt = vcpu->arch.host_cpu_context; -- __ptrauth_save_key(ctxt->sys_regs, APIA); -- __ptrauth_save_key(ctxt->sys_regs, APIB); -- __ptrauth_save_key(ctxt->sys_regs, APDA); -- __ptrauth_save_key(ctxt->sys_regs, APDB); -- __ptrauth_save_key(ctxt->sys_regs, APGA); -- } else { -+ else - kvm_inject_undefined(vcpu); -- } - } - - /* ---- a/virt/kvm/arm/arm.c -+++ b/virt/kvm/arm/arm.c -@@ -354,6 +354,12 @@ int kvm_arch_vcpu_init(struct kvm_vcpu * - return kvm_vgic_vcpu_init(vcpu); - } - -+#define __ptrauth_save_key(regs, key) \ -+({ \ -+ regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1); \ -+ regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1); \ -+}) -+ - void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) - { - int *last_ran; -@@ -386,7 +392,17 @@ void kvm_arch_vcpu_load(struct kvm_vcpu - else - vcpu_set_wfe_traps(vcpu); - -- vcpu_ptrauth_setup_lazy(vcpu); -+ if (vcpu_has_ptrauth(vcpu)) { -+ struct kvm_cpu_context *ctxt = vcpu->arch.host_cpu_context; -+ -+ __ptrauth_save_key(ctxt->sys_regs, APIA); -+ __ptrauth_save_key(ctxt->sys_regs, APIB); -+ __ptrauth_save_key(ctxt->sys_regs, APDA); -+ __ptrauth_save_key(ctxt->sys_regs, APDB); -+ __ptrauth_save_key(ctxt->sys_regs, APGA); -+ -+ vcpu_ptrauth_disable(vcpu); -+ } - } - - void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) diff --git a/queue-5.4/kvm-arm64-synchronize-sysreg-state-on-injecting-an-aarch32-exception.patch b/queue-5.4/kvm-arm64-synchronize-sysreg-state-on-injecting-an-aarch32-exception.patch deleted file mode 100644 index 3fba683e8b5..00000000000 --- a/queue-5.4/kvm-arm64-synchronize-sysreg-state-on-injecting-an-aarch32-exception.patch +++ /dev/null @@ -1,92 +0,0 @@ -From 0370964dd3ff7d3d406f292cb443a927952cbd05 Mon Sep 17 00:00:00 2001 -From: Marc Zyngier -Date: Tue, 9 Jun 2020 08:50:29 +0100 -Subject: KVM: arm64: Synchronize sysreg state on injecting an AArch32 exception - -From: Marc Zyngier - -commit 0370964dd3ff7d3d406f292cb443a927952cbd05 upstream. 
- -On a VHE system, the EL1 state is left in the CPU most of the time, -and only syncronized back to memory when vcpu_put() is called (most -of the time on preemption). - -Which means that when injecting an exception, we'd better have a way -to either: -(1) write directly to the EL1 sysregs -(2) synchronize the state back to memory, and do the changes there - -For an AArch64, we already do (1), so we are safe. Unfortunately, -doing the same thing for AArch32 would be pretty invasive. Instead, -we can easily implement (2) by calling the put/load architectural -backends, and keep preemption disabled. We can then reload the -state back into EL1. - -Cc: stable@vger.kernel.org -Reported-by: James Morse -Signed-off-by: Marc Zyngier -Signed-off-by: Greg Kroah-Hartman - ---- - virt/kvm/arm/aarch32.c | 28 ++++++++++++++++++++++++++++ - 1 file changed, 28 insertions(+) - ---- a/virt/kvm/arm/aarch32.c -+++ b/virt/kvm/arm/aarch32.c -@@ -33,6 +33,26 @@ static const u8 return_offsets[8][2] = { - [7] = { 4, 4 }, /* FIQ, unused */ - }; - -+static bool pre_fault_synchronize(struct kvm_vcpu *vcpu) -+{ -+ preempt_disable(); -+ if (vcpu->arch.sysregs_loaded_on_cpu) { -+ kvm_arch_vcpu_put(vcpu); -+ return true; -+ } -+ -+ preempt_enable(); -+ return false; -+} -+ -+static void post_fault_synchronize(struct kvm_vcpu *vcpu, bool loaded) -+{ -+ if (loaded) { -+ kvm_arch_vcpu_load(vcpu, smp_processor_id()); -+ preempt_enable(); -+ } -+} -+ - /* - * When an exception is taken, most CPSR fields are left unchanged in the - * handler. However, some are explicitly overridden (e.g. M[4:0]). -@@ -155,7 +175,10 @@ static void prepare_fault32(struct kvm_v - - void kvm_inject_undef32(struct kvm_vcpu *vcpu) - { -+ bool loaded = pre_fault_synchronize(vcpu); -+ - prepare_fault32(vcpu, PSR_AA32_MODE_UND, 4); -+ post_fault_synchronize(vcpu, loaded); - } - - /* -@@ -168,6 +191,9 @@ static void inject_abt32(struct kvm_vcpu - u32 vect_offset; - u32 *far, *fsr; - bool is_lpae; -+ bool loaded; -+ -+ loaded = pre_fault_synchronize(vcpu); - - if (is_pabt) { - vect_offset = 12; -@@ -191,6 +217,8 @@ static void inject_abt32(struct kvm_vcpu - /* no need to shuffle FS[4] into DFSR[10] as its 0 */ - *fsr = DFSR_FSC_EXTABT_nLPAE; - } -+ -+ post_fault_synchronize(vcpu, loaded); - } - - void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr) diff --git a/queue-5.4/series b/queue-5.4/series index 5952feb9dc8..857f1bad4ef 100644 --- a/queue-5.4/series +++ b/queue-5.4/series @@ -130,5 +130,3 @@ mmc-sdio-fix-potential-null-pointer-error-in-mmc_sdio_init_card.patch mmc-sdio-fix-several-potential-memory-leaks-in-mmc_sdio_init_card.patch block-floppy-fix-contended-case-in-floppy_queue_rq.patch xen-pvcalls-back-test-for-errors-when-calling-backend_connect.patch -kvm-arm64-save-the-host-s-ptrauth-keys-in-non-preemptible-context.patch -kvm-arm64-synchronize-sysreg-state-on-injecting-an-aarch32-exception.patch diff --git a/queue-5.6/kvm-arm64-save-the-host-s-ptrauth-keys-in-non-preemptible-context.patch b/queue-5.6/kvm-arm64-save-the-host-s-ptrauth-keys-in-non-preemptible-context.patch deleted file mode 100644 index d0cd6a34c9b..00000000000 --- a/queue-5.6/kvm-arm64-save-the-host-s-ptrauth-keys-in-non-preemptible-context.patch +++ /dev/null @@ -1,116 +0,0 @@ -From ef3e40a7ea8dbe2abd0a345032cd7d5023b9684f Mon Sep 17 00:00:00 2001 -From: Marc Zyngier -Date: Wed, 3 Jun 2020 18:24:01 +0100 -Subject: KVM: arm64: Save the host's PtrAuth keys in non-preemptible context - -From: Marc Zyngier - -commit ef3e40a7ea8dbe2abd0a345032cd7d5023b9684f upstream. 
- -When using the PtrAuth feature in a guest, we need to save the host's -keys before allowing the guest to program them. For that, we dump -them in a per-CPU data structure (the so called host context). - -But both call sites that do this are in preemptible context, -which may end up in disaster should the vcpu thread get preempted -before reentering the guest. - -Instead, save the keys eagerly on each vcpu_load(). This has an -increased overhead, but is at least safe. - -Cc: stable@vger.kernel.org -Reviewed-by: Mark Rutland -Signed-off-by: Marc Zyngier -Signed-off-by: Greg Kroah-Hartman - ---- - arch/arm64/include/asm/kvm_emulate.h | 6 ------ - arch/arm64/kvm/handle_exit.c | 19 ++----------------- - virt/kvm/arm/arm.c | 18 +++++++++++++++++- - 3 files changed, 19 insertions(+), 24 deletions(-) - ---- a/arch/arm64/include/asm/kvm_emulate.h -+++ b/arch/arm64/include/asm/kvm_emulate.h -@@ -111,12 +111,6 @@ static inline void vcpu_ptrauth_disable( - vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK); - } - --static inline void vcpu_ptrauth_setup_lazy(struct kvm_vcpu *vcpu) --{ -- if (vcpu_has_ptrauth(vcpu)) -- vcpu_ptrauth_disable(vcpu); --} -- - static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu) - { - return vcpu->arch.vsesr_el2; ---- a/arch/arm64/kvm/handle_exit.c -+++ b/arch/arm64/kvm/handle_exit.c -@@ -162,31 +162,16 @@ static int handle_sve(struct kvm_vcpu *v - return 1; - } - --#define __ptrauth_save_key(regs, key) \ --({ \ -- regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1); \ -- regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1); \ --}) -- - /* - * Handle the guest trying to use a ptrauth instruction, or trying to access a - * ptrauth register. - */ - void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu) - { -- struct kvm_cpu_context *ctxt; -- -- if (vcpu_has_ptrauth(vcpu)) { -+ if (vcpu_has_ptrauth(vcpu)) - vcpu_ptrauth_enable(vcpu); -- ctxt = vcpu->arch.host_cpu_context; -- __ptrauth_save_key(ctxt->sys_regs, APIA); -- __ptrauth_save_key(ctxt->sys_regs, APIB); -- __ptrauth_save_key(ctxt->sys_regs, APDA); -- __ptrauth_save_key(ctxt->sys_regs, APDB); -- __ptrauth_save_key(ctxt->sys_regs, APGA); -- } else { -+ else - kvm_inject_undefined(vcpu); -- } - } - - /* ---- a/virt/kvm/arm/arm.c -+++ b/virt/kvm/arm/arm.c -@@ -332,6 +332,12 @@ void kvm_arch_vcpu_unblocking(struct kvm - preempt_enable(); - } - -+#define __ptrauth_save_key(regs, key) \ -+({ \ -+ regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1); \ -+ regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1); \ -+}) -+ - void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) - { - int *last_ran; -@@ -365,7 +371,17 @@ void kvm_arch_vcpu_load(struct kvm_vcpu - else - vcpu_set_wfx_traps(vcpu); - -- vcpu_ptrauth_setup_lazy(vcpu); -+ if (vcpu_has_ptrauth(vcpu)) { -+ struct kvm_cpu_context *ctxt = vcpu->arch.host_cpu_context; -+ -+ __ptrauth_save_key(ctxt->sys_regs, APIA); -+ __ptrauth_save_key(ctxt->sys_regs, APIB); -+ __ptrauth_save_key(ctxt->sys_regs, APDA); -+ __ptrauth_save_key(ctxt->sys_regs, APDB); -+ __ptrauth_save_key(ctxt->sys_regs, APGA); -+ -+ vcpu_ptrauth_disable(vcpu); -+ } - } - - void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) diff --git a/queue-5.6/kvm-arm64-synchronize-sysreg-state-on-injecting-an-aarch32-exception.patch b/queue-5.6/kvm-arm64-synchronize-sysreg-state-on-injecting-an-aarch32-exception.patch deleted file mode 100644 index 3fba683e8b5..00000000000 --- a/queue-5.6/kvm-arm64-synchronize-sysreg-state-on-injecting-an-aarch32-exception.patch +++ /dev/null 
@@ -1,92 +0,0 @@ -From 0370964dd3ff7d3d406f292cb443a927952cbd05 Mon Sep 17 00:00:00 2001 -From: Marc Zyngier -Date: Tue, 9 Jun 2020 08:50:29 +0100 -Subject: KVM: arm64: Synchronize sysreg state on injecting an AArch32 exception - -From: Marc Zyngier - -commit 0370964dd3ff7d3d406f292cb443a927952cbd05 upstream. - -On a VHE system, the EL1 state is left in the CPU most of the time, -and only syncronized back to memory when vcpu_put() is called (most -of the time on preemption). - -Which means that when injecting an exception, we'd better have a way -to either: -(1) write directly to the EL1 sysregs -(2) synchronize the state back to memory, and do the changes there - -For an AArch64, we already do (1), so we are safe. Unfortunately, -doing the same thing for AArch32 would be pretty invasive. Instead, -we can easily implement (2) by calling the put/load architectural -backends, and keep preemption disabled. We can then reload the -state back into EL1. - -Cc: stable@vger.kernel.org -Reported-by: James Morse -Signed-off-by: Marc Zyngier -Signed-off-by: Greg Kroah-Hartman - ---- - virt/kvm/arm/aarch32.c | 28 ++++++++++++++++++++++++++++ - 1 file changed, 28 insertions(+) - ---- a/virt/kvm/arm/aarch32.c -+++ b/virt/kvm/arm/aarch32.c -@@ -33,6 +33,26 @@ static const u8 return_offsets[8][2] = { - [7] = { 4, 4 }, /* FIQ, unused */ - }; - -+static bool pre_fault_synchronize(struct kvm_vcpu *vcpu) -+{ -+ preempt_disable(); -+ if (vcpu->arch.sysregs_loaded_on_cpu) { -+ kvm_arch_vcpu_put(vcpu); -+ return true; -+ } -+ -+ preempt_enable(); -+ return false; -+} -+ -+static void post_fault_synchronize(struct kvm_vcpu *vcpu, bool loaded) -+{ -+ if (loaded) { -+ kvm_arch_vcpu_load(vcpu, smp_processor_id()); -+ preempt_enable(); -+ } -+} -+ - /* - * When an exception is taken, most CPSR fields are left unchanged in the - * handler. However, some are explicitly overridden (e.g. M[4:0]). -@@ -155,7 +175,10 @@ static void prepare_fault32(struct kvm_v - - void kvm_inject_undef32(struct kvm_vcpu *vcpu) - { -+ bool loaded = pre_fault_synchronize(vcpu); -+ - prepare_fault32(vcpu, PSR_AA32_MODE_UND, 4); -+ post_fault_synchronize(vcpu, loaded); - } - - /* -@@ -168,6 +191,9 @@ static void inject_abt32(struct kvm_vcpu - u32 vect_offset; - u32 *far, *fsr; - bool is_lpae; -+ bool loaded; -+ -+ loaded = pre_fault_synchronize(vcpu); - - if (is_pabt) { - vect_offset = 12; -@@ -191,6 +217,8 @@ static void inject_abt32(struct kvm_vcpu - /* no need to shuffle FS[4] into DFSR[10] as its 0 */ - *fsr = DFSR_FSC_EXTABT_nLPAE; - } -+ -+ post_fault_synchronize(vcpu, loaded); - } - - void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr) diff --git a/queue-5.6/series b/queue-5.6/series index c17920beb8f..319416ad588 100644 --- a/queue-5.6/series +++ b/queue-5.6/series @@ -157,5 +157,3 @@ block-floppy-fix-contended-case-in-floppy_queue_rq.patch xen-pvcalls-back-test-for-errors-when-calling-backend_connect.patch platform-x86-sony-laptop-snc-calls-should-handle-buffer-types.patch platform-x86-sony-laptop-make-resuming-thermal-profile-safer.patch -kvm-arm64-save-the-host-s-ptrauth-keys-in-non-preemptible-context.patch -kvm-arm64-synchronize-sysreg-state-on-injecting-an-aarch32-exception.patch -- 2.47.3