--- /dev/null
+From 3b013a2972d5bc344d6eaa8f24fdfe268211e45f Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <sean.j.christopherson@intel.com>
+Date: Tue, 7 May 2019 09:06:28 -0700
+Subject: KVM: nVMX: Always sync GUEST_BNDCFGS when it comes from vmcs01
+
+From: Sean Christopherson <sean.j.christopherson@intel.com>
+
+commit 3b013a2972d5bc344d6eaa8f24fdfe268211e45f upstream.
+
+If L1 does not set VM_ENTRY_LOAD_BNDCFGS, then L1's BNDCFGS value must
+be propagated to vmcs02 since KVM always runs with VM_ENTRY_LOAD_BNDCFGS
+when MPX is supported. Because the value effectively comes from vmcs01,
+vmcs02 must be updated even if vmcs12 is clean.
+
+Fixes: 62cf9bd8118c4 ("KVM: nVMX: Fix emulation of VM_ENTRY_LOAD_BNDCFGS")
+Cc: stable@vger.kernel.org
+Cc: Liran Alon <liran.alon@oracle.com>
+Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/vmx/nested.c | 13 ++++++-------
+ 1 file changed, 6 insertions(+), 7 deletions(-)
+
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -2243,13 +2243,9 @@ static void prepare_vmcs02_full(struct v
+
+ set_cr4_guest_host_mask(vmx);
+
+- if (kvm_mpx_supported()) {
+- if (vmx->nested.nested_run_pending &&
+- (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
+- vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
+- else
+- vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs);
+- }
++ if (kvm_mpx_supported() && vmx->nested.nested_run_pending &&
++ (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
++ vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
+ }
+
+ /*
+@@ -2292,6 +2288,9 @@ static int prepare_vmcs02(struct kvm_vcp
+ kvm_set_dr(vcpu, 7, vcpu->arch.dr7);
+ vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl);
+ }
++ if (kvm_mpx_supported() && (!vmx->nested.nested_run_pending ||
++ !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)))
++ vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs);
+ vmx_set_rflags(vcpu, vmcs12->guest_rflags);
+
+ /* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the
--- /dev/null
+From 73cb85568433feadb79e963bf2efba9b3e9ae3df Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <sean.j.christopherson@intel.com>
+Date: Tue, 7 May 2019 09:06:26 -0700
+Subject: KVM: nVMX: Don't dump VMCS if virtual APIC page can't be mapped
+
+From: Sean Christopherson <sean.j.christopherson@intel.com>
+
+commit 73cb85568433feadb79e963bf2efba9b3e9ae3df upstream.
+
+... as a malicious userspace can run a toy guest to generate invalid
+virtual-APIC page addresses in L1, i.e. flood the kernel log with error
+messages.
+
+Fixes: 690908104e39d ("KVM: nVMX: allow tests to use bad virtual-APIC page address")
+Cc: stable@vger.kernel.org
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/vmx/nested.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -2891,9 +2891,6 @@ static void nested_get_vmcs12_pages(stru
+ */
+ vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
+ CPU_BASED_TPR_SHADOW);
+- } else {
+- printk("bad virtual-APIC page address\n");
+- dump_vmcs();
+ }
+ }
+
--- /dev/null
+From 3c25ab35fbc8526ac0c9b298e8a78e7ad7a55479 Mon Sep 17 00:00:00 2001
+From: Suraj Jitindar Singh <sjitindarsingh@gmail.com>
+Date: Thu, 20 Jun 2019 11:46:51 +1000
+Subject: KVM: PPC: Book3S HV: Clear pending decrementer exceptions on nested guest entry
+
+From: Suraj Jitindar Singh <sjitindarsingh@gmail.com>
+
+commit 3c25ab35fbc8526ac0c9b298e8a78e7ad7a55479 upstream.
+
+If we enter an L1 guest with a pending decrementer exception then this
+is cleared on guest exit if the guest has written a positive value
+into the decrementer (indicating that it handled the decrementer
+exception) since there is no other way to detect that the guest has
+handled the pending exception and that it should be dequeued. In the
+event that the L1 guest tries to run a nested (L2) guest immediately
+after this and the L2 guest decrementer is negative (which is loaded
+by L1 before making the H_ENTER_NESTED hcall), then the pending
+decrementer exception isn't cleared and the L2 entry is blocked since
+L1 has a pending exception, even though L1 may have already handled
+the exception and written a positive value for its decrementer. This
+results in a loop of L1 trying to enter the L2 guest and L0 blocking
+the entry since L1 has an interrupt pending with the outcome being
+that L2 never gets to run and hangs.
+
+Fix this by clearing any pending decrementer exceptions when L1 makes
+the H_ENTER_NESTED hcall since it won't do this if its decrementer
+has gone negative, and anyway its decrementer has been communicated
+to L0 in the hdec_expires field and L0 will return control to L1 when
+this goes negative by delivering an H_DECREMENTER exception.
+
+Fixes: 95a6432ce903 ("KVM: PPC: Book3S HV: Streamlined guest entry/exit path on P9 for radix guests")
+Cc: stable@vger.kernel.org # v4.20+
+Signed-off-by: Suraj Jitindar Singh <sjitindarsingh@gmail.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kvm/book3s_hv.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+--- a/arch/powerpc/kvm/book3s_hv.c
++++ b/arch/powerpc/kvm/book3s_hv.c
+@@ -4084,8 +4084,15 @@ int kvmhv_run_single_vcpu(struct kvm_run
+
+ preempt_enable();
+
+- /* cancel pending decrementer exception if DEC is now positive */
+- if (get_tb() < vcpu->arch.dec_expires && kvmppc_core_pending_dec(vcpu))
++ /*
++ * cancel pending decrementer exception if DEC is now positive, or if
++ * entering a nested guest in which case the decrementer is now owned
++ * by L2 and the L1 decrementer is provided in hdec_expires
++ */
++ if (kvmppc_core_pending_dec(vcpu) &&
++ ((get_tb() < vcpu->arch.dec_expires) ||
++ (trap == BOOK3S_INTERRUPT_SYSCALL &&
++ kvmppc_get_gpr(vcpu, 3) == H_ENTER_NESTED)))
+ kvmppc_core_dequeue_dec(vcpu);
+
+ trace_kvm_guest_exit(vcpu);
--- /dev/null
+From 3fefd1cd95df04da67c83c1cb93b663f04b3324f Mon Sep 17 00:00:00 2001
+From: Michael Neuling <mikey@neuling.org>
+Date: Thu, 20 Jun 2019 16:00:40 +1000
+Subject: KVM: PPC: Book3S HV: Fix CR0 setting in TM emulation
+
+From: Michael Neuling <mikey@neuling.org>
+
+commit 3fefd1cd95df04da67c83c1cb93b663f04b3324f upstream.
+
+When emulating tsr, treclaim and trechkpt, we incorrectly set CR0. The
+code currently sets:
+ CR0 <- 00 || MSR[TS]
+but according to the ISA it should be:
+ CR0 <- 0 || MSR[TS] || 0
+
+This fixes the bit shift to put the bits in the correct location.
+
+This is a data integrity issue as CR0 is corrupted.
+
+Fixes: 4bb3c7a0208f ("KVM: PPC: Book3S HV: Work around transactional memory bugs in POWER9")
+Cc: stable@vger.kernel.org # v4.17+
+Tested-by: Suraj Jitindar Singh <sjitindarsingh@gmail.com>
+Signed-off-by: Michael Neuling <mikey@neuling.org>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kvm/book3s_hv_tm.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/arch/powerpc/kvm/book3s_hv_tm.c
++++ b/arch/powerpc/kvm/book3s_hv_tm.c
+@@ -131,7 +131,7 @@ int kvmhv_p9_tm_emulation(struct kvm_vcp
+ }
+ /* Set CR0 to indicate previous transactional state */
+ vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
+- (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 28);
++ (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29);
+ /* L=1 => tresume, L=0 => tsuspend */
+ if (instr & (1 << 21)) {
+ if (MSR_TM_SUSPENDED(msr))
+@@ -175,7 +175,7 @@ int kvmhv_p9_tm_emulation(struct kvm_vcp
+
+ /* Set CR0 to indicate previous transactional state */
+ vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
+- (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 28);
++ (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29);
+ vcpu->arch.shregs.msr &= ~MSR_TS_MASK;
+ return RESUME_GUEST;
+
+@@ -205,7 +205,7 @@ int kvmhv_p9_tm_emulation(struct kvm_vcp
+
+ /* Set CR0 to indicate previous transactional state */
+ vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
+- (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 28);
++ (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29);
+ vcpu->arch.shregs.msr = msr | MSR_TS_S;
+ return RESUME_GUEST;
+ }
--- /dev/null
+From 869537709ebf1dc865e75c3fc97b23f8acf37c16 Mon Sep 17 00:00:00 2001
+From: Suraj Jitindar Singh <sjitindarsingh@gmail.com>
+Date: Thu, 20 Jun 2019 11:46:50 +1000
+Subject: KVM: PPC: Book3S HV: Signed extend decrementer value if not using large decrementer
+
+From: Suraj Jitindar Singh <sjitindarsingh@gmail.com>
+
+commit 869537709ebf1dc865e75c3fc97b23f8acf37c16 upstream.
+
+On POWER9 the decrementer can operate in large decrementer mode where
+the decrementer is 56 bits and sign extended to 64 bits. When not
+operating in this mode the decrementer behaves as a 32 bit decrementer
+which is NOT sign extended (as on POWER8).
+
+Currently when reading a guest decrementer value we don't take into
+account whether the large decrementer is enabled or not, and this
+means the value will be incorrect when the guest is not using the
+large decrementer. Fix this by sign extending the value read when the
+guest isn't using the large decrementer.
+
+Fixes: 95a6432ce903 ("KVM: PPC: Book3S HV: Streamlined guest entry/exit path on P9 for radix guests")
+Cc: stable@vger.kernel.org # v4.20+
+Signed-off-by: Suraj Jitindar Singh <sjitindarsingh@gmail.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kvm/book3s_hv.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/powerpc/kvm/book3s_hv.c
++++ b/arch/powerpc/kvm/book3s_hv.c
+@@ -3568,6 +3568,8 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu
+
+ vcpu->arch.slb_max = 0;
+ dec = mfspr(SPRN_DEC);
++ if (!(lpcr & LPCR_LD)) /* Sign extend if not using large decrementer */
++ dec = (s32) dec;
+ tb = mftb();
+ vcpu->arch.dec_expires = dec + tb;
+ vcpu->cpu = -1;
--- /dev/null
+From 4d763b168e9c5c366b05812c7bba7662e5ea3669 Mon Sep 17 00:00:00 2001
+From: Wanpeng Li <wanpengli@tencent.com>
+Date: Thu, 20 Jun 2019 17:00:02 +0800
+Subject: KVM: VMX: check CPUID before allowing read/write of IA32_XSS
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Wanpeng Li <wanpengli@tencent.com>
+
+commit 4d763b168e9c5c366b05812c7bba7662e5ea3669 upstream.
+
+Raise #GP when guest read/write IA32_XSS, but the CPUID bits
+say that it shouldn't exist.
+
+Fixes: 203000993de5 (kvm: vmx: add MSR logic for XSAVES)
+Reported-by: Xiaoyao Li <xiaoyao.li@linux.intel.com>
+Reported-by: Tao Xu <tao3.xu@intel.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: Radim Krčmář <rkrcmar@redhat.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Wanpeng Li <wanpengli@tencent.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/vmx/vmx.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -1718,7 +1718,10 @@ static int vmx_get_msr(struct kvm_vcpu *
+ return vmx_get_vmx_msr(&vmx->nested.msrs, msr_info->index,
+ &msr_info->data);
+ case MSR_IA32_XSS:
+- if (!vmx_xsaves_supported())
++ if (!vmx_xsaves_supported() ||
++ (!msr_info->host_initiated &&
++ !(guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
++ guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))))
+ return 1;
+ msr_info->data = vcpu->arch.ia32_xss;
+ break;
+@@ -1929,7 +1932,10 @@ static int vmx_set_msr(struct kvm_vcpu *
+ return 1;
+ return vmx_set_vmx_msr(vcpu, msr_index, data);
+ case MSR_IA32_XSS:
+- if (!vmx_xsaves_supported())
++ if (!vmx_xsaves_supported() ||
++ (!msr_info->host_initiated &&
++ !(guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
++ guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))))
+ return 1;
+ /*
+ * The only supported bit as of Skylake is bit 8, but
--- /dev/null
+From beb8d93b3e423043e079ef3dda19dad7b28467a8 Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <sean.j.christopherson@intel.com>
+Date: Fri, 19 Apr 2019 22:50:55 -0700
+Subject: KVM: VMX: Fix handling of #MC that occurs during VM-Entry
+
+From: Sean Christopherson <sean.j.christopherson@intel.com>
+
+commit beb8d93b3e423043e079ef3dda19dad7b28467a8 upstream.
+
+A previous fix to prevent KVM from consuming stale VMCS state after a
+failed VM-Entry inadvertently blocked KVM's handling of machine checks
+that occur during VM-Entry.
+
+Per Intel's SDM, a #MC during VM-Entry is handled in one of three ways,
+depending on when the #MC is recognized. As it pertains to this bug
+fix, the third case explicitly states EXIT_REASON_MCE_DURING_VMENTRY
+is handled like any other VM-Exit during VM-Entry, i.e. sets bit 31 to
+indicate the VM-Entry failed.
+
+If a machine-check event occurs during a VM entry, one of the following occurs:
+ - The machine-check event is handled as if it occurred before the VM entry:
+ ...
+ - The machine-check event is handled after VM entry completes:
+ ...
+ - A VM-entry failure occurs as described in Section 26.7. The basic
+ exit reason is 41, for "VM-entry failure due to machine-check event".
+
+Explicitly handle EXIT_REASON_MCE_DURING_VMENTRY as a one-off case in
+vmx_vcpu_run() instead of binning it into vmx_complete_atomic_exit().
+Doing so allows vmx_vcpu_run() to handle VMX_EXIT_REASONS_FAILED_VMENTRY
+in a sane fashion and also simplifies vmx_complete_atomic_exit() since
+VMCS.VM_EXIT_INTR_INFO is guaranteed to be fresh.
+
+Fixes: b060ca3b2e9e7 ("kvm: vmx: Handle VMLAUNCH/VMRESUME failure properly")
+Cc: stable@vger.kernel.org
+Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
+Reviewed-by: Jim Mattson <jmattson@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/vmx/vmx.c | 20 ++++++++------------
+ 1 file changed, 8 insertions(+), 12 deletions(-)
+
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -6102,28 +6102,21 @@ static void vmx_apicv_post_state_restore
+
+ static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx)
+ {
+- u32 exit_intr_info = 0;
+- u16 basic_exit_reason = (u16)vmx->exit_reason;
+-
+- if (!(basic_exit_reason == EXIT_REASON_MCE_DURING_VMENTRY
+- || basic_exit_reason == EXIT_REASON_EXCEPTION_NMI))
++ if (vmx->exit_reason != EXIT_REASON_EXCEPTION_NMI)
+ return;
+
+- if (!(vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY))
+- exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
+- vmx->exit_intr_info = exit_intr_info;
++ vmx->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
+
+ /* if exit due to PF check for async PF */
+- if (is_page_fault(exit_intr_info))
++ if (is_page_fault(vmx->exit_intr_info))
+ vmx->vcpu.arch.apf.host_apf_reason = kvm_read_and_reset_pf_reason();
+
+ /* Handle machine checks before interrupts are enabled */
+- if (basic_exit_reason == EXIT_REASON_MCE_DURING_VMENTRY ||
+- is_machine_check(exit_intr_info))
++ if (is_machine_check(vmx->exit_intr_info))
+ kvm_machine_check();
+
+ /* We need to handle NMIs before interrupts are enabled */
+- if (is_nmi(exit_intr_info)) {
++ if (is_nmi(vmx->exit_intr_info)) {
+ kvm_before_interrupt(&vmx->vcpu);
+ asm("int $2");
+ kvm_after_interrupt(&vmx->vcpu);
+@@ -6526,6 +6519,9 @@ static void vmx_vcpu_run(struct kvm_vcpu
+ vmx->idt_vectoring_info = 0;
+
+ vmx->exit_reason = vmx->fail ? 0xdead : vmcs_read32(VM_EXIT_REASON);
++ if ((u16)vmx->exit_reason == EXIT_REASON_MCE_DURING_VMENTRY)
++ kvm_machine_check();
++
+ if (vmx->fail || (vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY))
+ return;
+
--- /dev/null
+From 6fc3977ccc5d3c22e851f2dce2d3ce2a0a843842 Mon Sep 17 00:00:00 2001
+From: Like Xu <like.xu@linux.intel.com>
+Date: Thu, 18 Jul 2019 13:35:14 +0800
+Subject: KVM: x86/vPMU: refine kvm_pmu err msg when event creation failed
+
+From: Like Xu <like.xu@linux.intel.com>
+
+commit 6fc3977ccc5d3c22e851f2dce2d3ce2a0a843842 upstream.
+
+If a perf_event creation fails due to any reason of the host perf
+subsystem, it has no chance to log the corresponding event for guest
+which may cause abnormal sampling data in guest result. In debug mode,
+this message helps to understand the state of vPMC and we may not
+limit the number of occurrences but not in a spamming style.
+
+Suggested-by: Joe Perches <joe@perches.com>
+Signed-off-by: Like Xu <like.xu@linux.intel.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/pmu.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kvm/pmu.c
++++ b/arch/x86/kvm/pmu.c
+@@ -131,8 +131,8 @@ static void pmc_reprogram_counter(struct
+ intr ? kvm_perf_overflow_intr :
+ kvm_perf_overflow, pmc);
+ if (IS_ERR(event)) {
+- printk_once("kvm_pmu: event creation failed %ld\n",
+- PTR_ERR(event));
++ pr_debug_ratelimited("kvm_pmu: event creation failed %ld for pmc->idx = %d\n",
++ PTR_ERR(event), pmc->idx);
+ return;
+ }
+
media-coda-remove-unbalanced-and-unneeded-mutex-unlock.patch
media-videobuf2-core-prevent-size-alignment-wrapping-buffer-size-to-0.patch
media-videobuf2-dma-sg-prevent-size-from-overflowing.patch
+kvm-nvmx-don-t-dump-vmcs-if-virtual-apic-page-can-t-be-mapped.patch
+kvm-nvmx-always-sync-guest_bndcfgs-when-it-comes-from-vmcs01.patch
+kvm-vmx-fix-handling-of-mc-that-occurs-during-vm-entry.patch
+kvm-vmx-check-cpuid-before-allowing-read-write-of-ia32_xss.patch
+kvm-ppc-book3s-hv-signed-extend-decrementer-value-if-not-using-large-decrementer.patch
+kvm-ppc-book3s-hv-clear-pending-decrementer-exceptions-on-nested-guest-entry.patch
+kvm-ppc-book3s-hv-fix-cr0-setting-in-tm-emulation.patch
+kvm-x86-vpmu-refine-kvm_pmu-err-msg-when-event-creation-failed.patch