--- /dev/null
+From cc5034a5d293dd620484d1d836aa16c6764a1c8c Mon Sep 17 00:00:00 2001
+From: "Gustavo A. R. Silva" <gustavo@embeddedor.com>
+Date: Fri, 15 Feb 2019 14:29:26 -0600
+Subject: drm/radeon/evergreen_cs: fix missing break in switch statement
+
+From: Gustavo A. R. Silva <gustavo@embeddedor.com>
+
+commit cc5034a5d293dd620484d1d836aa16c6764a1c8c upstream.
+
+Add missing break statement in order to prevent the code from falling
+through to case CB_TARGET_MASK.
+
+This bug was found thanks to the ongoing efforts to enable
+-Wimplicit-fallthrough.
+
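+Condensed, the fixed pattern looks like this (a sketch of the hunk
+below; the first case label here is hypothetical, not the driver's):
+
+  switch (reg) {
+  case SOME_RELOC_REG:        /* hypothetical label; see hunk below */
+          ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+          break;              /* previously missing */
+  case CB_TARGET_MASK:
+          track->cb_target_mask = radeon_get_ib_value(p, idx);
+          break;
+  }
+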
+Fixes: dd220a00e8bd ("drm/radeon/kms: add support for streamout v7")
+Cc: stable@vger.kernel.org
+Signed-off-by: Gustavo A. R. Silva <gustavo@embeddedor.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/radeon/evergreen_cs.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/gpu/drm/radeon/evergreen_cs.c
++++ b/drivers/gpu/drm/radeon/evergreen_cs.c
+@@ -1299,6 +1299,7 @@ static int evergreen_cs_handle_reg(struc
+ return -EINVAL;
+ }
+ ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
++ break;
+ case CB_TARGET_MASK:
+ track->cb_target_mask = radeon_get_ib_value(p, idx);
+ track->cb_dirty = true;
--- /dev/null
+From 152482580a1b0accb60676063a1ac57b2d12daf6 Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <sean.j.christopherson@intel.com>
+Date: Tue, 5 Feb 2019 12:54:17 -0800
+Subject: KVM: Call kvm_arch_memslots_updated() before updating memslots
+
+From: Sean Christopherson <sean.j.christopherson@intel.com>
+
+commit 152482580a1b0accb60676063a1ac57b2d12daf6 upstream.
+
+kvm_arch_memslots_updated() is at this point in time an x86-specific
+hook for handling MMIO generation wraparound. x86 stashes 19 bits of
+the memslots generation number in its MMIO sptes in order to avoid
+full page fault walks for repeat faults on emulated MMIO addresses.
+Because only 19 bits are used, wrapping the MMIO generation number is
+possible, if unlikely. kvm_arch_memslots_updated() alerts x86 that
+the generation has changed so that it can invalidate all MMIO sptes in
+case the effective MMIO generation has wrapped so as to avoid using a
+stale spte, e.g. a (very) old spte that was created with generation==0.
+
+Given that the purpose of kvm_arch_memslots_updated() is to prevent
+consuming stale entries, it needs to be called before the new generation
+is propagated to memslots. Invalidating the MMIO sptes after updating
+memslots means that there is a window where a vCPU could dereference
+the new memslots generation, e.g. 0, and incorrectly reuse an old MMIO
+spte that was created with (pre-wrap) generation==0.
+
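+Condensed, the resulting ordering in install_new_memslots() is (a
+sketch of the hunk below):
+
+  u64 gen = slots->generation + KVM_ADDRESS_SPACE_NUM * 2 - 1;
+
+  kvm_arch_memslots_updated(kvm, gen);  /* zap stale MMIO sptes first */
+
+  slots->generation = gen;              /* publish only afterwards */
+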
+Fixes: e59dbe09f8e6 ("KVM: Introduce kvm_arch_memslots_updated()")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/include/asm/kvm_host.h | 2 +-
+ arch/powerpc/include/asm/kvm_host.h | 2 +-
+ arch/s390/include/asm/kvm_host.h | 2 +-
+ arch/x86/include/asm/kvm_host.h | 2 +-
+ arch/x86/kvm/mmu.c | 4 ++--
+ arch/x86/kvm/x86.c | 4 ++--
+ include/linux/kvm_host.h | 2 +-
+ virt/kvm/arm/mmu.c | 2 +-
+ virt/kvm/kvm_main.c | 7 +++++--
+ 9 files changed, 15 insertions(+), 12 deletions(-)
+
+--- a/arch/mips/include/asm/kvm_host.h
++++ b/arch/mips/include/asm/kvm_host.h
+@@ -1132,7 +1132,7 @@ static inline void kvm_arch_hardware_uns
+ static inline void kvm_arch_sync_events(struct kvm *kvm) {}
+ static inline void kvm_arch_free_memslot(struct kvm *kvm,
+ struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
+-static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) {}
++static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
+ static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
+ static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
+ static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
+--- a/arch/powerpc/include/asm/kvm_host.h
++++ b/arch/powerpc/include/asm/kvm_host.h
+@@ -809,7 +809,7 @@ struct kvm_vcpu_arch {
+ static inline void kvm_arch_hardware_disable(void) {}
+ static inline void kvm_arch_hardware_unsetup(void) {}
+ static inline void kvm_arch_sync_events(struct kvm *kvm) {}
+-static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) {}
++static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
+ static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
+ static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
+ static inline void kvm_arch_exit(void) {}
+--- a/arch/s390/include/asm/kvm_host.h
++++ b/arch/s390/include/asm/kvm_host.h
+@@ -784,7 +784,7 @@ static inline void kvm_arch_vcpu_uninit(
+ static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
+ static inline void kvm_arch_free_memslot(struct kvm *kvm,
+ struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
+-static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) {}
++static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
+ static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
+ static inline void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
+ struct kvm_memory_slot *slot) {}
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -1121,7 +1121,7 @@ void kvm_mmu_clear_dirty_pt_masked(struc
+ struct kvm_memory_slot *slot,
+ gfn_t gfn_offset, unsigned long mask);
+ void kvm_mmu_zap_all(struct kvm *kvm);
+-void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, struct kvm_memslots *slots);
++void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen);
+ unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
+ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
+
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -5418,13 +5418,13 @@ static bool kvm_has_zapped_obsolete_page
+ return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages));
+ }
+
+-void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, struct kvm_memslots *slots)
++void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
+ {
+ /*
+ * The very rare case: if the generation-number is round,
+ * zap all shadow pages.
+ */
+- if (unlikely((slots->generation & MMIO_GEN_MASK) == 0)) {
++ if (unlikely((gen & MMIO_GEN_MASK) == 0)) {
+ kvm_debug_ratelimited("kvm: zapping shadow pages for mmio generation wraparound\n");
+ kvm_mmu_invalidate_zap_all_pages(kvm);
+ }
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -8524,13 +8524,13 @@ out_free:
+ return -ENOMEM;
+ }
+
+-void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots)
++void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
+ {
+ /*
+ * memslots->generation has been incremented.
+ * mmio generation may have reached its maximum value.
+ */
+- kvm_mmu_invalidate_mmio_sptes(kvm, slots);
++ kvm_mmu_invalidate_mmio_sptes(kvm, gen);
+ }
+
+ int kvm_arch_prepare_memory_region(struct kvm *kvm,
+--- a/include/linux/kvm_host.h
++++ b/include/linux/kvm_host.h
+@@ -625,7 +625,7 @@ void kvm_arch_free_memslot(struct kvm *k
+ struct kvm_memory_slot *dont);
+ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
+ unsigned long npages);
+-void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots);
++void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen);
+ int kvm_arch_prepare_memory_region(struct kvm *kvm,
+ struct kvm_memory_slot *memslot,
+ const struct kvm_userspace_memory_region *mem,
+--- a/virt/kvm/arm/mmu.c
++++ b/virt/kvm/arm/mmu.c
+@@ -1955,7 +1955,7 @@ int kvm_arch_create_memslot(struct kvm *
+ return 0;
+ }
+
+-void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots)
++void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
+ {
+ }
+
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -856,6 +856,7 @@ static struct kvm_memslots *install_new_
+ int as_id, struct kvm_memslots *slots)
+ {
+ struct kvm_memslots *old_memslots = __kvm_memslots(kvm, as_id);
++ u64 gen;
+
+ /*
+ * Set the low bit in the generation, which disables SPTE caching
+@@ -878,9 +879,11 @@ static struct kvm_memslots *install_new_
+ * space 0 will use generations 0, 4, 8, ... while address space 1 will
+ * use generations 2, 6, 10, 14, ...
+ */
+- slots->generation += KVM_ADDRESS_SPACE_NUM * 2 - 1;
++ gen = slots->generation + KVM_ADDRESS_SPACE_NUM * 2 - 1;
+
+- kvm_arch_memslots_updated(kvm, slots);
++ kvm_arch_memslots_updated(kvm, gen);
++
++ slots->generation = gen;
+
+ return old_memslots;
+ }
--- /dev/null
+From 8570f9e881e3fde98801bb3a47eef84dd934d405 Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <sean.j.christopherson@intel.com>
+Date: Wed, 23 Jan 2019 14:39:24 -0800
+Subject: KVM: nVMX: Apply addr size mask to effective address for VMX instructions
+
+From: Sean Christopherson <sean.j.christopherson@intel.com>
+
+commit 8570f9e881e3fde98801bb3a47eef84dd934d405 upstream.
+
+The address size of an instruction affects the effective address, not
+the virtual/linear address. The final address may still be truncated,
+e.g. to 32-bits outside of long mode, but that happens irrespective of
+the address size, e.g. a 32-bit address size can yield a 64-bit virtual
+address when using FS/GS with a non-zero base.
+
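+In code terms, the truncation has to be applied to the effective
+address before the segment base is added; a sketch using the variable
+names from the hunk below:
+
+  off = base + (index << scaling) + displacement;  /* effective addr */
+  if (addr_size == 1)         /* 32-bit address size */
+          off &= 0xffffffff;
+  else if (addr_size == 0)    /* 16-bit address size */
+          off &= 0xffff;
+  *ret = s.base + off;        /* may still be 64-bit via FS/GS base */
+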
+Fixes: 064aea774768 ("KVM: nVMX: Decoding memory operands of VMX instructions")
+Cc: stable@vger.kernel.org
+Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/vmx.c | 25 +++++++++++++++++++++++--
+ 1 file changed, 23 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -7455,20 +7455,41 @@ static int get_vmx_mem_address(struct kv
+ if (index_is_valid)
+ off += kvm_register_read(vcpu, index_reg)<<scaling;
+ vmx_get_segment(vcpu, &s, seg_reg);
+- *ret = s.base + off;
+
++ /*
++ * The effective address, i.e. @off, of a memory operand is truncated
++ * based on the address size of the instruction. Note that this is
++ * the *effective address*, i.e. the address prior to accounting for
++ * the segment's base.
++ */
+ if (addr_size == 1) /* 32 bit */
+- *ret &= 0xffffffff;
++ off &= 0xffffffff;
++ else if (addr_size == 0) /* 16 bit */
++ off &= 0xffff;
+
+ /* Checks for #GP/#SS exceptions. */
+ exn = false;
+ if (is_long_mode(vcpu)) {
++ /*
++ * The virtual/linear address is never truncated in 64-bit
++ * mode, e.g. a 32-bit address size can yield a 64-bit virtual
++ * address when using FS/GS with a non-zero base.
++ */
++ *ret = s.base + off;
++
+ /* Long mode: #GP(0)/#SS(0) if the memory address is in a
+ * non-canonical form. This is the only check on the memory
+ * destination for long mode!
+ */
+ exn = is_noncanonical_address(*ret, vcpu);
+ } else if (is_protmode(vcpu)) {
++ /*
++ * When not in long mode, the virtual/linear address is
++ * unconditionally truncated to 32 bits regardless of the
++ * address size.
++ */
++ *ret = (s.base + off) & 0xffffffff;
++
+ /* Protected mode: apply checks for segment validity in the
+ * following order:
+ * - segment type check (#GP(0) may be thrown)
--- /dev/null
+From 34333cc6c2cb021662fd32e24e618d1b86de95bf Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <sean.j.christopherson@intel.com>
+Date: Wed, 23 Jan 2019 14:39:25 -0800
+Subject: KVM: nVMX: Ignore limit checks on VMX instructions using flat segments
+
+From: Sean Christopherson <sean.j.christopherson@intel.com>
+
+commit 34333cc6c2cb021662fd32e24e618d1b86de95bf upstream.
+
+Regarding segments with a limit==0xffffffff, the SDM officially states:
+
+ When the effective limit is FFFFFFFFH (4 GBytes), these accesses may
+ or may not cause the indicated exceptions. Behavior is
+ implementation-specific and may vary from one execution to another.
+
+In practice, all CPUs that support VMX ignore limit checks for "flat
+segments", i.e. an expand-up data or code segment with base=0 and
+limit=0xffffffff. This is subtly different than wrapping the effective
+address calculation based on the address size, as the flat segment
+behavior also applies to accesses that would wrap the 4g boundary, e.g.
+a 4-byte access starting at 0xffffffff will access linear addresses
+0xffffffff, 0x0, 0x1 and 0x2.
+
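+In code terms, the limit check is skipped for exactly that case; a
+sketch of the predicate (segment type bit 3 = code, bit 2 =
+expand-down for data):
+
+  bool flat = s.base == 0 && s.limit == 0xffffffff &&
+              ((s.type & 8) || !(s.type & 4));
+  if (!flat)
+          exn = exn || (off + sizeof(u64) > s.limit);
+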
+Fixes: f9eb4af67c9d ("KVM: nVMX: VMX instructions: add checks for #GP/#SS exceptions")
+Cc: stable@vger.kernel.org
+Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/vmx.c | 12 +++++++++---
+ 1 file changed, 9 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -7513,10 +7513,16 @@ static int get_vmx_mem_address(struct kv
+ /* Protected mode: #GP(0)/#SS(0) if the segment is unusable.
+ */
+ exn = (s.unusable != 0);
+- /* Protected mode: #GP(0)/#SS(0) if the memory
+- * operand is outside the segment limit.
++
++ /*
++ * Protected mode: #GP(0)/#SS(0) if the memory operand is
++ * outside the segment limit. All CPUs that support VMX ignore
++ * limit checks for flat segments, i.e. segments with base==0,
++ * limit==0xffffffff and of type expand-up data or code.
+ */
+- exn = exn || (off + sizeof(u64) > s.limit);
++ if (!(s.base == 0 && s.limit == 0xffffffff &&
++ ((s.type & 8) || !(s.type & 4))))
++ exn = exn || (off + sizeof(u64) > s.limit);
+ }
+ if (exn) {
+ kvm_queue_exception_e(vcpu,
--- /dev/null
+From 946c522b603f281195af1df91837a1d4d1eb3bc9 Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <sean.j.christopherson@intel.com>
+Date: Wed, 23 Jan 2019 14:39:23 -0800
+Subject: KVM: nVMX: Sign extend displacements of VMX instr's mem operands
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Sean Christopherson <sean.j.christopherson@intel.com>
+
+commit 946c522b603f281195af1df91837a1d4d1eb3bc9 upstream.
+
+The VMCS.EXIT_QUALIFICATION field reports the displacements of memory
+operands for various instructions, including VMX instructions, as a
+naturally sized unsigned value, but masks the value by the addr size,
+e.g. given a ModRM encoded as -0x28(%ebp), the -0x28 displacement is
+reported as 0xffffffd8 for a 32-bit address size. Despite some weird
+wording regarding sign extension, the SDM explicitly states that bits
+beyond the instruction's address size are undefined:
+
+ In all cases, bits of this field beyond the instruction’s address
+ size are undefined.
+
+Failure to sign extend the displacement results in KVM incorrectly
+treating a negative displacement as a large positive displacement when
+the address size of the VMX instruction is smaller than KVM's native
+size, e.g. a 32-bit address size on a 64-bit KVM.
+
+The very original decoding, added by commit 064aea774768 ("KVM: nVMX:
+Decoding memory operands of VMX instructions"), sort of modeled sign
+extension by truncating the final virtual/linear address for a 32-bit
+address size. I.e. it messed up the effective address but made it work
+by adjusting the final address.
+
+When segmentation checks were added, the truncation logic was kept
+as-is and no sign extension logic was introduced. In other words, it
+kept calculating the wrong effective address while mostly generating
+the correct virtual/linear address. As the effective address is what's
+used in the segment limit checks, this results in KVM incorrectly
+injecting #GP/#SS faults due to non-existent segment violations when
+a nested VMM uses negative displacements with an address size smaller
+than KVM's native address size.
+
+Using the -0x28(%ebp) example, an EBP value of 0x1000 will result in
+KVM using 0x100000fd8 as the effective address when checking for a
+segment limit violation. This causes a 100% failure rate when running
+a 32-bit KVM build as L1 on top of a 64-bit KVM L0.
+
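+Worked through in code with the example above (sign_extend64() is the
+kernel helper used in the hunk below):
+
+  u64 disp = 0xffffffd8;                      /* -0x28, 32-bit masked */
+  gva_t off = (gva_t)sign_extend64(disp, 31); /* 0xffffffffffffffd8 */
+  off += 0x1000;                              /* add EBP */
+  /* off is now 0xfd8 rather than 0x100000fd8 */
+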
+Fixes: f9eb4af67c9d ("KVM: nVMX: VMX instructions: add checks for #GP/#SS exceptions")
+Cc: stable@vger.kernel.org
+Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/vmx.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -7446,6 +7446,10 @@ static int get_vmx_mem_address(struct kv
+ /* Addr = segment_base + offset */
+ /* offset = base + [index * scale] + displacement */
+ off = exit_qualification; /* holds the displacement */
++ if (addr_size == 1)
++ off = (gva_t)sign_extend64(off, 31);
++ else if (addr_size == 0)
++ off = (gva_t)sign_extend64(off, 15);
+ if (base_is_valid)
+ off += kvm_register_read(vcpu, base_reg);
+ if (index_is_valid)
--- /dev/null
+From e1359e2beb8b0a1188abc997273acbaedc8ee791 Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <sean.j.christopherson@intel.com>
+Date: Tue, 5 Feb 2019 13:01:12 -0800
+Subject: KVM: x86/mmu: Detect MMIO generation wrap in any address space
+
+From: Sean Christopherson <sean.j.christopherson@intel.com>
+
+commit e1359e2beb8b0a1188abc997273acbaedc8ee791 upstream.
+
+The check to detect a wrap of the MMIO generation explicitly looks for a
+generation number of zero. Now that unique memslots generation numbers
+are assigned to each address space, only address space 0 will get a
+generation number of exactly zero when wrapping. E.g. when address
+space 1 goes from 0x7fffe to 0x80002, the MMIO generation number will
+wrap to 0x2. Adjust the MMIO generation to strip the address space
+modifier prior to checking for a wrap.
+
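+Condensed, the adjusted check becomes (a sketch of the hunk below):
+
+  gen &= MMIO_GEN_MASK;                      /* bits stored in sptes */
+  gen >>= 1;                                 /* drop in-progress flag */
+  gen &= ~((u64)KVM_ADDRESS_SPACE_NUM - 1);  /* strip AS modifier */
+  if (gen == 0)                              /* wrap in any AS */
+          kvm_mmu_invalidate_zap_all_pages(kvm);
+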
+Fixes: 4bd518f1598d ("KVM: use separate generations for each address space")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/mmu.c | 21 +++++++++++++++++++--
+ 1 file changed, 19 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -5420,11 +5420,28 @@ static bool kvm_has_zapped_obsolete_page
+
+ void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
+ {
++ gen &= MMIO_GEN_MASK;
++
++ /*
++ * Shift to eliminate the "update in-progress" flag, which isn't
++ * included in the spte's generation number.
++ */
++ gen >>= 1;
++
++ /*
++ * Generation numbers are incremented in multiples of the number of
++ * address spaces in order to provide unique generations across all
++ * address spaces. Strip what is effectively the address space
++ * modifier prior to checking for a wrap of the MMIO generation so
++ * that a wrap in any address space is detected.
++ */
++ gen &= ~((u64)KVM_ADDRESS_SPACE_NUM - 1);
++
+ /*
+- * The very rare case: if the generation-number is round,
++ * The very rare case: if the MMIO generation number has wrapped,
+ * zap all shadow pages.
+ */
+- if (unlikely((gen & MMIO_GEN_MASK) == 0)) {
++ if (unlikely(gen == 0)) {
+ kvm_debug_ratelimited("kvm: zapping shadow pages for mmio generation wraparound\n");
+ kvm_mmu_invalidate_zap_all_pages(kvm);
+ }
--- /dev/null
+From ddfd1730fd829743e41213e32ccc8b4aa6dc8325 Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <sean.j.christopherson@intel.com>
+Date: Tue, 5 Feb 2019 13:01:13 -0800
+Subject: KVM: x86/mmu: Do not cache MMIO accesses while memslots are in flux
+
+From: Sean Christopherson <sean.j.christopherson@intel.com>
+
+commit ddfd1730fd829743e41213e32ccc8b4aa6dc8325 upstream.
+
+When installing new memslots, KVM sets bit 0 of the generation number to
+indicate that an update is in-progress. Until the update is complete,
+there are no guarantees as to whether a vCPU will see the old or the new
+memslots. Explicitly prevent caching MMIO accesses so as to avoid using
+an access cached from the old memslots after the new memslots have been
+installed.
+
+Note that it is unclear whether or not disabling caching during the
+update window is strictly necessary as there is no definitive
+documentation as to what ordering guarantees KVM provides with respect
+to updating memslots. That being said, the MMIO spte code does not
+allow reusing sptes created while an update is in-progress, and the
+associated documentation explicitly states:
+
+ We do not want to use an MMIO sptes created with an odd generation
+ number, ... If KVM is unlucky and creates an MMIO spte while the
+ low bit is 1, the next access to the spte will always be a cache miss.
+
+At the very least, disabling the per-vCPU MMIO cache during updates will
+make its behavior consistent with the MMIO spte behavior and
+documentation.
+
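+Since bit 0 of the memslots generation doubles as the update
+in-progress flag, the guard reduces to a parity check (a sketch of the
+hunk below):
+
+  u64 gen = kvm_memslots(vcpu->kvm)->generation;
+
+  if (unlikely(gen & 1))  /* update in progress */
+          return;         /* don't populate the per-vCPU MMIO cache */
+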
+Fixes: 56f17dd3fbc4 ("kvm: x86: fix stale mmio cache bug")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/x86.h | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kvm/x86.h
++++ b/arch/x86/kvm/x86.h
+@@ -136,6 +136,11 @@ static inline bool emul_is_noncanonical_
+ static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
+ gva_t gva, gfn_t gfn, unsigned access)
+ {
++ u64 gen = kvm_memslots(vcpu->kvm)->generation;
++
++ if (unlikely(gen & 1))
++ return;
++
+ /*
+ * If this is a shadow nested page table, the "GVA" is
+ * actually a nGPA.
+@@ -143,7 +148,7 @@ static inline void vcpu_cache_mmio_info(
+ vcpu->arch.mmio_gva = mmu_is_nested(vcpu) ? 0 : gva & PAGE_MASK;
+ vcpu->arch.access = access;
+ vcpu->arch.mmio_gfn = gfn;
+- vcpu->arch.mmio_gen = kvm_memslots(vcpu->kvm)->generation;
++ vcpu->arch.mmio_gen = gen;
+ }
+
+ static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
--- /dev/null
+From 2e0fe66e0a136252f4d89dbbccdcb26deb867eb8 Mon Sep 17 00:00:00 2001
+From: Steve Longerbeam <slongerbeam@gmail.com>
+Date: Mon, 21 Jan 2019 21:35:50 -0200
+Subject: media: imx: csi: Disable CSI immediately after last EOF
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Steve Longerbeam <slongerbeam@gmail.com>
+
+commit 2e0fe66e0a136252f4d89dbbccdcb26deb867eb8 upstream.
+
+Disable the CSI immediately after receiving the last EOF before stream
+off (and thus before disabling the IDMA channel). Do this by moving the
+wait for EOF completion into a new function csi_idmac_wait_last_eof().
+
+This fixes a complete system hard lockup on the SabreAuto when streaming
+from the ADV7180, by repeatedly sending a stream off immediately followed
+by stream on:
+
+while true; do v4l2-ctl -d4 --stream-mmap --stream-count=3; done
+
+Eventually this either causes a system lockup or EOF timeouts at every
+subsequent stream on, until a system reset.
+
+The lockup occurs when disabling the IDMA channel at stream off. Disabling
+the CSI before disabling the IDMA channel appears to be a reliable fix for
+the hard lockup.
+
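+Condensed, the resulting teardown order in csi_stop() for the IDMA
+path is (a sketch of the hunks below):
+
+  csi_idmac_wait_last_eof(priv);  /* sync with the last EOF */
+  ipu_csi_disable(priv->csi);     /* disable the CSI first ... */
+  csi_idmac_stop(priv);           /* ... then the IDMA channel */
+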
+Fixes: 4a34ec8e470cb ("[media] media: imx: Add CSI subdev driver")
+
+Reported-by: Gaël PORTAY <gael.portay@collabora.com>
+Signed-off-by: Steve Longerbeam <slongerbeam@gmail.com>
+Cc: stable@vger.kernel.org # for 4.13 and up
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Signed-off-by: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/staging/media/imx/imx-media-csi.c | 17 ++++++++++++++---
+ 1 file changed, 14 insertions(+), 3 deletions(-)
+
+--- a/drivers/staging/media/imx/imx-media-csi.c
++++ b/drivers/staging/media/imx/imx-media-csi.c
+@@ -538,7 +538,7 @@ out_put_ipu:
+ return ret;
+ }
+
+-static void csi_idmac_stop(struct csi_priv *priv)
++static void csi_idmac_wait_last_eof(struct csi_priv *priv)
+ {
+ unsigned long flags;
+ int ret;
+@@ -555,7 +555,10 @@ static void csi_idmac_stop(struct csi_pr
+ &priv->last_eof_comp, msecs_to_jiffies(IMX_MEDIA_EOF_TIMEOUT));
+ if (ret == 0)
+ v4l2_warn(&priv->sd, "wait last EOF timeout\n");
++}
+
++static void csi_idmac_stop(struct csi_priv *priv)
++{
+ devm_free_irq(priv->dev, priv->eof_irq, priv);
+ devm_free_irq(priv->dev, priv->nfb4eof_irq, priv);
+
+@@ -681,6 +684,16 @@ idmac_stop:
+
+ static void csi_stop(struct csi_priv *priv)
+ {
++ if (priv->dest == IPU_CSI_DEST_IDMAC)
++ csi_idmac_wait_last_eof(priv);
++
++ /*
++ * Disable the CSI asap, after syncing with the last EOF.
++ * Doing so after the IDMA channel is disabled has shown to
++ * create hard system-wide hangs.
++ */
++ ipu_csi_disable(priv->csi);
++
+ if (priv->dest == IPU_CSI_DEST_IDMAC) {
+ csi_idmac_stop(priv);
+
+@@ -688,8 +701,6 @@ static void csi_stop(struct csi_priv *pr
+ if (priv->fim)
+ imx_media_fim_set_stream(priv->fim, NULL, false);
+ }
+-
+- ipu_csi_disable(priv->csi);
+ }
+
+ static const struct csi_skip_desc csi_skip[12] = {
--- /dev/null
+From 4bc1ab41eee9d02ad2483bf8f51a7b72e3504eba Mon Sep 17 00:00:00 2001
+From: Steve Longerbeam <slongerbeam@gmail.com>
+Date: Mon, 21 Jan 2019 21:35:51 -0200
+Subject: media: imx: csi: Stop upstream before disabling IDMA channel
+
+From: Steve Longerbeam <slongerbeam@gmail.com>
+
+commit 4bc1ab41eee9d02ad2483bf8f51a7b72e3504eba upstream.
+
+Move upstream stream off to just after receiving the last EOF completion
+and disabling the CSI (and thus before disabling the IDMA channel) in
+csi_stop(). For symmetry also move upstream stream on to beginning of
+csi_start().
+
+Doing this makes csi_s_stream() more symmetric with prp_s_stream() which
+will require the same change to fix a hard lockup.
+
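+Condensed, the start/stop ordering after this change is (a sketch of
+the hunks below, error handling omitted):
+
+  /* csi_start() */
+  v4l2_subdev_call(priv->src_sd, video, s_stream, 1);  /* upstream on */
+  csi_idmac_start(priv);
+
+  /* csi_stop() */
+  csi_idmac_wait_last_eof(priv);
+  ipu_csi_disable(priv->csi);
+  v4l2_subdev_call(priv->src_sd, video, s_stream, 0);  /* upstream off */
+  csi_idmac_stop(priv);
+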
+Signed-off-by: Steve Longerbeam <slongerbeam@gmail.com>
+Cc: stable@vger.kernel.org # for 4.13 and up
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Signed-off-by: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/staging/media/imx/imx-media-csi.c | 25 +++++++++++++------------
+ 1 file changed, 13 insertions(+), 12 deletions(-)
+
+--- a/drivers/staging/media/imx/imx-media-csi.c
++++ b/drivers/staging/media/imx/imx-media-csi.c
+@@ -648,10 +648,16 @@ static int csi_start(struct csi_priv *pr
+ usleep_range(delay_usec, delay_usec + 1000);
+ }
+
++ /* start upstream */
++ ret = v4l2_subdev_call(priv->src_sd, video, s_stream, 1);
++ ret = (ret && ret != -ENOIOCTLCMD) ? ret : 0;
++ if (ret)
++ return ret;
++
+ if (priv->dest == IPU_CSI_DEST_IDMAC) {
+ ret = csi_idmac_start(priv);
+ if (ret)
+- return ret;
++ goto stop_upstream;
+ }
+
+ ret = csi_setup(priv);
+@@ -679,6 +685,8 @@ fim_off:
+ idmac_stop:
+ if (priv->dest == IPU_CSI_DEST_IDMAC)
+ csi_idmac_stop(priv);
++stop_upstream:
++ v4l2_subdev_call(priv->src_sd, video, s_stream, 0);
+ return ret;
+ }
+
+@@ -694,6 +702,9 @@ static void csi_stop(struct csi_priv *pr
+ */
+ ipu_csi_disable(priv->csi);
+
++ /* stop upstream */
++ v4l2_subdev_call(priv->src_sd, video, s_stream, 0);
++
+ if (priv->dest == IPU_CSI_DEST_IDMAC) {
+ csi_idmac_stop(priv);
+
+@@ -861,23 +872,13 @@ static int csi_s_stream(struct v4l2_subd
+ goto update_count;
+
+ if (enable) {
+- /* upstream must be started first, before starting CSI */
+- ret = v4l2_subdev_call(priv->src_sd, video, s_stream, 1);
+- ret = (ret && ret != -ENOIOCTLCMD) ? ret : 0;
+- if (ret)
+- goto out;
+-
+ dev_dbg(priv->dev, "stream ON\n");
+ ret = csi_start(priv);
+- if (ret) {
+- v4l2_subdev_call(priv->src_sd, video, s_stream, 0);
++ if (ret)
+ goto out;
+- }
+ } else {
+ dev_dbg(priv->dev, "stream OFF\n");
+- /* CSI must be stopped first, then stop upstream */
+ csi_stop(priv);
+- v4l2_subdev_call(priv->src_sd, video, s_stream, 0);
+ }
+
+ update_count:
--- /dev/null
+From a19c22677377b87e4354f7306f46ad99bc982a9f Mon Sep 17 00:00:00 2001
+From: Steve Longerbeam <slongerbeam@gmail.com>
+Date: Mon, 21 Jan 2019 21:35:52 -0200
+Subject: media: imx: prpencvf: Stop upstream before disabling IDMA channel
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Steve Longerbeam <slongerbeam@gmail.com>
+
+commit a19c22677377b87e4354f7306f46ad99bc982a9f upstream.
+
+Upstream must be stopped immediately after receiving the last EOF and
+before disabling the IDMA channel. This can be accomplished by moving
+upstream stream off to just after receiving the last EOF completion in
+prp_stop(). For symmetry also move upstream stream on to end of
+prp_start().
+
+This fixes a complete system hard lockup on the SabreAuto when streaming
+from the ADV7180, by repeatedly sending a stream off immediately followed
+by stream on:
+
+while true; do v4l2-ctl -d1 --stream-mmap --stream-count=3; done
+
+Eventually this either causes a system lockup or EOF timeouts at every
+subsequent stream on, until a system reset.
+
+The lockup occurs when disabling the IDMA channel at stream off. Stopping
+the video data stream entering the IDMA channel before disabling the
+channel itself appears to be a reliable fix for the hard lockup.
+
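+Condensed, the resulting prp_stop() ordering is (a sketch of the hunks
+below):
+
+  wait_for_completion_timeout(&priv->last_eof_comp,
+                              msecs_to_jiffies(IMX_MEDIA_EOF_TIMEOUT));
+  v4l2_subdev_call(priv->src_sd, video, s_stream, 0);  /* stop upstream */
+  devm_free_irq(ic_priv->dev, priv->eof_irq, priv);    /* then tear down */
+  devm_free_irq(ic_priv->dev, priv->nfb4eof_irq, priv);
+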
+Fixes: f0d9c8924e2c3 ("[media] media: imx: Add IC subdev drivers")
+
+Reported-by: Gaël PORTAY <gael.portay@collabora.com>
+Tested-by: Gaël PORTAY <gael.portay@collabora.com>
+Signed-off-by: Steve Longerbeam <slongerbeam@gmail.com>
+Cc: stable@vger.kernel.org # for 4.13 and up
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Signed-off-by: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/staging/media/imx/imx-ic-prpencvf.c | 26 +++++++++++++++++---------
+ 1 file changed, 17 insertions(+), 9 deletions(-)
+
+--- a/drivers/staging/media/imx/imx-ic-prpencvf.c
++++ b/drivers/staging/media/imx/imx-ic-prpencvf.c
+@@ -676,12 +676,23 @@ static int prp_start(struct prp_priv *pr
+ goto out_free_nfb4eof_irq;
+ }
+
++ /* start upstream */
++ ret = v4l2_subdev_call(priv->src_sd, video, s_stream, 1);
++ ret = (ret && ret != -ENOIOCTLCMD) ? ret : 0;
++ if (ret) {
++ v4l2_err(&ic_priv->sd,
++ "upstream stream on failed: %d\n", ret);
++ goto out_free_eof_irq;
++ }
++
+ /* start the EOF timeout timer */
+ mod_timer(&priv->eof_timeout_timer,
+ jiffies + msecs_to_jiffies(IMX_MEDIA_EOF_TIMEOUT));
+
+ return 0;
+
++out_free_eof_irq:
++ devm_free_irq(ic_priv->dev, priv->eof_irq, priv);
+ out_free_nfb4eof_irq:
+ devm_free_irq(ic_priv->dev, priv->nfb4eof_irq, priv);
+ out_unsetup:
+@@ -713,6 +724,12 @@ static void prp_stop(struct prp_priv *pr
+ if (ret == 0)
+ v4l2_warn(&ic_priv->sd, "wait last EOF timeout\n");
+
++ /* stop upstream */
++ ret = v4l2_subdev_call(priv->src_sd, video, s_stream, 0);
++ if (ret && ret != -ENOIOCTLCMD)
++ v4l2_warn(&ic_priv->sd,
++ "upstream stream off failed: %d\n", ret);
++
+ devm_free_irq(ic_priv->dev, priv->eof_irq, priv);
+ devm_free_irq(ic_priv->dev, priv->nfb4eof_irq, priv);
+
+@@ -1144,15 +1161,6 @@ static int prp_s_stream(struct v4l2_subd
+ if (ret)
+ goto out;
+
+- /* start/stop upstream */
+- ret = v4l2_subdev_call(priv->src_sd, video, s_stream, enable);
+- ret = (ret && ret != -ENOIOCTLCMD) ? ret : 0;
+- if (ret) {
+- if (enable)
+- prp_stop(priv);
+- goto out;
+- }
+-
+ update_count:
+ priv->stream_count += enable ? 1 : -1;
+ if (priv->stream_count < 0)
--- /dev/null
+From 9dd0627d8d62a7ddb001a75f63942d92b5336561 Mon Sep 17 00:00:00 2001
+From: Sakari Ailus <sakari.ailus@linux.intel.com>
+Date: Wed, 30 Jan 2019 05:09:41 -0500
+Subject: media: uvcvideo: Avoid NULL pointer dereference at the end of streaming
+
+From: Sakari Ailus <sakari.ailus@linux.intel.com>
+
+commit 9dd0627d8d62a7ddb001a75f63942d92b5336561 upstream.
+
+The UVC video driver converts the timestamp from hardware specific unit
+to one known by the kernel at the time when the buffer is dequeued. This
+is fine in general, but the streamoff operation consists of the
+following steps (among other things):
+
+1. uvc_video_clock_cleanup --- the hardware clock sample array is
+ released and the pointer to the array is set to NULL,
+
+2. buffers in active state are returned to the user and
+
+3. buf_finish callback is called on buffers that are prepared.
+ buf_finish includes calling uvc_video_clock_update that accesses the
+ hardware clock sample array.
+
+The above is serialised by a queue specific mutex. Address the problem
+by skipping the clock conversion if the hardware clock sample array is
+already released.
+
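+The fix is an early return before the sample array is touched (a
+sketch of the hunk below):
+
+  if (!clock->samples)  /* freed by uvc_video_clock_cleanup() */
+          return;       /* skip hardware timestamp conversion */
+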
+Fixes: 9c0863b1cc48 ("[media] vb2: call buf_finish from __queue_cancel")
+
+Reported-by: Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
+Tested-by: Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
+Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Signed-off-by: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/media/usb/uvc/uvc_video.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/drivers/media/usb/uvc/uvc_video.c
++++ b/drivers/media/usb/uvc/uvc_video.c
+@@ -638,6 +638,14 @@ void uvc_video_clock_update(struct uvc_s
+ if (!uvc_hw_timestamps_param)
+ return;
+
++ /*
++ * We will get called from __vb2_queue_cancel() if there are buffers
++ * done but not dequeued by the user, but the sample array has already
++ * been released at that time. Just bail out in that case.
++ */
++ if (!clock->samples)
++ return;
++
+ spin_lock_irqsave(&clock->lock, flags);
+
+ if (clock->count < clock->size)
--- /dev/null
+From adc589d2a20808fb99d46a78175cd023f2040338 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Lucas=20A=2E=20M=2E=20Magalh=C3=A3es?= <lucmaga@gmail.com>
+Date: Mon, 21 Jan 2019 20:05:01 -0500
+Subject: media: vimc: Add vimc-streamer for stream control
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Lucas A. M. Magalhães <lucmaga@gmail.com>
+
+commit adc589d2a20808fb99d46a78175cd023f2040338 upstream.
+
+Add linear pipeline logic for stream control. It's created by
+walking backwards on the entity graph. When the stream starts it will
+simply loop through the pipeline calling the respective process_frame
+function of each entity.
+
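+The core of the streamer thread, condensed from vimc-streamer.c below:
+starting at the source end of the pipeline, each entity's
+process_frame() return value feeds the next entity downstream:
+
+  for (i = stream->pipe_size - 1; i >= 0; i--) {
+          frame = stream->ved_pipeline[i]->process_frame(
+                          stream->ved_pipeline[i], frame);
+          if (!frame || IS_ERR(frame))
+                  break;  /* no buffer ready or entity not streaming */
+  }
+  schedule_timeout(HZ / 60);  /* pace the loop at ~60 fps */
+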
+Fixes: f2fe89061d797 ("vimc: Virtual Media Controller core, capture and sensor")
+
+Cc: stable@vger.kernel.org # for v4.20
+Signed-off-by: Lucas A. M. Magalhães <lucmaga@gmail.com>
+Acked-by: Helen Koike <helen.koike@collabora.com>
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+[hverkuil-cisco@xs4all.nl: fixed small space-after-tab issue in the patch]
+Signed-off-by: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/media/platform/vimc/Makefile | 3
+ drivers/media/platform/vimc/vimc-capture.c | 18 +-
+ drivers/media/platform/vimc/vimc-common.c | 35 -----
+ drivers/media/platform/vimc/vimc-common.h | 15 --
+ drivers/media/platform/vimc/vimc-debayer.c | 26 ---
+ drivers/media/platform/vimc/vimc-scaler.c | 28 ----
+ drivers/media/platform/vimc/vimc-sensor.c | 56 +-------
+ drivers/media/platform/vimc/vimc-streamer.c | 188 ++++++++++++++++++++++++++++
+ drivers/media/platform/vimc/vimc-streamer.h | 38 +++++
+ 9 files changed, 260 insertions(+), 147 deletions(-)
+
+--- a/drivers/media/platform/vimc/Makefile
++++ b/drivers/media/platform/vimc/Makefile
+@@ -5,6 +5,7 @@ vimc_common-objs := vimc-common.o
+ vimc_debayer-objs := vimc-debayer.o
+ vimc_scaler-objs := vimc-scaler.o
+ vimc_sensor-objs := vimc-sensor.o
++vimc_streamer-objs := vimc-streamer.o
+
+ obj-$(CONFIG_VIDEO_VIMC) += vimc.o vimc_capture.o vimc_common.o vimc_debayer.o \
+- vimc_scaler.o vimc_sensor.o
++ vimc_scaler.o vimc_sensor.o vimc_streamer.o
+--- a/drivers/media/platform/vimc/vimc-capture.c
++++ b/drivers/media/platform/vimc/vimc-capture.c
+@@ -23,6 +23,7 @@
+ #include <media/videobuf2-vmalloc.h>
+
+ #include "vimc-common.h"
++#include "vimc-streamer.h"
+
+ #define VIMC_CAP_DRV_NAME "vimc-capture"
+
+@@ -43,7 +44,7 @@ struct vimc_cap_device {
+ spinlock_t qlock;
+ struct mutex lock;
+ u32 sequence;
+- struct media_pipeline pipe;
++ struct vimc_stream stream;
+ };
+
+ static const struct v4l2_pix_format fmt_default = {
+@@ -247,14 +248,13 @@ static int vimc_cap_start_streaming(stru
+ vcap->sequence = 0;
+
+ /* Start the media pipeline */
+- ret = media_pipeline_start(entity, &vcap->pipe);
++ ret = media_pipeline_start(entity, &vcap->stream.pipe);
+ if (ret) {
+ vimc_cap_return_all_buffers(vcap, VB2_BUF_STATE_QUEUED);
+ return ret;
+ }
+
+- /* Enable streaming from the pipe */
+- ret = vimc_pipeline_s_stream(&vcap->vdev.entity, 1);
++ ret = vimc_streamer_s_stream(&vcap->stream, &vcap->ved, 1);
+ if (ret) {
+ media_pipeline_stop(entity);
+ vimc_cap_return_all_buffers(vcap, VB2_BUF_STATE_QUEUED);
+@@ -272,8 +272,7 @@ static void vimc_cap_stop_streaming(stru
+ {
+ struct vimc_cap_device *vcap = vb2_get_drv_priv(vq);
+
+- /* Disable streaming from the pipe */
+- vimc_pipeline_s_stream(&vcap->vdev.entity, 0);
++ vimc_streamer_s_stream(&vcap->stream, &vcap->ved, 0);
+
+ /* Stop the media pipeline */
+ media_pipeline_stop(&vcap->vdev.entity);
+@@ -354,8 +353,8 @@ static void vimc_cap_comp_unbind(struct
+ kfree(vcap);
+ }
+
+-static void vimc_cap_process_frame(struct vimc_ent_device *ved,
+- struct media_pad *sink, const void *frame)
++static void *vimc_cap_process_frame(struct vimc_ent_device *ved,
++ const void *frame)
+ {
+ struct vimc_cap_device *vcap = container_of(ved, struct vimc_cap_device,
+ ved);
+@@ -369,7 +368,7 @@ static void vimc_cap_process_frame(struc
+ typeof(*vimc_buf), list);
+ if (!vimc_buf) {
+ spin_unlock(&vcap->qlock);
+- return;
++ return ERR_PTR(-EAGAIN);
+ }
+
+ /* Remove this entry from the list */
+@@ -390,6 +389,7 @@ static void vimc_cap_process_frame(struc
+ vb2_set_plane_payload(&vimc_buf->vb2.vb2_buf, 0,
+ vcap->format.sizeimage);
+ vb2_buffer_done(&vimc_buf->vb2.vb2_buf, VB2_BUF_STATE_DONE);
++ return NULL;
+ }
+
+ static int vimc_cap_comp_bind(struct device *comp, struct device *master,
+--- a/drivers/media/platform/vimc/vimc-common.c
++++ b/drivers/media/platform/vimc/vimc-common.c
+@@ -207,41 +207,6 @@ const struct vimc_pix_map *vimc_pix_map_
+ }
+ EXPORT_SYMBOL_GPL(vimc_pix_map_by_pixelformat);
+
+-int vimc_propagate_frame(struct media_pad *src, const void *frame)
+-{
+- struct media_link *link;
+-
+- if (!(src->flags & MEDIA_PAD_FL_SOURCE))
+- return -EINVAL;
+-
+- /* Send this frame to all sink pads that are direct linked */
+- list_for_each_entry(link, &src->entity->links, list) {
+- if (link->source == src &&
+- (link->flags & MEDIA_LNK_FL_ENABLED)) {
+- struct vimc_ent_device *ved = NULL;
+- struct media_entity *entity = link->sink->entity;
+-
+- if (is_media_entity_v4l2_subdev(entity)) {
+- struct v4l2_subdev *sd =
+- container_of(entity, struct v4l2_subdev,
+- entity);
+- ved = v4l2_get_subdevdata(sd);
+- } else if (is_media_entity_v4l2_video_device(entity)) {
+- struct video_device *vdev =
+- container_of(entity,
+- struct video_device,
+- entity);
+- ved = video_get_drvdata(vdev);
+- }
+- if (ved && ved->process_frame)
+- ved->process_frame(ved, link->sink, frame);
+- }
+- }
+-
+- return 0;
+-}
+-EXPORT_SYMBOL_GPL(vimc_propagate_frame);
+-
+ /* Helper function to allocate and initialize pads */
+ struct media_pad *vimc_pads_init(u16 num_pads, const unsigned long *pads_flag)
+ {
+--- a/drivers/media/platform/vimc/vimc-common.h
++++ b/drivers/media/platform/vimc/vimc-common.h
+@@ -108,24 +108,13 @@ struct vimc_pix_map {
+ struct vimc_ent_device {
+ struct media_entity *ent;
+ struct media_pad *pads;
+- void (*process_frame)(struct vimc_ent_device *ved,
+- struct media_pad *sink, const void *frame);
++ void * (*process_frame)(struct vimc_ent_device *ved,
++ const void *frame);
+ void (*vdev_get_format)(struct vimc_ent_device *ved,
+ struct v4l2_pix_format *fmt);
+ };
+
+ /**
+- * vimc_propagate_frame - propagate a frame through the topology
+- *
+- * @src: the source pad where the frame is being originated
+- * @frame: the frame to be propagated
+- *
+- * This function will call the process_frame callback from the vimc_ent_device
+- * struct of the nodes directly connected to the @src pad
+- */
+-int vimc_propagate_frame(struct media_pad *src, const void *frame);
+-
+-/**
+ * vimc_pads_init - initialize pads
+ *
+ * @num_pads: number of pads to initialize
+--- a/drivers/media/platform/vimc/vimc-debayer.c
++++ b/drivers/media/platform/vimc/vimc-debayer.c
+@@ -320,7 +320,6 @@ static void vimc_deb_set_rgb_mbus_fmt_rg
+ static int vimc_deb_s_stream(struct v4l2_subdev *sd, int enable)
+ {
+ struct vimc_deb_device *vdeb = v4l2_get_subdevdata(sd);
+- int ret;
+
+ if (enable) {
+ const struct vimc_pix_map *vpix;
+@@ -350,22 +349,10 @@ static int vimc_deb_s_stream(struct v4l2
+ if (!vdeb->src_frame)
+ return -ENOMEM;
+
+- /* Turn the stream on in the subdevices directly connected */
+- ret = vimc_pipeline_s_stream(&vdeb->sd.entity, 1);
+- if (ret) {
+- vfree(vdeb->src_frame);
+- vdeb->src_frame = NULL;
+- return ret;
+- }
+ } else {
+ if (!vdeb->src_frame)
+ return 0;
+
+- /* Disable streaming from the pipe */
+- ret = vimc_pipeline_s_stream(&vdeb->sd.entity, 0);
+- if (ret)
+- return ret;
+-
+ vfree(vdeb->src_frame);
+ vdeb->src_frame = NULL;
+ }
+@@ -479,9 +466,8 @@ static void vimc_deb_calc_rgb_sink(struc
+ }
+ }
+
+-static void vimc_deb_process_frame(struct vimc_ent_device *ved,
+- struct media_pad *sink,
+- const void *sink_frame)
++static void *vimc_deb_process_frame(struct vimc_ent_device *ved,
++ const void *sink_frame)
+ {
+ struct vimc_deb_device *vdeb = container_of(ved, struct vimc_deb_device,
+ ved);
+@@ -490,7 +476,7 @@ static void vimc_deb_process_frame(struc
+
+ /* If the stream in this node is not active, just return */
+ if (!vdeb->src_frame)
+- return;
++ return ERR_PTR(-EINVAL);
+
+ for (i = 0; i < vdeb->sink_fmt.height; i++)
+ for (j = 0; j < vdeb->sink_fmt.width; j++) {
+@@ -498,12 +484,8 @@ static void vimc_deb_process_frame(struc
+ vdeb->set_rgb_src(vdeb, i, j, rgb);
+ }
+
+- /* Propagate the frame through all source pads */
+- for (i = 1; i < vdeb->sd.entity.num_pads; i++) {
+- struct media_pad *pad = &vdeb->sd.entity.pads[i];
++ return vdeb->src_frame;
+
+- vimc_propagate_frame(pad, vdeb->src_frame);
+- }
+ }
+
+ static void vimc_deb_comp_unbind(struct device *comp, struct device *master,
+--- a/drivers/media/platform/vimc/vimc-scaler.c
++++ b/drivers/media/platform/vimc/vimc-scaler.c
+@@ -216,7 +216,6 @@ static const struct v4l2_subdev_pad_ops
+ static int vimc_sca_s_stream(struct v4l2_subdev *sd, int enable)
+ {
+ struct vimc_sca_device *vsca = v4l2_get_subdevdata(sd);
+- int ret;
+
+ if (enable) {
+ const struct vimc_pix_map *vpix;
+@@ -244,22 +243,10 @@ static int vimc_sca_s_stream(struct v4l2
+ if (!vsca->src_frame)
+ return -ENOMEM;
+
+- /* Turn the stream on in the subdevices directly connected */
+- ret = vimc_pipeline_s_stream(&vsca->sd.entity, 1);
+- if (ret) {
+- vfree(vsca->src_frame);
+- vsca->src_frame = NULL;
+- return ret;
+- }
+ } else {
+ if (!vsca->src_frame)
+ return 0;
+
+- /* Disable streaming from the pipe */
+- ret = vimc_pipeline_s_stream(&vsca->sd.entity, 0);
+- if (ret)
+- return ret;
+-
+ vfree(vsca->src_frame);
+ vsca->src_frame = NULL;
+ }
+@@ -345,26 +332,19 @@ static void vimc_sca_fill_src_frame(cons
+ vimc_sca_scale_pix(vsca, i, j, sink_frame);
+ }
+
+-static void vimc_sca_process_frame(struct vimc_ent_device *ved,
+- struct media_pad *sink,
+- const void *sink_frame)
++static void *vimc_sca_process_frame(struct vimc_ent_device *ved,
++ const void *sink_frame)
+ {
+ struct vimc_sca_device *vsca = container_of(ved, struct vimc_sca_device,
+ ved);
+- unsigned int i;
+
+ /* If the stream in this node is not active, just return */
+ if (!vsca->src_frame)
+- return;
++ return ERR_PTR(-EINVAL);
+
+ vimc_sca_fill_src_frame(vsca, sink_frame);
+
+- /* Propagate the frame through all source pads */
+- for (i = 1; i < vsca->sd.entity.num_pads; i++) {
+- struct media_pad *pad = &vsca->sd.entity.pads[i];
+-
+- vimc_propagate_frame(pad, vsca->src_frame);
+- }
++ return vsca->src_frame;
+ };
+
+ static void vimc_sca_comp_unbind(struct device *comp, struct device *master,
+--- a/drivers/media/platform/vimc/vimc-sensor.c
++++ b/drivers/media/platform/vimc/vimc-sensor.c
+@@ -16,8 +16,6 @@
+ */
+
+ #include <linux/component.h>
+-#include <linux/freezer.h>
+-#include <linux/kthread.h>
+ #include <linux/module.h>
+ #include <linux/platform_device.h>
+ #include <linux/v4l2-mediabus.h>
+@@ -197,38 +195,27 @@ static const struct v4l2_subdev_pad_ops
+ .set_fmt = vimc_sen_set_fmt,
+ };
+
+-static int vimc_sen_tpg_thread(void *data)
++static void *vimc_sen_process_frame(struct vimc_ent_device *ved,
++ const void *sink_frame)
+ {
+- struct vimc_sen_device *vsen = data;
+- unsigned int i;
+-
+- set_freezable();
+- set_current_state(TASK_UNINTERRUPTIBLE);
+-
+- for (;;) {
+- try_to_freeze();
+- if (kthread_should_stop())
+- break;
+-
+- tpg_fill_plane_buffer(&vsen->tpg, 0, 0, vsen->frame);
+-
+- /* Send the frame to all source pads */
+- for (i = 0; i < vsen->sd.entity.num_pads; i++)
+- vimc_propagate_frame(&vsen->sd.entity.pads[i],
+- vsen->frame);
++ struct vimc_sen_device *vsen = container_of(ved, struct vimc_sen_device,
++ ved);
++ const struct vimc_pix_map *vpix;
++ unsigned int frame_size;
+
+- /* 60 frames per second */
+- schedule_timeout(HZ/60);
+- }
++ /* Calculate the frame size */
++ vpix = vimc_pix_map_by_code(vsen->mbus_format.code);
++ frame_size = vsen->mbus_format.width * vpix->bpp *
++ vsen->mbus_format.height;
+
+- return 0;
++ tpg_fill_plane_buffer(&vsen->tpg, 0, 0, vsen->frame);
++ return vsen->frame;
+ }
+
+ static int vimc_sen_s_stream(struct v4l2_subdev *sd, int enable)
+ {
+ struct vimc_sen_device *vsen =
+ container_of(sd, struct vimc_sen_device, sd);
+- int ret;
+
+ if (enable) {
+ const struct vimc_pix_map *vpix;
+@@ -254,26 +241,8 @@ static int vimc_sen_s_stream(struct v4l2
+ /* configure the test pattern generator */
+ vimc_sen_tpg_s_format(vsen);
+
+- /* Initialize the image generator thread */
+- vsen->kthread_sen = kthread_run(vimc_sen_tpg_thread, vsen,
+- "%s-sen", vsen->sd.v4l2_dev->name);
+- if (IS_ERR(vsen->kthread_sen)) {
+- dev_err(vsen->dev, "%s: kernel_thread() failed\n",
+- vsen->sd.name);
+- vfree(vsen->frame);
+- vsen->frame = NULL;
+- return PTR_ERR(vsen->kthread_sen);
+- }
+ } else {
+- if (!vsen->kthread_sen)
+- return 0;
+-
+- /* Stop image generator */
+- ret = kthread_stop(vsen->kthread_sen);
+- if (ret)
+- return ret;
+
+- vsen->kthread_sen = NULL;
+ vfree(vsen->frame);
+ vsen->frame = NULL;
+ return 0;
+@@ -325,6 +294,7 @@ static int vimc_sen_comp_bind(struct dev
+ if (ret)
+ goto err_free_vsen;
+
++ vsen->ved.process_frame = vimc_sen_process_frame;
+ dev_set_drvdata(comp, &vsen->ved);
+ vsen->dev = comp;
+
+--- /dev/null
++++ b/drivers/media/platform/vimc/vimc-streamer.c
+@@ -0,0 +1,188 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * vimc-streamer.c Virtual Media Controller Driver
++ *
++ * Copyright (C) 2018 Lucas A. M. Magalhães <lucmaga@gmail.com>
++ *
++ */
++
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/freezer.h>
++#include <linux/kthread.h>
++
++#include "vimc-streamer.h"
++
++/**
++ * vimc_get_source_entity - get the entity connected with the first sink pad
++ *
++ * @ent: reference media_entity
++ *
++ * Helper function that returns the media entity containing the source pad
++ * linked with the first sink pad from the given media entity pad list.
++ */
++static struct media_entity *vimc_get_source_entity(struct media_entity *ent)
++{
++ struct media_pad *pad;
++ int i;
++
++ for (i = 0; i < ent->num_pads; i++) {
++ if (ent->pads[i].flags & MEDIA_PAD_FL_SOURCE)
++ continue;
++ pad = media_entity_remote_pad(&ent->pads[i]);
++ return pad ? pad->entity : NULL;
++ }
++ return NULL;
++}
++
++/*
++ * vimc_streamer_pipeline_terminate - Disable stream in all ved in stream
++ *
++ * @stream: the pointer to the stream structure with the pipeline to be
++ * disabled.
++ *
++ * Calls s_stream to disable the stream in each entity of the pipeline
++ *
++ */
++static void vimc_streamer_pipeline_terminate(struct vimc_stream *stream)
++{
++ struct media_entity *entity;
++ struct v4l2_subdev *sd;
++
++ while (stream->pipe_size) {
++ stream->pipe_size--;
++ entity = stream->ved_pipeline[stream->pipe_size]->ent;
++ entity = vimc_get_source_entity(entity);
++ stream->ved_pipeline[stream->pipe_size] = NULL;
++
++ if (!is_media_entity_v4l2_subdev(entity))
++ continue;
++
++ sd = media_entity_to_v4l2_subdev(entity);
++ v4l2_subdev_call(sd, video, s_stream, 0);
++ }
++}
++
++/*
++ * vimc_streamer_pipeline_init - initializes the stream structure
++ *
++ * @stream: the pointer to the stream structure to be initialized
++ * @ved: the pointer to the vimc entity initializing the stream
++ *
++ * Initializes the stream structure. Walks through the entity graph to
++ * construct the pipeline used later on the streamer thread.
++ * Calls s_stream to enable stream in all entities of the pipeline.
++ */
++static int vimc_streamer_pipeline_init(struct vimc_stream *stream,
++ struct vimc_ent_device *ved)
++{
++ struct media_entity *entity;
++ struct video_device *vdev;
++ struct v4l2_subdev *sd;
++ int ret = 0;
++
++ stream->pipe_size = 0;
++ while (stream->pipe_size < VIMC_STREAMER_PIPELINE_MAX_SIZE) {
++ if (!ved) {
++ vimc_streamer_pipeline_terminate(stream);
++ return -EINVAL;
++ }
++ stream->ved_pipeline[stream->pipe_size++] = ved;
++
++ entity = vimc_get_source_entity(ved->ent);
++ /* Check if the end of the pipeline was reached */
++ if (!entity)
++ return 0;
++
++ if (is_media_entity_v4l2_subdev(entity)) {
++ sd = media_entity_to_v4l2_subdev(entity);
++ ret = v4l2_subdev_call(sd, video, s_stream, 1);
++ if (ret && ret != -ENOIOCTLCMD) {
++ vimc_streamer_pipeline_terminate(stream);
++ return ret;
++ }
++ ved = v4l2_get_subdevdata(sd);
++ } else {
++ vdev = container_of(entity,
++ struct video_device,
++ entity);
++ ved = video_get_drvdata(vdev);
++ }
++ }
++
++ vimc_streamer_pipeline_terminate(stream);
++ return -EINVAL;
++}
++
++static int vimc_streamer_thread(void *data)
++{
++ struct vimc_stream *stream = data;
++ int i;
++
++ set_freezable();
++ set_current_state(TASK_UNINTERRUPTIBLE);
++
++ for (;;) {
++ try_to_freeze();
++ if (kthread_should_stop())
++ break;
++
++ for (i = stream->pipe_size - 1; i >= 0; i--) {
++ stream->frame = stream->ved_pipeline[i]->process_frame(
++ stream->ved_pipeline[i],
++ stream->frame);
++ if (!stream->frame)
++ break;
++ if (IS_ERR(stream->frame))
++ break;
++ }
++ /* run at roughly 60 frames per second */
++ schedule_timeout(HZ / 60);
++ }
++
++ return 0;
++}
++
++int vimc_streamer_s_stream(struct vimc_stream *stream,
++ struct vimc_ent_device *ved,
++ int enable)
++{
++ int ret;
++
++ if (!stream || !ved)
++ return -EINVAL;
++
++ if (enable) {
++ if (stream->kthread)
++ return 0;
++
++ ret = vimc_streamer_pipeline_init(stream, ved);
++ if (ret)
++ return ret;
++
++ stream->kthread = kthread_run(vimc_streamer_thread, stream,
++ "vimc-streamer thread");
++
++ if (IS_ERR(stream->kthread))
++ return PTR_ERR(stream->kthread);
++
++ } else {
++ if (!stream->kthread)
++ return 0;
++
++ ret = kthread_stop(stream->kthread);
++ if (ret)
++ return ret;
++
++ stream->kthread = NULL;
++
++ vimc_streamer_pipeline_terminate(stream);
++ }
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(vimc_streamer_s_stream);
++
++MODULE_DESCRIPTION("Virtual Media Controller Driver (VIMC) Streamer");
++MODULE_AUTHOR("Lucas A. M. Magalhães <lucmaga@gmail.com>");
++MODULE_LICENSE("GPL");
+--- /dev/null
++++ b/drivers/media/platform/vimc/vimc-streamer.h
+@@ -0,0 +1,38 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
++/*
++ * vimc-streamer.h Virtual Media Controller Driver
++ *
++ * Copyright (C) 2018 Lucas A. M. Magalhães <lucmaga@gmail.com>
++ *
++ */
++
++#ifndef _VIMC_STREAMER_H_
++#define _VIMC_STREAMER_H_
++
++#include <media/media-device.h>
++
++#include "vimc-common.h"
++
++#define VIMC_STREAMER_PIPELINE_MAX_SIZE 16
++
++struct vimc_stream {
++ struct media_pipeline pipe;
++ struct vimc_ent_device *ved_pipeline[VIMC_STREAMER_PIPELINE_MAX_SIZE];
++ unsigned int pipe_size;
++ u8 *frame;
++ struct task_struct *kthread;
++};
++
++/**
++ * vimc_streamer_s_stream - start/stop the stream
++ *
++ * @stream: the pointer to the stream to start or stop
++ * @ved: The last entity of the streamer pipeline
++ * @enable: any non-zero value starts the stream, zero stops it
++ *
++ */
++int vimc_streamer_s_stream(struct vimc_stream *stream,
++ struct vimc_ent_device *ved,
++ int enable);
++
++#endif //_VIMC_STREAMER_H_
tpm-tpm_crb-avoid-unaligned-reads-in-crb_recv.patch
tpm-unify-the-send-callback-behaviour.patch
rcu-do-rcu-gp-kthread-self-wakeup-from-softirq-and-interrupt.patch
+media-imx-prpencvf-stop-upstream-before-disabling-idma-channel.patch
+media-uvcvideo-avoid-null-pointer-dereference-at-the-end-of-streaming.patch
+media-vimc-add-vimc-streamer-for-stream-control.patch
+media-imx-csi-disable-csi-immediately-after-last-eof.patch
+media-imx-csi-stop-upstream-before-disabling-idma-channel.patch
+drm-radeon-evergreen_cs-fix-missing-break-in-switch-statement.patch
+kvm-call-kvm_arch_memslots_updated-before-updating-memslots.patch
+kvm-x86-mmu-detect-mmio-generation-wrap-in-any-address-space.patch
+kvm-x86-mmu-do-not-cache-mmio-accesses-while-memslots-are-in-flux.patch
+kvm-nvmx-sign-extend-displacements-of-vmx-instr-s-mem-operands.patch
+kvm-nvmx-apply-addr-size-mask-to-effective-address-for-vmx-instructions.patch
+kvm-nvmx-ignore-limit-checks-on-vmx-instructions-using-flat-segments.patch