--- /dev/null
+From 10117450735c7a7c0858095fb46a860e7037cb9a Mon Sep 17 00:00:00 2001
+From: "ndesaulniers@google.com" <ndesaulniers@google.com>
+Date: Thu, 24 Jan 2019 16:52:59 -0800
+Subject: drm/amd/display: add -msse2 to prevent Clang from emitting libcalls to undefined SW FP routines
+
+From: ndesaulniers@google.com <ndesaulniers@google.com>
+
+commit 10117450735c7a7c0858095fb46a860e7037cb9a upstream.
+
+arch/x86/Makefile disables SSE and SSE2 for the whole kernel. The
+AMDGPU drivers modified in this patch re-enable SSE but not SSE2. Turn
+on SSE2 to support emitting double precision floating point instructions
+rather than calls to non-existent (usually available from gcc_s or
+compiler_rt) floating point helper routines.
+
+Link: https://gcc.gnu.org/onlinedocs/gccint/Soft-float-library-routines.html
+Link: https://github.com/ClangBuiltLinux/linux/issues/327
+Cc: stable@vger.kernel.org # 4.19
+Reported-by: S, Shirish <Shirish.S@amd.com>
+Reported-by: Matthias Kaehlcke <mka@google.com>
+Suggested-by: James Y Knight <jyknight@google.com>
+Suggested-by: Nathan Chancellor <natechancellor@gmail.com>
+Signed-off-by: Nick Desaulniers <ndesaulniers@google.com>
+Tested-by: Guenter Roeck <linux@roeck-us.net>
+Tested-by: Matthias Kaehlcke <mka@chromium.org>
+Tested-by: Nathan Chancellor <natechancellor@gmail.com>
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/amd/display/dc/calcs/Makefile | 2 +-
+ drivers/gpu/drm/amd/display/dc/dml/Makefile | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/amd/display/dc/calcs/Makefile
++++ b/drivers/gpu/drm/amd/display/dc/calcs/Makefile
+@@ -30,7 +30,7 @@ else ifneq ($(call cc-option, -mstack-al
+ cc_stack_align := -mstack-alignment=16
+ endif
+
+-calcs_ccflags := -mhard-float -msse $(cc_stack_align)
++calcs_ccflags := -mhard-float -msse -msse2 $(cc_stack_align)
+
+ CFLAGS_dcn_calcs.o := $(calcs_ccflags)
+ CFLAGS_dcn_calc_auto.o := $(calcs_ccflags)
+--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
++++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
+@@ -30,7 +30,7 @@ else ifneq ($(call cc-option, -mstack-al
+ cc_stack_align := -mstack-alignment=16
+ endif
+
+-dml_ccflags := -mhard-float -msse $(cc_stack_align)
++dml_ccflags := -mhard-float -msse -msse2 $(cc_stack_align)
+
+ CFLAGS_display_mode_lib.o := $(dml_ccflags)
+ CFLAGS_display_pipe_clocks.o := $(dml_ccflags)
--- /dev/null
+From 59d3191f14dc18881fec1172c7096b7863622803 Mon Sep 17 00:00:00 2001
+From: Harry Wentland <harry.wentland@amd.com>
+Date: Wed, 30 Jan 2019 15:45:18 -0500
+Subject: drm/amd/display: don't call dm_pp_ function from an fpu block
+
+From: Harry Wentland <harry.wentland@amd.com>
+
+commit 59d3191f14dc18881fec1172c7096b7863622803 upstream.
+
+Powerplay functions called from dm_pp_* functions tend to do a
+mutex_lock which isn't safe to do inside a kernel_fpu_begin/end block as
+those will disable/enable preemption.
+
+Rearrange the dm_pp_get_clock_levels_by_type_with_voltage calls to make
+sure they happen outside of kernel_fpu_begin/end.
+
+Cc: stable@vger.kernel.org
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
++++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+@@ -1347,12 +1347,12 @@ void dcn_bw_update_from_pplib(struct dc
+ struct dm_pp_clock_levels_with_voltage fclks = {0}, dcfclks = {0};
+ bool res;
+
+- kernel_fpu_begin();
+-
+ /* TODO: This is not the proper way to obtain fabric_and_dram_bandwidth, should be min(fclk, memclk) */
+ res = dm_pp_get_clock_levels_by_type_with_voltage(
+ ctx, DM_PP_CLOCK_TYPE_FCLK, &fclks);
+
++ kernel_fpu_begin();
++
+ if (res)
+ res = verify_clock_values(&fclks);
+
+@@ -1371,9 +1371,13 @@ void dcn_bw_update_from_pplib(struct dc
+ } else
+ BREAK_TO_DEBUGGER();
+
++ kernel_fpu_end();
++
+ res = dm_pp_get_clock_levels_by_type_with_voltage(
+ ctx, DM_PP_CLOCK_TYPE_DCFCLK, &dcfclks);
+
++ kernel_fpu_begin();
++
+ if (res)
+ res = verify_clock_values(&dcfclks);
+
--- /dev/null
+From f5742ec36422a39b57f0256e4847f61b3c432f8c Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Mon, 25 Feb 2019 16:44:36 +0800
+Subject: drm/amd/powerplay: correct power reading on fiji
+
+From: Evan Quan <evan.quan@amd.com>
+
+commit f5742ec36422a39b57f0256e4847f61b3c432f8c upstream.
+
+Set sampling period as 500ms to provide a smooth power
+reading output. Also, correct the register for power
+reading.
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Feifei Xu <Feifei.Xu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+@@ -3487,14 +3487,14 @@ static int smu7_get_gpu_power(struct pp_
+
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+- ixSMU_PM_STATUS_94, 0);
++ ixSMU_PM_STATUS_95, 0);
+
+ for (i = 0; i < 10; i++) {
+- mdelay(1);
++ mdelay(500);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogSample);
+ tmp = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC,
+- ixSMU_PM_STATUS_94);
++ ixSMU_PM_STATUS_95);
+ if (tmp != 0)
+ break;
+ }
--- /dev/null
+From 78de14c23e031420aa5f61973583635eccd6cd2a Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Noralf=20Tr=C3=B8nnes?= <noralf@tronnes.org>
+Date: Fri, 25 Jan 2019 16:03:00 +0100
+Subject: drm/fb-helper: generic: Fix drm_fbdev_client_restore()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Noralf Trønnes <noralf@tronnes.org>
+
+commit 78de14c23e031420aa5f61973583635eccd6cd2a upstream.
+
+If fbdev setup has failed, lastclose will give a NULL pointer deref:
+
+[ 77.794295] [drm:drm_lastclose]
+[ 77.794414] [drm:drm_lastclose] driver lastclose completed
+[ 77.794660] Unable to handle kernel NULL pointer dereference at virtual address 00000014
+[ 77.809460] pgd = b376b71b
+[ 77.818275] [00000014] *pgd=175ba831, *pte=00000000, *ppte=00000000
+[ 77.830813] Internal error: Oops: 17 [#1] ARM
+[ 77.840963] Modules linked in: mi0283qt mipi_dbi tinydrm raspberrypi_hwmon gpio_backlight backlight snd_bcm2835(C) bcm2835_rng rng_core
+[ 77.865203] CPU: 0 PID: 527 Comm: lt-modetest Tainted: G C 5.0.0-rc1+ #1
+[ 77.879525] Hardware name: BCM2835
+[ 77.889185] PC is at restore_fbdev_mode+0x20/0x164
+[ 77.900261] LR is at drm_fb_helper_restore_fbdev_mode_unlocked+0x54/0x9c
+[ 78.002446] Process lt-modetest (pid: 527, stack limit = 0x7a3d5c14)
+[ 78.291030] Backtrace:
+[ 78.300815] [<c04f2d0c>] (restore_fbdev_mode) from [<c04f4708>] (drm_fb_helper_restore_fbdev_mode_unlocked+0x54/0x9c)
+[ 78.319095] r9:d8a8a288 r8:d891acf0 r7:d7697910 r6:00000000 r5:d891ac00 r4:d891ac00
+[ 78.334432] [<c04f46b4>] (drm_fb_helper_restore_fbdev_mode_unlocked) from [<c04f47e8>] (drm_fbdev_client_restore+0x18/0x20)
+[ 78.353296] r8:d76978c0 r7:d7697910 r6:d7697950 r5:d7697800 r4:d891ac00 r3:c04f47d0
+[ 78.368689] [<c04f47d0>] (drm_fbdev_client_restore) from [<c051b6b4>] (drm_client_dev_restore+0x7c/0xc0)
+[ 78.385982] [<c051b638>] (drm_client_dev_restore) from [<c04f8fd0>] (drm_lastclose+0xc4/0xd4)
+[ 78.402332] r8:d76978c0 r7:d7471080 r6:c0e0c088 r5:d8a85e00 r4:d7697800
+[ 78.416688] [<c04f8f0c>] (drm_lastclose) from [<c04f9088>] (drm_release+0xa8/0x10c)
+[ 78.431929] r5:d8a85e00 r4:d7697800
+[ 78.442989] [<c04f8fe0>] (drm_release) from [<c02640c4>] (__fput+0x104/0x1c8)
+[ 78.457740] r8:d5ccea10 r7:d96cfb10 r6:00000008 r5:d74c1b90 r4:d8a8a280
+[ 78.472043] [<c0263fc0>] (__fput) from [<c02641ec>] (____fput+0x18/0x1c)
+[ 78.486363] r10:00000006 r9:d7722000 r8:c01011c4 r7:00000000 r6:c0ebac6c r5:d892a340
+[ 78.501869] r4:d8a8a280
+[ 78.512002] [<c02641d4>] (____fput) from [<c013ef1c>] (task_work_run+0x98/0xac)
+[ 78.527186] [<c013ee84>] (task_work_run) from [<c010cc54>] (do_work_pending+0x4f8/0x570)
+[ 78.543238] r7:d7722030 r6:00000004 r5:d7723fb0 r4:00000000
+[ 78.556825] [<c010c75c>] (do_work_pending) from [<c0101034>] (slow_work_pending+0xc/0x20)
+[ 78.674256] ---[ end trace 70d3a60cf739be3b ]---
+
+Fix by using drm_fb_helper_lastclose() which checks if fbdev is in use.
+
+Fixes: 9060d7f49376 ("drm/fb-helper: Finish the generic fbdev emulation")
+Cc: stable@vger.kernel.org
+Signed-off-by: Noralf Trønnes <noralf@tronnes.org>
+Reviewed-by: Gerd Hoffmann <kraxel@redhat.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20190125150300.33268-1-noralf@tronnes.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/drm_fb_helper.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+--- a/drivers/gpu/drm/drm_fb_helper.c
++++ b/drivers/gpu/drm/drm_fb_helper.c
+@@ -3156,9 +3156,7 @@ static void drm_fbdev_client_unregister(
+
+ static int drm_fbdev_client_restore(struct drm_client_dev *client)
+ {
+- struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
+-
+- drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
++ drm_fb_helper_lastclose(client->dev);
+
+ return 0;
+ }
--- /dev/null
+From cc5034a5d293dd620484d1d836aa16c6764a1c8c Mon Sep 17 00:00:00 2001
+From: "Gustavo A. R. Silva" <gustavo@embeddedor.com>
+Date: Fri, 15 Feb 2019 14:29:26 -0600
+Subject: drm/radeon/evergreen_cs: fix missing break in switch statement
+
+From: Gustavo A. R. Silva <gustavo@embeddedor.com>
+
+commit cc5034a5d293dd620484d1d836aa16c6764a1c8c upstream.
+
+Add missing break statement in order to prevent the code from falling
+through to case CB_TARGET_MASK.
+
+This bug was found thanks to the ongoing efforts to enable
+-Wimplicit-fallthrough.
+
+Fixes: dd220a00e8bd ("drm/radeon/kms: add support for streamout v7")
+Cc: stable@vger.kernel.org
+Signed-off-by: Gustavo A. R. Silva <gustavo@embeddedor.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/radeon/evergreen_cs.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/gpu/drm/radeon/evergreen_cs.c
++++ b/drivers/gpu/drm/radeon/evergreen_cs.c
+@@ -1299,6 +1299,7 @@ static int evergreen_cs_handle_reg(struc
+ return -EINVAL;
+ }
+ ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
++ break;
+ case CB_TARGET_MASK:
+ track->cb_target_mask = radeon_get_ib_value(p, idx);
+ track->cb_dirty = true;
--- /dev/null
+From 152482580a1b0accb60676063a1ac57b2d12daf6 Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <sean.j.christopherson@intel.com>
+Date: Tue, 5 Feb 2019 12:54:17 -0800
+Subject: KVM: Call kvm_arch_memslots_updated() before updating memslots
+
+From: Sean Christopherson <sean.j.christopherson@intel.com>
+
+commit 152482580a1b0accb60676063a1ac57b2d12daf6 upstream.
+
+kvm_arch_memslots_updated() is at this point in time an x86-specific
+hook for handling MMIO generation wraparound. x86 stashes 19 bits of
+the memslots generation number in its MMIO sptes in order to avoid
+full page fault walks for repeat faults on emulated MMIO addresses.
+Because only 19 bits are used, wrapping the MMIO generation number is
+possible, if unlikely. kvm_arch_memslots_updated() alerts x86 that
+the generation has changed so that it can invalidate all MMIO sptes in
+case the effective MMIO generation has wrapped so as to avoid using a
+stale spte, e.g. a (very) old spte that was created with generation==0.
+
+Given that the purpose of kvm_arch_memslots_updated() is to prevent
+consuming stale entries, it needs to be called before the new generation
+is propagated to memslots. Invalidating the MMIO sptes after updating
+memslots means that there is a window where a vCPU could dereference
+the new memslots generation, e.g. 0, and incorrectly reuse an old MMIO
+spte that was created with (pre-wrap) generation==0.
+
+Fixes: e59dbe09f8e6 ("KVM: Introduce kvm_arch_memslots_updated()")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/include/asm/kvm_host.h | 2 +-
+ arch/powerpc/include/asm/kvm_host.h | 2 +-
+ arch/s390/include/asm/kvm_host.h | 2 +-
+ arch/x86/include/asm/kvm_host.h | 2 +-
+ arch/x86/kvm/mmu.c | 4 ++--
+ arch/x86/kvm/x86.c | 4 ++--
+ include/linux/kvm_host.h | 2 +-
+ virt/kvm/arm/mmu.c | 2 +-
+ virt/kvm/kvm_main.c | 7 +++++--
+ 9 files changed, 15 insertions(+), 12 deletions(-)
+
+--- a/arch/mips/include/asm/kvm_host.h
++++ b/arch/mips/include/asm/kvm_host.h
+@@ -1131,7 +1131,7 @@ static inline void kvm_arch_hardware_uns
+ static inline void kvm_arch_sync_events(struct kvm *kvm) {}
+ static inline void kvm_arch_free_memslot(struct kvm *kvm,
+ struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
+-static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) {}
++static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
+ static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
+ static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
+ static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
+--- a/arch/powerpc/include/asm/kvm_host.h
++++ b/arch/powerpc/include/asm/kvm_host.h
+@@ -822,7 +822,7 @@ struct kvm_vcpu_arch {
+ static inline void kvm_arch_hardware_disable(void) {}
+ static inline void kvm_arch_hardware_unsetup(void) {}
+ static inline void kvm_arch_sync_events(struct kvm *kvm) {}
+-static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) {}
++static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
+ static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
+ static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
+ static inline void kvm_arch_exit(void) {}
+--- a/arch/s390/include/asm/kvm_host.h
++++ b/arch/s390/include/asm/kvm_host.h
+@@ -865,7 +865,7 @@ static inline void kvm_arch_vcpu_uninit(
+ static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
+ static inline void kvm_arch_free_memslot(struct kvm *kvm,
+ struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
+-static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) {}
++static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
+ static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
+ static inline void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
+ struct kvm_memory_slot *slot) {}
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -1194,7 +1194,7 @@ void kvm_mmu_clear_dirty_pt_masked(struc
+ struct kvm_memory_slot *slot,
+ gfn_t gfn_offset, unsigned long mask);
+ void kvm_mmu_zap_all(struct kvm *kvm);
+-void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, struct kvm_memslots *slots);
++void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen);
+ unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
+ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
+
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -5774,13 +5774,13 @@ static bool kvm_has_zapped_obsolete_page
+ return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages));
+ }
+
+-void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, struct kvm_memslots *slots)
++void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
+ {
+ /*
+ * The very rare case: if the generation-number is round,
+ * zap all shadow pages.
+ */
+- if (unlikely((slots->generation & MMIO_GEN_MASK) == 0)) {
++ if (unlikely((gen & MMIO_GEN_MASK) == 0)) {
+ kvm_debug_ratelimited("kvm: zapping shadow pages for mmio generation wraparound\n");
+ kvm_mmu_invalidate_zap_all_pages(kvm);
+ }
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -9108,13 +9108,13 @@ out_free:
+ return -ENOMEM;
+ }
+
+-void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots)
++void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
+ {
+ /*
+ * memslots->generation has been incremented.
+ * mmio generation may have reached its maximum value.
+ */
+- kvm_mmu_invalidate_mmio_sptes(kvm, slots);
++ kvm_mmu_invalidate_mmio_sptes(kvm, gen);
+ }
+
+ int kvm_arch_prepare_memory_region(struct kvm *kvm,
+--- a/include/linux/kvm_host.h
++++ b/include/linux/kvm_host.h
+@@ -633,7 +633,7 @@ void kvm_arch_free_memslot(struct kvm *k
+ struct kvm_memory_slot *dont);
+ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
+ unsigned long npages);
+-void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots);
++void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen);
+ int kvm_arch_prepare_memory_region(struct kvm *kvm,
+ struct kvm_memory_slot *memslot,
+ const struct kvm_userspace_memory_region *mem,
+--- a/virt/kvm/arm/mmu.c
++++ b/virt/kvm/arm/mmu.c
+@@ -2154,7 +2154,7 @@ int kvm_arch_create_memslot(struct kvm *
+ return 0;
+ }
+
+-void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots)
++void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
+ {
+ }
+
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -873,6 +873,7 @@ static struct kvm_memslots *install_new_
+ int as_id, struct kvm_memslots *slots)
+ {
+ struct kvm_memslots *old_memslots = __kvm_memslots(kvm, as_id);
++ u64 gen;
+
+ /*
+ * Set the low bit in the generation, which disables SPTE caching
+@@ -895,9 +896,11 @@ static struct kvm_memslots *install_new_
+ * space 0 will use generations 0, 4, 8, ... while * address space 1 will
+ * use generations 2, 6, 10, 14, ...
+ */
+- slots->generation += KVM_ADDRESS_SPACE_NUM * 2 - 1;
++ gen = slots->generation + KVM_ADDRESS_SPACE_NUM * 2 - 1;
+
+- kvm_arch_memslots_updated(kvm, slots);
++ kvm_arch_memslots_updated(kvm, gen);
++
++ slots->generation = gen;
+
+ return old_memslots;
+ }
--- /dev/null
+From 8570f9e881e3fde98801bb3a47eef84dd934d405 Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <sean.j.christopherson@intel.com>
+Date: Wed, 23 Jan 2019 14:39:24 -0800
+Subject: KVM: nVMX: Apply addr size mask to effective address for VMX instructions
+
+From: Sean Christopherson <sean.j.christopherson@intel.com>
+
+commit 8570f9e881e3fde98801bb3a47eef84dd934d405 upstream.
+
+The address size of an instruction affects the effective address, not
+the virtual/linear address. The final address may still be truncated,
+e.g. to 32-bits outside of long mode, but that happens irrespective of
+the address size, e.g. a 32-bit address size can yield a 64-bit virtual
+address when using FS/GS with a non-zero base.
+
+Fixes: 064aea774768 ("KVM: nVMX: Decoding memory operands of VMX instructions")
+Cc: stable@vger.kernel.org
+Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/vmx.c | 25 +++++++++++++++++++++++--
+ 1 file changed, 23 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -8193,20 +8193,41 @@ static int get_vmx_mem_address(struct kv
+ if (index_is_valid)
+ off += kvm_register_read(vcpu, index_reg)<<scaling;
+ vmx_get_segment(vcpu, &s, seg_reg);
+- *ret = s.base + off;
+
++ /*
++ * The effective address, i.e. @off, of a memory operand is truncated
++ * based on the address size of the instruction. Note that this is
++ * the *effective address*, i.e. the address prior to accounting for
++ * the segment's base.
++ */
+ if (addr_size == 1) /* 32 bit */
+- *ret &= 0xffffffff;
++ off &= 0xffffffff;
++ else if (addr_size == 0) /* 16 bit */
++ off &= 0xffff;
+
+ /* Checks for #GP/#SS exceptions. */
+ exn = false;
+ if (is_long_mode(vcpu)) {
++ /*
++ * The virtual/linear address is never truncated in 64-bit
++ * mode, e.g. a 32-bit address size can yield a 64-bit virtual
++ * address when using FS/GS with a non-zero base.
++ */
++ *ret = s.base + off;
++
+ /* Long mode: #GP(0)/#SS(0) if the memory address is in a
+ * non-canonical form. This is the only check on the memory
+ * destination for long mode!
+ */
+ exn = is_noncanonical_address(*ret, vcpu);
+ } else if (is_protmode(vcpu)) {
++ /*
++ * When not in long mode, the virtual/linear address is
++ * unconditionally truncated to 32 bits regardless of the
++ * address size.
++ */
++ *ret = (s.base + off) & 0xffffffff;
++
+ /* Protected mode: apply checks for segment validity in the
+ * following order:
+ * - segment type check (#GP(0) may be thrown)
--- /dev/null
+From 34333cc6c2cb021662fd32e24e618d1b86de95bf Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <sean.j.christopherson@intel.com>
+Date: Wed, 23 Jan 2019 14:39:25 -0800
+Subject: KVM: nVMX: Ignore limit checks on VMX instructions using flat segments
+
+From: Sean Christopherson <sean.j.christopherson@intel.com>
+
+commit 34333cc6c2cb021662fd32e24e618d1b86de95bf upstream.
+
+Regarding segments with a limit==0xffffffff, the SDM officially states:
+
+ When the effective limit is FFFFFFFFH (4 GBytes), these accesses may
+ or may not cause the indicated exceptions. Behavior is
+ implementation-specific and may vary from one execution to another.
+
+In practice, all CPUs that support VMX ignore limit checks for "flat
+segments", i.e. an expand-up data or code segment with base=0 and
+limit=0xffffffff. This is subtly different than wrapping the effective
+address calculation based on the address size, as the flat segment
+behavior also applies to accesses that would wrap the 4g boundary, e.g.
+a 4-byte access starting at 0xffffffff will access linear addresses
+0xffffffff, 0x0, 0x1 and 0x2.
+
+Fixes: f9eb4af67c9d ("KVM: nVMX: VMX instructions: add checks for #GP/#SS exceptions")
+Cc: stable@vger.kernel.org
+Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/vmx.c | 12 +++++++++---
+ 1 file changed, 9 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -8251,10 +8251,16 @@ static int get_vmx_mem_address(struct kv
+ /* Protected mode: #GP(0)/#SS(0) if the segment is unusable.
+ */
+ exn = (s.unusable != 0);
+- /* Protected mode: #GP(0)/#SS(0) if the memory
+- * operand is outside the segment limit.
++
++ /*
++ * Protected mode: #GP(0)/#SS(0) if the memory operand is
++ * outside the segment limit. All CPUs that support VMX ignore
++ * limit checks for flat segments, i.e. segments with base==0,
++ * limit==0xffffffff and of type expand-up data or code.
+ */
+- exn = exn || (off + sizeof(u64) > s.limit);
++ if (!(s.base == 0 && s.limit == 0xffffffff &&
++ ((s.type & 8) || !(s.type & 4))))
++ exn = exn || (off + sizeof(u64) > s.limit);
+ }
+ if (exn) {
+ kvm_queue_exception_e(vcpu,
--- /dev/null
+From 946c522b603f281195af1df91837a1d4d1eb3bc9 Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <sean.j.christopherson@intel.com>
+Date: Wed, 23 Jan 2019 14:39:23 -0800
+Subject: KVM: nVMX: Sign extend displacements of VMX instr's mem operands
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Sean Christopherson <sean.j.christopherson@intel.com>
+
+commit 946c522b603f281195af1df91837a1d4d1eb3bc9 upstream.
+
+The VMCS.EXIT_QUALIFCATION field reports the displacements of memory
+operands for various instructions, including VMX instructions, as a
+naturally sized unsigned value, but masks the value by the addr size,
+e.g. given a ModRM encoded as -0x28(%ebp), the -0x28 displacement is
+reported as 0xffffffd8 for a 32-bit address size. Despite some weird
+wording regarding sign extension, the SDM explicitly states that bits
+beyond the instructions address size are undefined:
+
+ In all cases, bits of this field beyond the instruction’s address
+ size are undefined.
+
+Failure to sign extend the displacement results in KVM incorrectly
+treating a negative displacement as a large positive displacement when
+the address size of the VMX instruction is smaller than KVM's native
+size, e.g. a 32-bit address size on a 64-bit KVM.
+
+The very original decoding, added by commit 064aea774768 ("KVM: nVMX:
+Decoding memory operands of VMX instructions"), sort of modeled sign
+extension by truncating the final virtual/linear address for a 32-bit
+address size. I.e. it messed up the effective address but made it work
+by adjusting the final address.
+
+When segmentation checks were added, the truncation logic was kept
+as-is and no sign extension logic was introduced. In other words, it
+kept calculating the wrong effective address while mostly generating
+the correct virtual/linear address. As the effective address is what's
+used in the segment limit checks, this results in KVM incorreclty
+injecting #GP/#SS faults due to non-existent segment violations when
+a nested VMM uses negative displacements with an address size smaller
+than KVM's native address size.
+
+Using the -0x28(%ebp) example, an EBP value of 0x1000 will result in
+KVM using 0x100000fd8 as the effective address when checking for a
+segment limit violation. This causes a 100% failure rate when running
+a 32-bit KVM build as L1 on top of a 64-bit KVM L0.
+
+Fixes: f9eb4af67c9d ("KVM: nVMX: VMX instructions: add checks for #GP/#SS exceptions")
+Cc: stable@vger.kernel.org
+Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/vmx.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -8184,6 +8184,10 @@ static int get_vmx_mem_address(struct kv
+ /* Addr = segment_base + offset */
+ /* offset = base + [index * scale] + displacement */
+ off = exit_qualification; /* holds the displacement */
++ if (addr_size == 1)
++ off = (gva_t)sign_extend64(off, 31);
++ else if (addr_size == 0)
++ off = (gva_t)sign_extend64(off, 15);
+ if (base_is_valid)
+ off += kvm_register_read(vcpu, base_reg);
+ if (index_is_valid)
--- /dev/null
+From e1359e2beb8b0a1188abc997273acbaedc8ee791 Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <sean.j.christopherson@intel.com>
+Date: Tue, 5 Feb 2019 13:01:12 -0800
+Subject: KVM: x86/mmu: Detect MMIO generation wrap in any address space
+
+From: Sean Christopherson <sean.j.christopherson@intel.com>
+
+commit e1359e2beb8b0a1188abc997273acbaedc8ee791 upstream.
+
+The check to detect a wrap of the MMIO generation explicitly looks for a
+generation number of zero. Now that unique memslots generation numbers
+are assigned to each address space, only address space 0 will get a
+generation number of exactly zero when wrapping. E.g. when address
+space 1 goes from 0x7fffe to 0x80002, the MMIO generation number will
+wrap to 0x2. Adjust the MMIO generation to strip the address space
+modifier prior to checking for a wrap.
+
+Fixes: 4bd518f1598d ("KVM: use separate generations for each address space")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/mmu.c | 21 +++++++++++++++++++--
+ 1 file changed, 19 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -5776,11 +5776,28 @@ static bool kvm_has_zapped_obsolete_page
+
+ void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
+ {
++ gen &= MMIO_GEN_MASK;
++
++ /*
++ * Shift to eliminate the "update in-progress" flag, which isn't
++ * included in the spte's generation number.
++ */
++ gen >>= 1;
++
++ /*
++ * Generation numbers are incremented in multiples of the number of
++ * address spaces in order to provide unique generations across all
++ * address spaces. Strip what is effectively the address space
++ * modifier prior to checking for a wrap of the MMIO generation so
++ * that a wrap in any address space is detected.
++ */
++ gen &= ~((u64)KVM_ADDRESS_SPACE_NUM - 1);
++
+ /*
+- * The very rare case: if the generation-number is round,
++ * The very rare case: if the MMIO generation number has wrapped,
+ * zap all shadow pages.
+ */
+- if (unlikely((gen & MMIO_GEN_MASK) == 0)) {
++ if (unlikely(gen == 0)) {
+ kvm_debug_ratelimited("kvm: zapping shadow pages for mmio generation wraparound\n");
+ kvm_mmu_invalidate_zap_all_pages(kvm);
+ }
--- /dev/null
+From ddfd1730fd829743e41213e32ccc8b4aa6dc8325 Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <sean.j.christopherson@intel.com>
+Date: Tue, 5 Feb 2019 13:01:13 -0800
+Subject: KVM: x86/mmu: Do not cache MMIO accesses while memslots are in flux
+
+From: Sean Christopherson <sean.j.christopherson@intel.com>
+
+commit ddfd1730fd829743e41213e32ccc8b4aa6dc8325 upstream.
+
+When installing new memslots, KVM sets bit 0 of the generation number to
+indicate that an update is in-progress. Until the update is complete,
+there are no guarantees as to whether a vCPU will see the old or the new
+memslots. Explicitly prevent caching MMIO accesses so as to avoid using
+an access cached from the old memslots after the new memslots have been
+installed.
+
+Note that it is unclear whether or not disabling caching during the
+update window is strictly necessary as there is no definitive
+documentation as to what ordering guarantees KVM provides with respect
+to updating memslots. That being said, the MMIO spte code does not
+allow reusing sptes created while an update is in-progress, and the
+associated documentation explicitly states:
+
+ We do not want to use an MMIO sptes created with an odd generation
+ number, ... If KVM is unlucky and creates an MMIO spte while the
+ low bit is 1, the next access to the spte will always be a cache miss.
+
+At the very least, disabling the per-vCPU MMIO cache during updates will
+make its behavior consistent with the MMIO spte behavior and
+documentation.
+
+Fixes: 56f17dd3fbc4 ("kvm: x86: fix stale mmio cache bug")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/x86.h | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kvm/x86.h
++++ b/arch/x86/kvm/x86.h
+@@ -181,6 +181,11 @@ static inline bool emul_is_noncanonical_
+ static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
+ gva_t gva, gfn_t gfn, unsigned access)
+ {
++ u64 gen = kvm_memslots(vcpu->kvm)->generation;
++
++ if (unlikely(gen & 1))
++ return;
++
+ /*
+ * If this is a shadow nested page table, the "GVA" is
+ * actually a nGPA.
+@@ -188,7 +193,7 @@ static inline void vcpu_cache_mmio_info(
+ vcpu->arch.mmio_gva = mmu_is_nested(vcpu) ? 0 : gva & PAGE_MASK;
+ vcpu->arch.access = access;
+ vcpu->arch.mmio_gfn = gfn;
+- vcpu->arch.mmio_gen = kvm_memslots(vcpu->kvm)->generation;
++ vcpu->arch.mmio_gen = gen;
+ }
+
+ static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
--- /dev/null
+From 2e0fe66e0a136252f4d89dbbccdcb26deb867eb8 Mon Sep 17 00:00:00 2001
+From: Steve Longerbeam <slongerbeam@gmail.com>
+Date: Mon, 21 Jan 2019 21:35:50 -0200
+Subject: media: imx: csi: Disable CSI immediately after last EOF
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Steve Longerbeam <slongerbeam@gmail.com>
+
+commit 2e0fe66e0a136252f4d89dbbccdcb26deb867eb8 upstream.
+
+Disable the CSI immediately after receiving the last EOF before stream
+off (and thus before disabling the IDMA channel). Do this by moving the
+wait for EOF completion into a new function csi_idmac_wait_last_eof().
+
+This fixes a complete system hard lockup on the SabreAuto when streaming
+from the ADV7180, by repeatedly sending a stream off immediately followed
+by stream on:
+
+while true; do v4l2-ctl -d4 --stream-mmap --stream-count=3; done
+
+Eventually this either causes the system lockup or EOF timeouts at all
+subsequent stream on, until a system reset.
+
+The lockup occurs when disabling the IDMA channel at stream off. Disabling
+the CSI before disabling the IDMA channel appears to be a reliable fix for
+the hard lockup.
+
+Fixes: 4a34ec8e470cb ("[media] media: imx: Add CSI subdev driver")
+
+Reported-by: Gaël PORTAY <gael.portay@collabora.com>
+Signed-off-by: Steve Longerbeam <slongerbeam@gmail.com>
+Cc: stable@vger.kernel.org # for 4.13 and up
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Signed-off-by: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/staging/media/imx/imx-media-csi.c | 17 ++++++++++++++---
+ 1 file changed, 14 insertions(+), 3 deletions(-)
+
+--- a/drivers/staging/media/imx/imx-media-csi.c
++++ b/drivers/staging/media/imx/imx-media-csi.c
+@@ -626,7 +626,7 @@ out_put_ipu:
+ return ret;
+ }
+
+-static void csi_idmac_stop(struct csi_priv *priv)
++static void csi_idmac_wait_last_eof(struct csi_priv *priv)
+ {
+ unsigned long flags;
+ int ret;
+@@ -643,7 +643,10 @@ static void csi_idmac_stop(struct csi_pr
+ &priv->last_eof_comp, msecs_to_jiffies(IMX_MEDIA_EOF_TIMEOUT));
+ if (ret == 0)
+ v4l2_warn(&priv->sd, "wait last EOF timeout\n");
++}
+
++static void csi_idmac_stop(struct csi_priv *priv)
++{
+ devm_free_irq(priv->dev, priv->eof_irq, priv);
+ devm_free_irq(priv->dev, priv->nfb4eof_irq, priv);
+
+@@ -755,6 +758,16 @@ idmac_stop:
+
+ static void csi_stop(struct csi_priv *priv)
+ {
++ if (priv->dest == IPU_CSI_DEST_IDMAC)
++ csi_idmac_wait_last_eof(priv);
++
++ /*
++ * Disable the CSI asap, after syncing with the last EOF.
++ * Doing so after the IDMA channel is disabled has shown to
++ * create hard system-wide hangs.
++ */
++ ipu_csi_disable(priv->csi);
++
+ if (priv->dest == IPU_CSI_DEST_IDMAC) {
+ csi_idmac_stop(priv);
+
+@@ -762,8 +775,6 @@ static void csi_stop(struct csi_priv *pr
+ if (priv->fim)
+ imx_media_fim_set_stream(priv->fim, NULL, false);
+ }
+-
+- ipu_csi_disable(priv->csi);
+ }
+
+ static const struct csi_skip_desc csi_skip[12] = {
--- /dev/null
+From 4bc1ab41eee9d02ad2483bf8f51a7b72e3504eba Mon Sep 17 00:00:00 2001
+From: Steve Longerbeam <slongerbeam@gmail.com>
+Date: Mon, 21 Jan 2019 21:35:51 -0200
+Subject: media: imx: csi: Stop upstream before disabling IDMA channel
+
+From: Steve Longerbeam <slongerbeam@gmail.com>
+
+commit 4bc1ab41eee9d02ad2483bf8f51a7b72e3504eba upstream.
+
+Move upstream stream off to just after receiving the last EOF completion
+and disabling the CSI (and thus before disabling the IDMA channel) in
+csi_stop(). For symmetry also move upstream stream on to beginning of
+csi_start().
+
+Doing this makes csi_s_stream() more symmetric with prp_s_stream() which
+will require the same change to fix a hard lockup.
+
+Signed-off-by: Steve Longerbeam <slongerbeam@gmail.com>
+Cc: stable@vger.kernel.org # for 4.13 and up
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Signed-off-by: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/staging/media/imx/imx-media-csi.c | 25 +++++++++++++------------
+ 1 file changed, 13 insertions(+), 12 deletions(-)
+
+--- a/drivers/staging/media/imx/imx-media-csi.c
++++ b/drivers/staging/media/imx/imx-media-csi.c
+@@ -722,10 +722,16 @@ static int csi_start(struct csi_priv *pr
+
+ output_fi = &priv->frame_interval[priv->active_output_pad];
+
++ /* start upstream */
++ ret = v4l2_subdev_call(priv->src_sd, video, s_stream, 1);
++ ret = (ret && ret != -ENOIOCTLCMD) ? ret : 0;
++ if (ret)
++ return ret;
++
+ if (priv->dest == IPU_CSI_DEST_IDMAC) {
+ ret = csi_idmac_start(priv);
+ if (ret)
+- return ret;
++ goto stop_upstream;
+ }
+
+ ret = csi_setup(priv);
+@@ -753,6 +759,8 @@ fim_off:
+ idmac_stop:
+ if (priv->dest == IPU_CSI_DEST_IDMAC)
+ csi_idmac_stop(priv);
++stop_upstream:
++ v4l2_subdev_call(priv->src_sd, video, s_stream, 0);
+ return ret;
+ }
+
+@@ -768,6 +776,9 @@ static void csi_stop(struct csi_priv *pr
+ */
+ ipu_csi_disable(priv->csi);
+
++ /* stop upstream */
++ v4l2_subdev_call(priv->src_sd, video, s_stream, 0);
++
+ if (priv->dest == IPU_CSI_DEST_IDMAC) {
+ csi_idmac_stop(priv);
+
+@@ -935,23 +946,13 @@ static int csi_s_stream(struct v4l2_subd
+ goto update_count;
+
+ if (enable) {
+- /* upstream must be started first, before starting CSI */
+- ret = v4l2_subdev_call(priv->src_sd, video, s_stream, 1);
+- ret = (ret && ret != -ENOIOCTLCMD) ? ret : 0;
+- if (ret)
+- goto out;
+-
+ dev_dbg(priv->dev, "stream ON\n");
+ ret = csi_start(priv);
+- if (ret) {
+- v4l2_subdev_call(priv->src_sd, video, s_stream, 0);
++ if (ret)
+ goto out;
+- }
+ } else {
+ dev_dbg(priv->dev, "stream OFF\n");
+- /* CSI must be stopped first, then stop upstream */
+ csi_stop(priv);
+- v4l2_subdev_call(priv->src_sd, video, s_stream, 0);
+ }
+
+ update_count:
--- /dev/null
+From a19c22677377b87e4354f7306f46ad99bc982a9f Mon Sep 17 00:00:00 2001
+From: Steve Longerbeam <slongerbeam@gmail.com>
+Date: Mon, 21 Jan 2019 21:35:52 -0200
+Subject: media: imx: prpencvf: Stop upstream before disabling IDMA channel
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Steve Longerbeam <slongerbeam@gmail.com>
+
+commit a19c22677377b87e4354f7306f46ad99bc982a9f upstream.
+
+Upstream must be stopped immediately after receiving the last EOF and
+before disabling the IDMA channel. This can be accomplished by moving
+upstream stream off to just after receiving the last EOF completion in
+prp_stop(). For symmetry also move upstream stream on to end of
+prp_start().
+
+This fixes a complete system hard lockup on the SabreAuto when streaming
+from the ADV7180, by repeatedly sending a stream off immediately followed
+by stream on:
+
+while true; do v4l2-ctl -d1 --stream-mmap --stream-count=3; done
+
+Eventually this either causes the system lockup or EOF timeouts at all
+subsequent stream on, until a system reset.
+
+The lockup occurs when disabling the IDMA channel at stream off. Stopping
+the video data stream entering the IDMA channel before disabling the
+channel itself appears to be a reliable fix for the hard lockup.
+
+Fixes: f0d9c8924e2c3 ("[media] media: imx: Add IC subdev drivers")
+
+Reported-by: Gaël PORTAY <gael.portay@collabora.com>
+Tested-by: Gaël PORTAY <gael.portay@collabora.com>
+Signed-off-by: Steve Longerbeam <slongerbeam@gmail.com>
+Cc: stable@vger.kernel.org # for 4.13 and up
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Signed-off-by: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/staging/media/imx/imx-ic-prpencvf.c | 26 +++++++++++++++++---------
+ 1 file changed, 17 insertions(+), 9 deletions(-)
+
+--- a/drivers/staging/media/imx/imx-ic-prpencvf.c
++++ b/drivers/staging/media/imx/imx-ic-prpencvf.c
+@@ -680,12 +680,23 @@ static int prp_start(struct prp_priv *pr
+ goto out_free_nfb4eof_irq;
+ }
+
++ /* start upstream */
++ ret = v4l2_subdev_call(priv->src_sd, video, s_stream, 1);
++ ret = (ret && ret != -ENOIOCTLCMD) ? ret : 0;
++ if (ret) {
++ v4l2_err(&ic_priv->sd,
++ "upstream stream on failed: %d\n", ret);
++ goto out_free_eof_irq;
++ }
++
+ /* start the EOF timeout timer */
+ mod_timer(&priv->eof_timeout_timer,
+ jiffies + msecs_to_jiffies(IMX_MEDIA_EOF_TIMEOUT));
+
+ return 0;
+
++out_free_eof_irq:
++ devm_free_irq(ic_priv->dev, priv->eof_irq, priv);
+ out_free_nfb4eof_irq:
+ devm_free_irq(ic_priv->dev, priv->nfb4eof_irq, priv);
+ out_unsetup:
+@@ -717,6 +728,12 @@ static void prp_stop(struct prp_priv *pr
+ if (ret == 0)
+ v4l2_warn(&ic_priv->sd, "wait last EOF timeout\n");
+
++ /* stop upstream */
++ ret = v4l2_subdev_call(priv->src_sd, video, s_stream, 0);
++ if (ret && ret != -ENOIOCTLCMD)
++ v4l2_warn(&ic_priv->sd,
++ "upstream stream off failed: %d\n", ret);
++
+ devm_free_irq(ic_priv->dev, priv->eof_irq, priv);
+ devm_free_irq(ic_priv->dev, priv->nfb4eof_irq, priv);
+
+@@ -1148,15 +1165,6 @@ static int prp_s_stream(struct v4l2_subd
+ if (ret)
+ goto out;
+
+- /* start/stop upstream */
+- ret = v4l2_subdev_call(priv->src_sd, video, s_stream, enable);
+- ret = (ret && ret != -ENOIOCTLCMD) ? ret : 0;
+- if (ret) {
+- if (enable)
+- prp_stop(priv);
+- goto out;
+- }
+-
+ update_count:
+ priv->stream_count += enable ? 1 : -1;
+ if (priv->stream_count < 0)
--- /dev/null
+From 1b4fd9de6ec7f3722c2b3e08cc5ad171c11f93be Mon Sep 17 00:00:00 2001
+From: "French, Nicholas A" <naf@ou.edu>
+Date: Sun, 9 Dec 2018 02:11:18 -0500
+Subject: media: lgdt330x: fix lock status reporting
+
+From: French, Nicholas A <naf@ou.edu>
+
+commit 1b4fd9de6ec7f3722c2b3e08cc5ad171c11f93be upstream.
+
+A typo in code cleanup commit db9c1007bc07 ("media: lgdt330x: do
+some cleanups at status logic") broke the FE_HAS_LOCK reporting
+for 3303 chips by inadvertently modifying the register mask.
+
+The broken lock status is critical as it prevents
+cards from reporting signal strength, scanning for channels,
+and capturing video.
+
+Fix regression by reverting mask change.
+
+Cc: stable@vger.kernel.org # Kernel 4.17+
+Fixes: db9c1007bc07 ("media: lgdt330x: do some cleanups at status logic")
+Signed-off-by: Nick French <naf@ou.edu>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Tested-by: Adam Stylinski <kungfujesus06@gmail.com>
+Signed-off-by: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/media/dvb-frontends/lgdt330x.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/media/dvb-frontends/lgdt330x.c
++++ b/drivers/media/dvb-frontends/lgdt330x.c
+@@ -783,7 +783,7 @@ static int lgdt3303_read_status(struct d
+
+ if ((buf[0] & 0x02) == 0x00)
+ *status |= FE_HAS_SYNC;
+- if ((buf[0] & 0xfd) == 0x01)
++ if ((buf[0] & 0x01) == 0x01)
+ *status |= FE_HAS_VITERBI | FE_HAS_LOCK;
+ break;
+ default:
--- /dev/null
+From 9dd0627d8d62a7ddb001a75f63942d92b5336561 Mon Sep 17 00:00:00 2001
+From: Sakari Ailus <sakari.ailus@linux.intel.com>
+Date: Wed, 30 Jan 2019 05:09:41 -0500
+Subject: media: uvcvideo: Avoid NULL pointer dereference at the end of streaming
+
+From: Sakari Ailus <sakari.ailus@linux.intel.com>
+
+commit 9dd0627d8d62a7ddb001a75f63942d92b5336561 upstream.
+
+The UVC video driver converts the timestamp from hardware specific unit
+to one known by the kernel at the time when the buffer is dequeued. This
+is fine in general, but the streamoff operation consists of the
+following steps (among other things):
+
+1. uvc_video_clock_cleanup --- the hardware clock sample array is
+ released and the pointer to the array is set to NULL,
+
+2. buffers in active state are returned to the user and
+
+3. buf_finish callback is called on buffers that are prepared.
+ buf_finish includes calling uvc_video_clock_update that accesses the
+ hardware clock sample array.
+
+The above is serialised by a queue specific mutex. Address the problem
+by skipping the clock conversion if the hardware clock sample array is
+already released.
+
+Fixes: 9c0863b1cc48 ("[media] vb2: call buf_finish from __queue_cancel")
+
+Reported-by: Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
+Tested-by: Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
+Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Signed-off-by: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/media/usb/uvc/uvc_video.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/drivers/media/usb/uvc/uvc_video.c
++++ b/drivers/media/usb/uvc/uvc_video.c
+@@ -676,6 +676,14 @@ void uvc_video_clock_update(struct uvc_s
+ if (!uvc_hw_timestamps_param)
+ return;
+
++ /*
++ * We will get called from __vb2_queue_cancel() if there are buffers
++ * done but not dequeued by the user, but the sample array has already
++ * been released at that time. Just bail out in that case.
++ */
++ if (!clock->samples)
++ return;
++
+ spin_lock_irqsave(&clock->lock, flags);
+
+ if (clock->count < clock->size)
--- /dev/null
+From adc589d2a20808fb99d46a78175cd023f2040338 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Lucas=20A=2E=20M=2E=20Magalh=C3=A3es?= <lucmaga@gmail.com>
+Date: Mon, 21 Jan 2019 20:05:01 -0500
+Subject: media: vimc: Add vimc-streamer for stream control
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Lucas A. M. Magalhães <lucmaga@gmail.com>
+
+commit adc589d2a20808fb99d46a78175cd023f2040338 upstream.
+
+Add a linear pipeline logic for the stream control. It's created by
+walking backwards on the entity graph. When the stream starts it will
+simply loop through the pipeline calling the respective process_frame
+function of each entity.
+
+Fixes: f2fe89061d797 ("vimc: Virtual Media Controller core, capture
+and sensor")
+
+Cc: stable@vger.kernel.org # for v4.20
+Signed-off-by: Lucas A. M. Magalhães <lucmaga@gmail.com>
+Acked-by: Helen Koike <helen.koike@collabora.com>
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+[hverkuil-cisco@xs4all.nl: fixed small space-after-tab issue in the patch]
+Signed-off-by: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/media/platform/vimc/Makefile | 3
+ drivers/media/platform/vimc/vimc-capture.c | 18 +-
+ drivers/media/platform/vimc/vimc-common.c | 35 -----
+ drivers/media/platform/vimc/vimc-common.h | 15 --
+ drivers/media/platform/vimc/vimc-debayer.c | 26 ---
+ drivers/media/platform/vimc/vimc-scaler.c | 28 ----
+ drivers/media/platform/vimc/vimc-sensor.c | 56 +-------
+ drivers/media/platform/vimc/vimc-streamer.c | 188 ++++++++++++++++++++++++++++
+ drivers/media/platform/vimc/vimc-streamer.h | 38 +++++
+ 9 files changed, 260 insertions(+), 147 deletions(-)
+
+--- a/drivers/media/platform/vimc/Makefile
++++ b/drivers/media/platform/vimc/Makefile
+@@ -5,6 +5,7 @@ vimc_common-objs := vimc-common.o
+ vimc_debayer-objs := vimc-debayer.o
+ vimc_scaler-objs := vimc-scaler.o
+ vimc_sensor-objs := vimc-sensor.o
++vimc_streamer-objs := vimc-streamer.o
+
+ obj-$(CONFIG_VIDEO_VIMC) += vimc.o vimc_capture.o vimc_common.o vimc-debayer.o \
+- vimc_scaler.o vimc_sensor.o
++ vimc_scaler.o vimc_sensor.o vimc_streamer.o
+--- a/drivers/media/platform/vimc/vimc-capture.c
++++ b/drivers/media/platform/vimc/vimc-capture.c
+@@ -24,6 +24,7 @@
+ #include <media/videobuf2-vmalloc.h>
+
+ #include "vimc-common.h"
++#include "vimc-streamer.h"
+
+ #define VIMC_CAP_DRV_NAME "vimc-capture"
+
+@@ -44,7 +45,7 @@ struct vimc_cap_device {
+ spinlock_t qlock;
+ struct mutex lock;
+ u32 sequence;
+- struct media_pipeline pipe;
++ struct vimc_stream stream;
+ };
+
+ static const struct v4l2_pix_format fmt_default = {
+@@ -248,14 +249,13 @@ static int vimc_cap_start_streaming(stru
+ vcap->sequence = 0;
+
+ /* Start the media pipeline */
+- ret = media_pipeline_start(entity, &vcap->pipe);
++ ret = media_pipeline_start(entity, &vcap->stream.pipe);
+ if (ret) {
+ vimc_cap_return_all_buffers(vcap, VB2_BUF_STATE_QUEUED);
+ return ret;
+ }
+
+- /* Enable streaming from the pipe */
+- ret = vimc_pipeline_s_stream(&vcap->vdev.entity, 1);
++ ret = vimc_streamer_s_stream(&vcap->stream, &vcap->ved, 1);
+ if (ret) {
+ media_pipeline_stop(entity);
+ vimc_cap_return_all_buffers(vcap, VB2_BUF_STATE_QUEUED);
+@@ -273,8 +273,7 @@ static void vimc_cap_stop_streaming(stru
+ {
+ struct vimc_cap_device *vcap = vb2_get_drv_priv(vq);
+
+- /* Disable streaming from the pipe */
+- vimc_pipeline_s_stream(&vcap->vdev.entity, 0);
++ vimc_streamer_s_stream(&vcap->stream, &vcap->ved, 0);
+
+ /* Stop the media pipeline */
+ media_pipeline_stop(&vcap->vdev.entity);
+@@ -355,8 +354,8 @@ static void vimc_cap_comp_unbind(struct
+ kfree(vcap);
+ }
+
+-static void vimc_cap_process_frame(struct vimc_ent_device *ved,
+- struct media_pad *sink, const void *frame)
++static void *vimc_cap_process_frame(struct vimc_ent_device *ved,
++ const void *frame)
+ {
+ struct vimc_cap_device *vcap = container_of(ved, struct vimc_cap_device,
+ ved);
+@@ -370,7 +369,7 @@ static void vimc_cap_process_frame(struc
+ typeof(*vimc_buf), list);
+ if (!vimc_buf) {
+ spin_unlock(&vcap->qlock);
+- return;
++ return ERR_PTR(-EAGAIN);
+ }
+
+ /* Remove this entry from the list */
+@@ -391,6 +390,7 @@ static void vimc_cap_process_frame(struc
+ vb2_set_plane_payload(&vimc_buf->vb2.vb2_buf, 0,
+ vcap->format.sizeimage);
+ vb2_buffer_done(&vimc_buf->vb2.vb2_buf, VB2_BUF_STATE_DONE);
++ return NULL;
+ }
+
+ static int vimc_cap_comp_bind(struct device *comp, struct device *master,
+--- a/drivers/media/platform/vimc/vimc-common.c
++++ b/drivers/media/platform/vimc/vimc-common.c
+@@ -207,41 +207,6 @@ const struct vimc_pix_map *vimc_pix_map_
+ }
+ EXPORT_SYMBOL_GPL(vimc_pix_map_by_pixelformat);
+
+-int vimc_propagate_frame(struct media_pad *src, const void *frame)
+-{
+- struct media_link *link;
+-
+- if (!(src->flags & MEDIA_PAD_FL_SOURCE))
+- return -EINVAL;
+-
+- /* Send this frame to all sink pads that are direct linked */
+- list_for_each_entry(link, &src->entity->links, list) {
+- if (link->source == src &&
+- (link->flags & MEDIA_LNK_FL_ENABLED)) {
+- struct vimc_ent_device *ved = NULL;
+- struct media_entity *entity = link->sink->entity;
+-
+- if (is_media_entity_v4l2_subdev(entity)) {
+- struct v4l2_subdev *sd =
+- container_of(entity, struct v4l2_subdev,
+- entity);
+- ved = v4l2_get_subdevdata(sd);
+- } else if (is_media_entity_v4l2_video_device(entity)) {
+- struct video_device *vdev =
+- container_of(entity,
+- struct video_device,
+- entity);
+- ved = video_get_drvdata(vdev);
+- }
+- if (ved && ved->process_frame)
+- ved->process_frame(ved, link->sink, frame);
+- }
+- }
+-
+- return 0;
+-}
+-EXPORT_SYMBOL_GPL(vimc_propagate_frame);
+-
+ /* Helper function to allocate and initialize pads */
+ struct media_pad *vimc_pads_init(u16 num_pads, const unsigned long *pads_flag)
+ {
+--- a/drivers/media/platform/vimc/vimc-common.h
++++ b/drivers/media/platform/vimc/vimc-common.h
+@@ -113,24 +113,13 @@ struct vimc_pix_map {
+ struct vimc_ent_device {
+ struct media_entity *ent;
+ struct media_pad *pads;
+- void (*process_frame)(struct vimc_ent_device *ved,
+- struct media_pad *sink, const void *frame);
++ void * (*process_frame)(struct vimc_ent_device *ved,
++ const void *frame);
+ void (*vdev_get_format)(struct vimc_ent_device *ved,
+ struct v4l2_pix_format *fmt);
+ };
+
+ /**
+- * vimc_propagate_frame - propagate a frame through the topology
+- *
+- * @src: the source pad where the frame is being originated
+- * @frame: the frame to be propagated
+- *
+- * This function will call the process_frame callback from the vimc_ent_device
+- * struct of the nodes directly connected to the @src pad
+- */
+-int vimc_propagate_frame(struct media_pad *src, const void *frame);
+-
+-/**
+ * vimc_pads_init - initialize pads
+ *
+ * @num_pads: number of pads to initialize
+--- a/drivers/media/platform/vimc/vimc-debayer.c
++++ b/drivers/media/platform/vimc/vimc-debayer.c
+@@ -321,7 +321,6 @@ static void vimc_deb_set_rgb_mbus_fmt_rg
+ static int vimc_deb_s_stream(struct v4l2_subdev *sd, int enable)
+ {
+ struct vimc_deb_device *vdeb = v4l2_get_subdevdata(sd);
+- int ret;
+
+ if (enable) {
+ const struct vimc_pix_map *vpix;
+@@ -351,22 +350,10 @@ static int vimc_deb_s_stream(struct v4l2
+ if (!vdeb->src_frame)
+ return -ENOMEM;
+
+- /* Turn the stream on in the subdevices directly connected */
+- ret = vimc_pipeline_s_stream(&vdeb->sd.entity, 1);
+- if (ret) {
+- vfree(vdeb->src_frame);
+- vdeb->src_frame = NULL;
+- return ret;
+- }
+ } else {
+ if (!vdeb->src_frame)
+ return 0;
+
+- /* Disable streaming from the pipe */
+- ret = vimc_pipeline_s_stream(&vdeb->sd.entity, 0);
+- if (ret)
+- return ret;
+-
+ vfree(vdeb->src_frame);
+ vdeb->src_frame = NULL;
+ }
+@@ -480,9 +467,8 @@ static void vimc_deb_calc_rgb_sink(struc
+ }
+ }
+
+-static void vimc_deb_process_frame(struct vimc_ent_device *ved,
+- struct media_pad *sink,
+- const void *sink_frame)
++static void *vimc_deb_process_frame(struct vimc_ent_device *ved,
++ const void *sink_frame)
+ {
+ struct vimc_deb_device *vdeb = container_of(ved, struct vimc_deb_device,
+ ved);
+@@ -491,7 +477,7 @@ static void vimc_deb_process_frame(struc
+
+ /* If the stream in this node is not active, just return */
+ if (!vdeb->src_frame)
+- return;
++ return ERR_PTR(-EINVAL);
+
+ for (i = 0; i < vdeb->sink_fmt.height; i++)
+ for (j = 0; j < vdeb->sink_fmt.width; j++) {
+@@ -499,12 +485,8 @@ static void vimc_deb_process_frame(struc
+ vdeb->set_rgb_src(vdeb, i, j, rgb);
+ }
+
+- /* Propagate the frame through all source pads */
+- for (i = 1; i < vdeb->sd.entity.num_pads; i++) {
+- struct media_pad *pad = &vdeb->sd.entity.pads[i];
++ return vdeb->src_frame;
+
+- vimc_propagate_frame(pad, vdeb->src_frame);
+- }
+ }
+
+ static void vimc_deb_comp_unbind(struct device *comp, struct device *master,
+--- a/drivers/media/platform/vimc/vimc-scaler.c
++++ b/drivers/media/platform/vimc/vimc-scaler.c
+@@ -217,7 +217,6 @@ static const struct v4l2_subdev_pad_ops
+ static int vimc_sca_s_stream(struct v4l2_subdev *sd, int enable)
+ {
+ struct vimc_sca_device *vsca = v4l2_get_subdevdata(sd);
+- int ret;
+
+ if (enable) {
+ const struct vimc_pix_map *vpix;
+@@ -245,22 +244,10 @@ static int vimc_sca_s_stream(struct v4l2
+ if (!vsca->src_frame)
+ return -ENOMEM;
+
+- /* Turn the stream on in the subdevices directly connected */
+- ret = vimc_pipeline_s_stream(&vsca->sd.entity, 1);
+- if (ret) {
+- vfree(vsca->src_frame);
+- vsca->src_frame = NULL;
+- return ret;
+- }
+ } else {
+ if (!vsca->src_frame)
+ return 0;
+
+- /* Disable streaming from the pipe */
+- ret = vimc_pipeline_s_stream(&vsca->sd.entity, 0);
+- if (ret)
+- return ret;
+-
+ vfree(vsca->src_frame);
+ vsca->src_frame = NULL;
+ }
+@@ -346,26 +333,19 @@ static void vimc_sca_fill_src_frame(cons
+ vimc_sca_scale_pix(vsca, i, j, sink_frame);
+ }
+
+-static void vimc_sca_process_frame(struct vimc_ent_device *ved,
+- struct media_pad *sink,
+- const void *sink_frame)
++static void *vimc_sca_process_frame(struct vimc_ent_device *ved,
++ const void *sink_frame)
+ {
+ struct vimc_sca_device *vsca = container_of(ved, struct vimc_sca_device,
+ ved);
+- unsigned int i;
+
+ /* If the stream in this node is not active, just return */
+ if (!vsca->src_frame)
+- return;
++ return ERR_PTR(-EINVAL);
+
+ vimc_sca_fill_src_frame(vsca, sink_frame);
+
+- /* Propagate the frame through all source pads */
+- for (i = 1; i < vsca->sd.entity.num_pads; i++) {
+- struct media_pad *pad = &vsca->sd.entity.pads[i];
+-
+- vimc_propagate_frame(pad, vsca->src_frame);
+- }
++ return vsca->src_frame;
+ };
+
+ static void vimc_sca_comp_unbind(struct device *comp, struct device *master,
+--- a/drivers/media/platform/vimc/vimc-sensor.c
++++ b/drivers/media/platform/vimc/vimc-sensor.c
+@@ -16,8 +16,6 @@
+ */
+
+ #include <linux/component.h>
+-#include <linux/freezer.h>
+-#include <linux/kthread.h>
+ #include <linux/module.h>
+ #include <linux/mod_devicetable.h>
+ #include <linux/platform_device.h>
+@@ -201,38 +199,27 @@ static const struct v4l2_subdev_pad_ops
+ .set_fmt = vimc_sen_set_fmt,
+ };
+
+-static int vimc_sen_tpg_thread(void *data)
++static void *vimc_sen_process_frame(struct vimc_ent_device *ved,
++ const void *sink_frame)
+ {
+- struct vimc_sen_device *vsen = data;
+- unsigned int i;
+-
+- set_freezable();
+- set_current_state(TASK_UNINTERRUPTIBLE);
+-
+- for (;;) {
+- try_to_freeze();
+- if (kthread_should_stop())
+- break;
+-
+- tpg_fill_plane_buffer(&vsen->tpg, 0, 0, vsen->frame);
+-
+- /* Send the frame to all source pads */
+- for (i = 0; i < vsen->sd.entity.num_pads; i++)
+- vimc_propagate_frame(&vsen->sd.entity.pads[i],
+- vsen->frame);
++ struct vimc_sen_device *vsen = container_of(ved, struct vimc_sen_device,
++ ved);
++ const struct vimc_pix_map *vpix;
++ unsigned int frame_size;
+
+- /* 60 frames per second */
+- schedule_timeout(HZ/60);
+- }
++ /* Calculate the frame size */
++ vpix = vimc_pix_map_by_code(vsen->mbus_format.code);
++ frame_size = vsen->mbus_format.width * vpix->bpp *
++ vsen->mbus_format.height;
+
+- return 0;
++ tpg_fill_plane_buffer(&vsen->tpg, 0, 0, vsen->frame);
++ return vsen->frame;
+ }
+
+ static int vimc_sen_s_stream(struct v4l2_subdev *sd, int enable)
+ {
+ struct vimc_sen_device *vsen =
+ container_of(sd, struct vimc_sen_device, sd);
+- int ret;
+
+ if (enable) {
+ const struct vimc_pix_map *vpix;
+@@ -258,26 +245,8 @@ static int vimc_sen_s_stream(struct v4l2
+ /* configure the test pattern generator */
+ vimc_sen_tpg_s_format(vsen);
+
+- /* Initialize the image generator thread */
+- vsen->kthread_sen = kthread_run(vimc_sen_tpg_thread, vsen,
+- "%s-sen", vsen->sd.v4l2_dev->name);
+- if (IS_ERR(vsen->kthread_sen)) {
+- dev_err(vsen->dev, "%s: kernel_thread() failed\n",
+- vsen->sd.name);
+- vfree(vsen->frame);
+- vsen->frame = NULL;
+- return PTR_ERR(vsen->kthread_sen);
+- }
+ } else {
+- if (!vsen->kthread_sen)
+- return 0;
+-
+- /* Stop image generator */
+- ret = kthread_stop(vsen->kthread_sen);
+- if (ret)
+- return ret;
+
+- vsen->kthread_sen = NULL;
+ vfree(vsen->frame);
+ vsen->frame = NULL;
+ return 0;
+@@ -393,6 +362,7 @@ static int vimc_sen_comp_bind(struct dev
+ if (ret)
+ goto err_free_hdl;
+
++ vsen->ved.process_frame = vimc_sen_process_frame;
+ dev_set_drvdata(comp, &vsen->ved);
+ vsen->dev = comp;
+
+--- /dev/null
++++ b/drivers/media/platform/vimc/vimc-streamer.c
+@@ -0,0 +1,188 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * vimc-streamer.c Virtual Media Controller Driver
++ *
++ * Copyright (C) 2018 Lucas A. M. Magalhães <lucmaga@gmail.com>
++ *
++ */
++
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/freezer.h>
++#include <linux/kthread.h>
++
++#include "vimc-streamer.h"
++
++/**
++ * vimc_get_source_entity - get the entity connected with the first sink pad
++ *
++ * @ent: reference media_entity
++ *
++ * Helper function that returns the media entity containing the source pad
++ * linked with the first sink pad from the given media entity pad list.
++ */
++static struct media_entity *vimc_get_source_entity(struct media_entity *ent)
++{
++ struct media_pad *pad;
++ int i;
++
++ for (i = 0; i < ent->num_pads; i++) {
++ if (ent->pads[i].flags & MEDIA_PAD_FL_SOURCE)
++ continue;
++ pad = media_entity_remote_pad(&ent->pads[i]);
++ return pad ? pad->entity : NULL;
++ }
++ return NULL;
++}
++
++/*
++ * vimc_streamer_pipeline_terminate - Disable stream in all ved in stream
++ *
++ * @stream: the pointer to the stream structure with the pipeline to be
++ * disabled.
++ *
++ * Calls s_stream to disable the stream in each entity of the pipeline
++ *
++ */
++static void vimc_streamer_pipeline_terminate(struct vimc_stream *stream)
++{
++ struct media_entity *entity;
++ struct v4l2_subdev *sd;
++
++ while (stream->pipe_size) {
++ stream->pipe_size--;
++ entity = stream->ved_pipeline[stream->pipe_size]->ent;
++ entity = vimc_get_source_entity(entity);
++ stream->ved_pipeline[stream->pipe_size] = NULL;
++
++ if (!is_media_entity_v4l2_subdev(entity))
++ continue;
++
++ sd = media_entity_to_v4l2_subdev(entity);
++ v4l2_subdev_call(sd, video, s_stream, 0);
++ }
++}
++
++/*
++ * vimc_streamer_pipeline_init - initializes the stream structure
++ *
++ * @stream: the pointer to the stream structure to be initialized
++ * @ved: the pointer to the vimc entity initializing the stream
++ *
++ * Initializes the stream structure. Walks through the entity graph to
++ * construct the pipeline used later on the streamer thread.
++ * Calls s_stream to enable stream in all entities of the pipeline.
++ */
++static int vimc_streamer_pipeline_init(struct vimc_stream *stream,
++ struct vimc_ent_device *ved)
++{
++ struct media_entity *entity;
++ struct video_device *vdev;
++ struct v4l2_subdev *sd;
++ int ret = 0;
++
++ stream->pipe_size = 0;
++ while (stream->pipe_size < VIMC_STREAMER_PIPELINE_MAX_SIZE) {
++ if (!ved) {
++ vimc_streamer_pipeline_terminate(stream);
++ return -EINVAL;
++ }
++ stream->ved_pipeline[stream->pipe_size++] = ved;
++
++ entity = vimc_get_source_entity(ved->ent);
++ /* Check if the end of the pipeline was reached*/
++ if (!entity)
++ return 0;
++
++ if (is_media_entity_v4l2_subdev(entity)) {
++ sd = media_entity_to_v4l2_subdev(entity);
++ ret = v4l2_subdev_call(sd, video, s_stream, 1);
++ if (ret && ret != -ENOIOCTLCMD) {
++ vimc_streamer_pipeline_terminate(stream);
++ return ret;
++ }
++ ved = v4l2_get_subdevdata(sd);
++ } else {
++ vdev = container_of(entity,
++ struct video_device,
++ entity);
++ ved = video_get_drvdata(vdev);
++ }
++ }
++
++ vimc_streamer_pipeline_terminate(stream);
++ return -EINVAL;
++}
++
++static int vimc_streamer_thread(void *data)
++{
++ struct vimc_stream *stream = data;
++ int i;
++
++ set_freezable();
++ set_current_state(TASK_UNINTERRUPTIBLE);
++
++ for (;;) {
++ try_to_freeze();
++ if (kthread_should_stop())
++ break;
++
++ for (i = stream->pipe_size - 1; i >= 0; i--) {
++ stream->frame = stream->ved_pipeline[i]->process_frame(
++ stream->ved_pipeline[i],
++ stream->frame);
++ if (!stream->frame)
++ break;
++ if (IS_ERR(stream->frame))
++ break;
++ }
++ //wait for 60hz
++ schedule_timeout(HZ / 60);
++ }
++
++ return 0;
++}
++
++int vimc_streamer_s_stream(struct vimc_stream *stream,
++ struct vimc_ent_device *ved,
++ int enable)
++{
++ int ret;
++
++ if (!stream || !ved)
++ return -EINVAL;
++
++ if (enable) {
++ if (stream->kthread)
++ return 0;
++
++ ret = vimc_streamer_pipeline_init(stream, ved);
++ if (ret)
++ return ret;
++
++ stream->kthread = kthread_run(vimc_streamer_thread, stream,
++ "vimc-streamer thread");
++
++ if (IS_ERR(stream->kthread))
++ return PTR_ERR(stream->kthread);
++
++ } else {
++ if (!stream->kthread)
++ return 0;
++
++ ret = kthread_stop(stream->kthread);
++ if (ret)
++ return ret;
++
++ stream->kthread = NULL;
++
++ vimc_streamer_pipeline_terminate(stream);
++ }
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(vimc_streamer_s_stream);
++
++MODULE_DESCRIPTION("Virtual Media Controller Driver (VIMC) Streamer");
++MODULE_AUTHOR("Lucas A. M. Magalhães <lucmaga@gmail.com>");
++MODULE_LICENSE("GPL");
+--- /dev/null
++++ b/drivers/media/platform/vimc/vimc-streamer.h
+@@ -0,0 +1,38 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
++/*
++ * vimc-streamer.h Virtual Media Controller Driver
++ *
++ * Copyright (C) 2018 Lucas A. M. Magalhães <lucmaga@gmail.com>
++ *
++ */
++
++#ifndef _VIMC_STREAMER_H_
++#define _VIMC_STREAMER_H_
++
++#include <media/media-device.h>
++
++#include "vimc-common.h"
++
++#define VIMC_STREAMER_PIPELINE_MAX_SIZE 16
++
++struct vimc_stream {
++ struct media_pipeline pipe;
++ struct vimc_ent_device *ved_pipeline[VIMC_STREAMER_PIPELINE_MAX_SIZE];
++ unsigned int pipe_size;
++ u8 *frame;
++ struct task_struct *kthread;
++};
++
++/**
++ * vimc_streamer_s_streamer - start/stop the stream
++ *
++ * @stream: the pointer to the stream to start or stop
++ * @ved: The last entity of the streamer pipeline
++ * @enable: any non-zero number start the stream, zero stop
++ *
++ */
++int vimc_streamer_s_stream(struct vimc_stream *stream,
++ struct vimc_ent_device *ved,
++ int enable);
++
++#endif //_VIMC_STREAMER_H_
tpm-tpm_crb-avoid-unaligned-reads-in-crb_recv.patch
tpm-unify-the-send-callback-behaviour.patch
rcu-do-rcu-gp-kthread-self-wakeup-from-softirq-and-interrupt.patch
+media-imx-prpencvf-stop-upstream-before-disabling-idma-channel.patch
+media-lgdt330x-fix-lock-status-reporting.patch
+media-uvcvideo-avoid-null-pointer-dereference-at-the-end-of-streaming.patch
+media-vimc-add-vimc-streamer-for-stream-control.patch
+media-imx-csi-disable-csi-immediately-after-last-eof.patch
+media-imx-csi-stop-upstream-before-disabling-idma-channel.patch
+drm-fb-helper-generic-fix-drm_fbdev_client_restore.patch
+drm-radeon-evergreen_cs-fix-missing-break-in-switch-statement.patch
+drm-amd-powerplay-correct-power-reading-on-fiji.patch
+drm-amd-display-add-msse2-to-prevent-clang-from-emitting-libcalls-to-undefined-sw-fp-routines.patch
+drm-amd-display-don-t-call-dm_pp_-function-from-an-fpu-block.patch
+kvm-call-kvm_arch_memslots_updated-before-updating-memslots.patch
+kvm-x86-mmu-detect-mmio-generation-wrap-in-any-address-space.patch
+kvm-x86-mmu-do-not-cache-mmio-accesses-while-memslots-are-in-flux.patch
+kvm-nvmx-sign-extend-displacements-of-vmx-instr-s-mem-operands.patch
+kvm-nvmx-apply-addr-size-mask-to-effective-address-for-vmx-instructions.patch
+kvm-nvmx-ignore-limit-checks-on-vmx-instructions-using-flat-segments.patch