--- /dev/null
+From d60d0cff4ab01255b25375425745c3cff69558ad Mon Sep 17 00:00:00 2001
+From: Lihua Yao <ylhuajnu@outlook.com>
+Date: Tue, 10 Sep 2019 13:22:28 +0000
+Subject: ARM: dts: s3c64xx: Fix init order of clock providers
+
+From: Lihua Yao <ylhuajnu@outlook.com>
+
+commit d60d0cff4ab01255b25375425745c3cff69558ad upstream.
+
+fin_pll is the parent of clock-controller@7e00f000; specify the
+dependency to ensure the proper initialization order of the clock
+providers.
+
+without this patch:
+[ 0.000000] S3C6410 clocks: apll = 0, mpll = 0
+[ 0.000000] epll = 0, arm_clk = 0
+
+with this patch:
+[ 0.000000] S3C6410 clocks: apll = 532000000, mpll = 532000000
+[ 0.000000] epll = 24000000, arm_clk = 532000000
+
+Cc: <stable@vger.kernel.org>
+Fixes: 3f6d439f2022 ("clk: reverse default clk provider initialization order in of_clk_init()")
+Signed-off-by: Lihua Yao <ylhuajnu@outlook.com>
+Reviewed-by: Sylwester Nawrocki <s.nawrocki@samsung.com>
+Signed-off-by: Krzysztof Kozlowski <krzk@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/boot/dts/s3c6410-mini6410.dts | 4 ++++
+ arch/arm/boot/dts/s3c6410-smdk6410.dts | 4 ++++
+ 2 files changed, 8 insertions(+)
+
+--- a/arch/arm/boot/dts/s3c6410-mini6410.dts
++++ b/arch/arm/boot/dts/s3c6410-mini6410.dts
+@@ -167,6 +167,10 @@
+ };
+ };
+
++&clocks {
++ clocks = <&fin_pll>;
++};
++
+ &sdhci0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&sd0_clk>, <&sd0_cmd>, <&sd0_cd>, <&sd0_bus4>;
+--- a/arch/arm/boot/dts/s3c6410-smdk6410.dts
++++ b/arch/arm/boot/dts/s3c6410-smdk6410.dts
+@@ -71,6 +71,10 @@
+ };
+ };
+
++&clocks {
++ clocks = <&fin_pll>;
++};
++
+ &sdhci0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&sd0_clk>, <&sd0_cmd>, <&sd0_cd>, <&sd0_bus4>;
--- /dev/null
+From d70f7d31a9e2088e8a507194354d41ea10062994 Mon Sep 17 00:00:00 2001
+From: Dmitry Osipenko <digetx@gmail.com>
+Date: Tue, 30 Jul 2019 20:23:39 +0300
+Subject: ARM: tegra: Fix FLOW_CTLR_HALT register clobbering by tegra_resume()
+
+From: Dmitry Osipenko <digetx@gmail.com>
+
+commit d70f7d31a9e2088e8a507194354d41ea10062994 upstream.
+
+There is an unfortunate typo in the code that results in writing to
+FLOW_CTLR_HALT instead of FLOW_CTLR_CSR.
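+
+As an aside, a minimal user-space sketch of the effect (register names
+and indices below are made up for illustration, not the real Tegra flow
+controller layout): the value is read from the CSR slot but, because
+the offset is dropped from the store, written back to the first
+register in the block.
+
+  /* offset_typo.c - build with: cc -o offset_typo offset_typo.c */
+  #include <stdio.h>
+  #include <stdint.h>
+
+  #define HALT_IDX 0   /* hypothetical: first register of the block */
+  #define CSR_IDX  2   /* hypothetical: the register we meant to update */
+
+  int main(void)
+  {
+      uint32_t regs[8] = { [HALT_IDX] = 0x40000000, [CSR_IDX] = 0xffff };
+
+      uint32_t v = regs[CSR_IDX];   /* read-modify-write of CSR... */
+      v &= ~0x3FFDu;
+
+      regs[HALT_IDX] = v;           /* bug: offset dropped, HALT clobbered */
+      /* regs[CSR_IDX] = v;            intended store */
+
+      printf("HALT=%08x CSR=%08x\n", regs[HALT_IDX], regs[CSR_IDX]);
+      return 0;
+  }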
+
+Cc: <stable@vger.kernel.org>
+Acked-by: Peter De Schrijver <pdeschrijver@nvidia.com>
+Signed-off-by: Dmitry Osipenko <digetx@gmail.com>
+Signed-off-by: Thierry Reding <treding@nvidia.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/mach-tegra/reset-handler.S | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/arch/arm/mach-tegra/reset-handler.S
++++ b/arch/arm/mach-tegra/reset-handler.S
+@@ -56,16 +56,16 @@ ENTRY(tegra_resume)
+ cmp r6, #TEGRA20
+ beq 1f @ Yes
+ /* Clear the flow controller flags for this CPU. */
+- cpu_to_csr_reg r1, r0
++ cpu_to_csr_reg r3, r0
+ mov32 r2, TEGRA_FLOW_CTRL_BASE
+- ldr r1, [r2, r1]
++ ldr r1, [r2, r3]
+ /* Clear event & intr flag */
+ orr r1, r1, \
+ #FLOW_CTRL_CSR_INTR_FLAG | FLOW_CTRL_CSR_EVENT_FLAG
+ movw r0, #0x3FFD @ enable, cluster_switch, immed, bitmaps
+ @ & ext flags for CPU power mgnt
+ bic r1, r1, r0
+- str r1, [r2]
++ str r1, [r2, r3]
+ 1:
+
+ mov32 r9, 0xc09
--- /dev/null
+From 44805b0e62f15e90d233485420e1847133716bdc Mon Sep 17 00:00:00 2001
+From: Pavel Shilovsky <pshilov@microsoft.com>
+Date: Tue, 12 Nov 2019 17:16:35 -0800
+Subject: CIFS: Respect O_SYNC and O_DIRECT flags during reconnect
+
+From: Pavel Shilovsky <pshilov@microsoft.com>
+
+commit 44805b0e62f15e90d233485420e1847133716bdc upstream.
+
+Currently the client translates the O_SYNC and O_DIRECT flags
+into corresponding SMB create options when opening a file.
+The problem is that on reconnect, when the file is being
+re-opened, the client doesn't set those flags and that causes
+the server to reject re-open requests because the create options
+don't match. The latter means that any subsequent system
+call against that open file fails until the share is re-mounted.
+
+Fix this by properly setting SMB create options when
+re-opening files after reconnects.
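+
+For reference, a small stand-alone sketch (assuming a Linux/glibc
+toolchain) of why a single f_flags & O_SYNC test also picks up files
+opened with only O_DSYNC: O_SYNC is defined as __O_SYNC | O_DSYNC, so
+the O_DSYNC bit is a subset of O_SYNC. The exact octal values are
+platform specific.
+
+  /* o_sync_bits.c - build with: cc -o o_sync_bits o_sync_bits.c */
+  #define _GNU_SOURCE
+  #include <fcntl.h>
+  #include <stdio.h>
+
+  int main(void)
+  {
+      int f_flags = O_RDWR | O_DSYNC;  /* file opened with O_DSYNC only */
+
+      printf("O_SYNC  = %#o\n", O_SYNC);
+      printf("O_DSYNC = %#o\n", O_DSYNC);
+      printf("f_flags & O_SYNC -> %s\n",
+             (f_flags & O_SYNC) ? "write-through wanted" : "no sync flag");
+      return 0;
+  }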
+
+Fixes: 1013e760d10e6: ("SMB3: Don't ignore O_SYNC/O_DSYNC and O_DIRECT flags")
+Cc: Stable <stable@vger.kernel.org>
+Signed-off-by: Pavel Shilovsky <pshilov@microsoft.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/file.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -722,6 +722,13 @@ cifs_reopen_file(struct cifsFileInfo *cf
+ if (backup_cred(cifs_sb))
+ create_options |= CREATE_OPEN_BACKUP_INTENT;
+
++ /* O_SYNC also has bit for O_DSYNC so following check picks up either */
++ if (cfile->f_flags & O_SYNC)
++ create_options |= CREATE_WRITE_THROUGH;
++
++ if (cfile->f_flags & O_DIRECT)
++ create_options |= CREATE_NO_BUFFER;
++
+ if (server->ops->get_lease_key)
+ server->ops->get_lease_key(inode, &cfile->fid);
+
--- /dev/null
+From 474e559567fa631dea8fb8407ab1b6090c903755 Mon Sep 17 00:00:00 2001
+From: Hou Tao <houtao1@huawei.com>
+Date: Tue, 3 Dec 2019 19:42:58 +0800
+Subject: dm btree: increase rebalance threshold in __rebalance2()
+
+From: Hou Tao <houtao1@huawei.com>
+
+commit 474e559567fa631dea8fb8407ab1b6090c903755 upstream.
+
+We got the following warnings from thin_check during thin-pool setup:
+
+ $ thin_check /dev/vdb
+ examining superblock
+ examining devices tree
+ missing devices: [1, 84]
+ too few entries in btree_node: 41, expected at least 42 (block 138, max_entries = 126)
+ examining mapping tree
+
+The phenomenon is that the number of entries in one node of the
+details_info tree is less than (max_entries / 3). It can be easily
+reproduced by the following procedure:
+
+ $ create a new thin pool
+ $ assume the max entries of the details_info tree is 126
+ $ create 127 thin devices (e.g. 1~127) to make the root node full
+   and then split
+ $ remove the first 43 (e.g. 1~43) thin devices to make the children
+   rebalance repeatedly
+ $ stop the thin pool
+ $ thin_check
+
+The root cause is that the B-tree removal procedure in __rebalance2()
+doesn't guarantee the invariant that the minimum number of entries in
+a non-root node is >= (max_entries / 3).
+
+Simply fix the problem by increasing the rebalance threshold to
+make sure the number of entries in each child will be greater
+than or equal to (max_entries / 3 + 1), so no matter which
+child is used for removal, the number will still be valid.
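+
+A quick stand-alone arithmetic check of the two thresholds, using
+max_entries = 126 from the report above (an illustration, not the
+kernel code): the old threshold lets __rebalance2() leave a child with
+exactly max_entries / 3 entries, so the next removal from that child
+breaks the invariant, while the new threshold always leaves one spare
+entry per child.
+
+  /* rebalance_threshold.c */
+  #include <stdio.h>
+
+  int main(void)
+  {
+      unsigned max_entries = 126;
+      unsigned merge_threshold = max_entries / 3;          /* 42 */
+
+      unsigned old_threshold = 2 * merge_threshold + 1;    /* 85 */
+      unsigned new_threshold = 2 * (merge_threshold + 1);  /* 86 */
+
+      /* worst case: totals just reach the threshold, split evenly */
+      unsigned old_min_child = old_threshold / 2;          /* 42 */
+      unsigned new_min_child = new_threshold / 2;          /* 43 */
+
+      printf("old: child %u -> %u after one removal (%s)\n",
+             old_min_child, old_min_child - 1,
+             old_min_child - 1 >= merge_threshold ? "ok" : "invalid");
+      printf("new: child %u -> %u after one removal (%s)\n",
+             new_min_child, new_min_child - 1,
+             new_min_child - 1 >= merge_threshold ? "ok" : "invalid");
+      return 0;
+  }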
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Hou Tao <houtao1@huawei.com>
+Acked-by: Joe Thornber <ejt@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/persistent-data/dm-btree-remove.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+--- a/drivers/md/persistent-data/dm-btree-remove.c
++++ b/drivers/md/persistent-data/dm-btree-remove.c
+@@ -203,7 +203,13 @@ static void __rebalance2(struct dm_btree
+ struct btree_node *right = r->n;
+ uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
+ uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
+- unsigned threshold = 2 * merge_threshold(left) + 1;
++ /*
++ * Ensure the number of entries in each child will be greater
++ * than or equal to (max_entries / 3 + 1), so no matter which
++ * child is used for removal, the number will still be not
++ * less than (max_entries / 3).
++ */
++ unsigned int threshold = 2 * (merge_threshold(left) + 1);
+
+ if (nr_left + nr_right < threshold) {
+ /*
--- /dev/null
+From 694cfe7f31db36912725e63a38a5179c8628a496 Mon Sep 17 00:00:00 2001
+From: Nikos Tsironis <ntsironis@arrikto.com>
+Date: Wed, 4 Dec 2019 16:07:42 +0200
+Subject: dm thin: Flush data device before committing metadata
+
+From: Nikos Tsironis <ntsironis@arrikto.com>
+
+commit 694cfe7f31db36912725e63a38a5179c8628a496 upstream.
+
+The thin provisioning target maintains per thin device mappings that map
+virtual blocks to data blocks in the data device.
+
+When we write to a shared block, in case of internal snapshots, or
+provision a new block, in case of external snapshots, we copy the shared
+block to a new data block (COW), update the mapping for the relevant
+virtual block and then issue the write to the new data block.
+
+Suppose the data device has a volatile write-back cache and the
+following sequence of events occurs:
+
+1. We write to a shared block
+2. A new data block is allocated
+3. We copy the shared block to the new data block using kcopyd (COW)
+4. We insert the new mapping for the virtual block in the btree for that
+ thin device.
+5. The commit timeout expires and we commit the metadata, that now
+ includes the new mapping from step (4).
+6. The system crashes and the data device's cache has not been flushed,
+ meaning that the COWed data are lost.
+
+The next time we read that virtual block of the thin device we read it
+from the data block allocated in step (2), since the metadata have been
+successfully committed. The data are lost due to the crash, so we read
+garbage instead of the old, shared data.
+
+This has the following implications:
+
+1. In case of writes to shared blocks, with size smaller than the pool's
+ block size (which means we first copy the whole block and then issue
+ the smaller write), we corrupt data that the user never touched.
+
+2. In case of writes to shared blocks, with size equal to the device's
+ logical block size, we fail to provide atomic sector writes. When the
+ system recovers the user will read garbage from that sector instead
+ of the old data or the new data.
+
+3. Even for writes to shared blocks, with size equal to the pool's block
+ size (overwrites), after the system recovers, the written sectors
+ will contain garbage instead of a random mix of sectors containing
+ either old data or new data, thus we fail again to provide atomic
+ sector writes.
+
+4. Even when the user flushes the thin device, because we first commit
+ the metadata and then pass down the flush, the same risk for
+ corruption exists (if the system crashes after the metadata have been
+ committed but before the flush is passed down to the data device.)
+
+The only case which is unaffected is that of writes with size equal to
+the pool's block size and with the FUA flag set. But, because FUA writes
+trigger metadata commits, this case can trigger the corruption
+indirectly.
+
+Moreover, apart from internal and external snapshots, the same issue
+exists for newly provisioned blocks, when block zeroing is enabled.
+After the system recovers the provisioned blocks might contain garbage
+instead of zeroes.
+
+To solve this and avoid the potential data corruption we flush the
+pool's data device **before** committing its metadata.
+
+This ensures that the data blocks of any newly inserted mappings are
+properly written to non-volatile storage and won't be lost in case of a
+crash.
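+
+The ordering requirement can be pictured with a user-space analogue (a
+sketch only, with hypothetical file names and error handling omitted,
+not the dm-thin code): make the data durable first, then commit the
+metadata that references it.
+
+  /* wal_order.c - data first, then the metadata that points at it */
+  #include <fcntl.h>
+  #include <string.h>
+  #include <unistd.h>
+
+  int main(void)
+  {
+      int data = open("data.blk", O_WRONLY | O_CREAT, 0600);
+      int meta = open("meta.map", O_WRONLY | O_CREAT, 0600);
+      const char *blk = "new data block";
+      const char *map = "virt 7 -> data block 2";
+
+      write(data, blk, strlen(blk));
+      fsync(data);            /* flush the "data device" first...      */
+
+      write(meta, map, strlen(map));
+      fsync(meta);            /* ...then commit the "metadata"; a crash
+                               * in between leaves unreferenced data,
+                               * never a mapping that points at garbage */
+      close(data);
+      close(meta);
+      return 0;
+  }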
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Nikos Tsironis <ntsironis@arrikto.com>
+Acked-by: Joe Thornber <ejt@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-thin.c | 42 ++++++++++++++++++++++++++++++++++++++++--
+ 1 file changed, 40 insertions(+), 2 deletions(-)
+
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -327,6 +327,7 @@ struct pool_c {
+ dm_block_t low_water_blocks;
+ struct pool_features requested_pf; /* Features requested during table load */
+ struct pool_features adjusted_pf; /* Features used after adjusting for constituent devices */
++ struct bio flush_bio;
+ };
+
+ /*
+@@ -2403,8 +2404,16 @@ static void process_deferred_bios(struct
+ while ((bio = bio_list_pop(&bio_completions)))
+ bio_endio(bio);
+
+- while ((bio = bio_list_pop(&bios)))
+- generic_make_request(bio);
++ while ((bio = bio_list_pop(&bios))) {
++ /*
++ * The data device was flushed as part of metadata commit,
++ * so complete redundant flushes immediately.
++ */
++ if (bio->bi_opf & REQ_PREFLUSH)
++ bio_endio(bio);
++ else
++ generic_make_request(bio);
++ }
+ }
+
+ static void do_worker(struct work_struct *ws)
+@@ -3136,6 +3145,7 @@ static void pool_dtr(struct dm_target *t
+ __pool_dec(pt->pool);
+ dm_put_device(ti, pt->metadata_dev);
+ dm_put_device(ti, pt->data_dev);
++ bio_uninit(&pt->flush_bio);
+ kfree(pt);
+
+ mutex_unlock(&dm_thin_pool_table.mutex);
+@@ -3201,6 +3211,29 @@ static void metadata_low_callback(void *
+ dm_table_event(pool->ti->table);
+ }
+
++/*
++ * We need to flush the data device **before** committing the metadata.
++ *
++ * This ensures that the data blocks of any newly inserted mappings are
++ * properly written to non-volatile storage and won't be lost in case of a
++ * crash.
++ *
++ * Failure to do so can result in data corruption in the case of internal or
++ * external snapshots and in the case of newly provisioned blocks, when block
++ * zeroing is enabled.
++ */
++static int metadata_pre_commit_callback(void *context)
++{
++ struct pool_c *pt = context;
++ struct bio *flush_bio = &pt->flush_bio;
++
++ bio_reset(flush_bio);
++ bio_set_dev(flush_bio, pt->data_dev->bdev);
++ flush_bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
++
++ return submit_bio_wait(flush_bio);
++}
++
+ static sector_t get_dev_size(struct block_device *bdev)
+ {
+ return i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
+@@ -3369,6 +3402,7 @@ static int pool_ctr(struct dm_target *ti
+ pt->data_dev = data_dev;
+ pt->low_water_blocks = low_water_blocks;
+ pt->adjusted_pf = pt->requested_pf = pf;
++ bio_init(&pt->flush_bio, NULL, 0);
+ ti->num_flush_bios = 1;
+
+ /*
+@@ -3395,6 +3429,10 @@ static int pool_ctr(struct dm_target *ti
+ if (r)
+ goto out_flags_changed;
+
++ dm_pool_register_pre_commit_callback(pt->pool->pmd,
++ metadata_pre_commit_callback,
++ pt);
++
+ pt->callbacks.congested_fn = pool_is_congested;
+ dm_table_add_target_callbacks(ti->table, &pt->callbacks);
+
--- /dev/null
+From ecda7c0280e6b3398459dc589b9a41c1adb45529 Mon Sep 17 00:00:00 2001
+From: Nikos Tsironis <ntsironis@arrikto.com>
+Date: Wed, 4 Dec 2019 16:07:41 +0200
+Subject: dm thin metadata: Add support for a pre-commit callback
+
+From: Nikos Tsironis <ntsironis@arrikto.com>
+
+commit ecda7c0280e6b3398459dc589b9a41c1adb45529 upstream.
+
+Add support for one pre-commit callback which is run right before the
+metadata are committed.
+
+This allows the thin provisioning target to run a callback before the
+metadata are committed and is required by the next commit.
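+
+The shape of the interface is a single function pointer plus an opaque
+context, invoked right before the commit. A stand-alone sketch of that
+pattern (names are illustrative, not the dm-thin-metadata API):
+
+  /* pre_commit.c */
+  #include <stdio.h>
+
+  typedef int (*pre_commit_fn)(void *context);
+
+  struct metadata {
+      pre_commit_fn pre_commit;     /* NULL until registered */
+      void *pre_commit_context;
+  };
+
+  static int commit(struct metadata *md)
+  {
+      if (md->pre_commit) {
+          int r = md->pre_commit(md->pre_commit_context);
+          if (r < 0)
+              return r;             /* abort the commit */
+      }
+      printf("metadata committed\n");
+      return 0;
+  }
+
+  static int flush_data(void *context)
+  {
+      printf("flushing data device %s\n", (const char *)context);
+      return 0;
+  }
+
+  int main(void)
+  {
+      struct metadata md = { .pre_commit = flush_data,
+                             .pre_commit_context = (void *)"vdb" };
+      return commit(&md);
+  }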
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Nikos Tsironis <ntsironis@arrikto.com>
+Acked-by: Joe Thornber <ejt@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-thin-metadata.c | 29 +++++++++++++++++++++++++++++
+ drivers/md/dm-thin-metadata.h | 7 +++++++
+ 2 files changed, 36 insertions(+)
+
+--- a/drivers/md/dm-thin-metadata.c
++++ b/drivers/md/dm-thin-metadata.c
+@@ -189,6 +189,15 @@ struct dm_pool_metadata {
+ sector_t data_block_size;
+
+ /*
++ * Pre-commit callback.
++ *
++ * This allows the thin provisioning target to run a callback before
++ * the metadata are committed.
++ */
++ dm_pool_pre_commit_fn pre_commit_fn;
++ void *pre_commit_context;
++
++ /*
+ * We reserve a section of the metadata for commit overhead.
+ * All reported space does *not* include this.
+ */
+@@ -791,6 +800,14 @@ static int __commit_transaction(struct d
+ */
+ BUILD_BUG_ON(sizeof(struct thin_disk_superblock) > 512);
+
++ if (pmd->pre_commit_fn) {
++ r = pmd->pre_commit_fn(pmd->pre_commit_context);
++ if (r < 0) {
++ DMERR("pre-commit callback failed");
++ return r;
++ }
++ }
++
+ r = __write_changed_details(pmd);
+ if (r < 0)
+ return r;
+@@ -864,6 +881,8 @@ struct dm_pool_metadata *dm_pool_metadat
+ pmd->fail_io = false;
+ pmd->bdev = bdev;
+ pmd->data_block_size = data_block_size;
++ pmd->pre_commit_fn = NULL;
++ pmd->pre_commit_context = NULL;
+
+ r = __create_persistent_data_objects(pmd, format_device);
+ if (r) {
+@@ -2008,6 +2027,16 @@ int dm_pool_register_metadata_threshold(
+ return r;
+ }
+
++void dm_pool_register_pre_commit_callback(struct dm_pool_metadata *pmd,
++ dm_pool_pre_commit_fn fn,
++ void *context)
++{
++ pmd_write_lock_in_core(pmd);
++ pmd->pre_commit_fn = fn;
++ pmd->pre_commit_context = context;
++ pmd_write_unlock(pmd);
++}
++
+ int dm_pool_metadata_set_needs_check(struct dm_pool_metadata *pmd)
+ {
+ int r;
+--- a/drivers/md/dm-thin-metadata.h
++++ b/drivers/md/dm-thin-metadata.h
+@@ -230,6 +230,13 @@ bool dm_pool_metadata_needs_check(struct
+ */
+ void dm_pool_issue_prefetches(struct dm_pool_metadata *pmd);
+
++/* Pre-commit callback */
++typedef int (*dm_pool_pre_commit_fn)(void *context);
++
++void dm_pool_register_pre_commit_callback(struct dm_pool_metadata *pmd,
++ dm_pool_pre_commit_fn fn,
++ void *context);
++
+ /*----------------------------------------------------------------*/
+
+ #endif
--- /dev/null
+From 6645d42d79d33e8a9fe262660a75d5f4556bbea9 Mon Sep 17 00:00:00 2001
+From: Navid Emamdoost <navid.emamdoost@gmail.com>
+Date: Fri, 22 Nov 2019 16:09:55 -0600
+Subject: dma-buf: Fix memory leak in sync_file_merge()
+
+From: Navid Emamdoost <navid.emamdoost@gmail.com>
+
+commit 6645d42d79d33e8a9fe262660a75d5f4556bbea9 upstream.
+
+In the implementation of sync_file_merge() the allocated sync_file is
+leaked if the number of fences overflows. Release the sync_file by
+jumping to the err label instead of returning NULL directly.
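+
+The fix is the usual goto-based error unwind; a generic stand-alone
+sketch of the idiom (not the sync_file code itself):
+
+  /* goto_cleanup.c */
+  #include <limits.h>
+  #include <stdlib.h>
+
+  static int merge(int a_num, int b_num)
+  {
+      char *combined = malloc(64);   /* stands in for the new sync_file */
+      if (!combined)
+          return -1;
+
+      if (a_num > INT_MAX - b_num)   /* the overflow check from the patch */
+          goto err;                  /* was a bare return, leaking 'combined' */
+
+      /* ...merge work would go here... */
+      free(combined);
+      return 0;
+
+  err:
+      free(combined);                /* common unwind path */
+      return -1;
+  }
+
+  int main(void)
+  {
+      return merge(INT_MAX, 1) == -1 ? 0 : 1;
+  }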
+
+Fixes: a02b9dc90d84 ("dma-buf/sync_file: refactor fence storage in struct sync_file")
+Signed-off-by: Navid Emamdoost <navid.emamdoost@gmail.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Link: https://patchwork.freedesktop.org/patch/msgid/20191122220957.30427-1-navid.emamdoost@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/dma-buf/sync_file.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/dma-buf/sync_file.c
++++ b/drivers/dma-buf/sync_file.c
+@@ -230,7 +230,7 @@ static struct sync_file *sync_file_merge
+ a_fences = get_fences(a, &a_num_fences);
+ b_fences = get_fences(b, &b_num_fences);
+ if (a_num_fences > INT_MAX - b_num_fences)
+- return NULL;
++ goto err;
+
+ num_fences = a_num_fences + b_num_fences;
+
--- /dev/null
+From 43cb86799ff03e9819c07f37f72f80f8246ad7ed Mon Sep 17 00:00:00 2001
+From: Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+Date: Sun, 8 Dec 2019 18:18:31 +0100
+Subject: drm: meson: venc: cvbs: fix CVBS mode matching
+
+From: Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+
+commit 43cb86799ff03e9819c07f37f72f80f8246ad7ed upstream.
+
+With commit 222ec1618c3ace ("drm: Add aspect ratio parsing in DRM
+layer") the drm core started honoring the picture_aspect_ratio field
+when comparing two drm_display_modes. Prior to that it was ignored.
+When the CVBS encoder driver was initially submitted there was no aspect
+ratio check.
+
+Switch from drm_mode_equal() to drm_mode_match() without
+DRM_MODE_MATCH_ASPECT_RATIO to fix "kmscube" and X.org output using the
+CVBS connector. When (for example) kmscube sets the output mode while
+using the CVBS connector it passes HDMI_PICTURE_ASPECT_NONE, making
+drm_mode_equal() fail as it includes the aspect ratio in the comparison.
+
+Prior to this patch kmscube reported:
+ failed to set mode: Invalid argument
+
+The CVBS mode checking in the sun4i (drivers/gpu/drm/sun4i/sun4i_tv.c
+sun4i_tv_mode_to_drm_mode) and ZTE (drivers/gpu/drm/zte/zx_tvenc.c
+tvenc_mode_{pal,ntsc}) drivers doesn't set the "picture_aspect_ratio"
+at all. The Meson VPU driver does not rely on the aspect ratio for the
+CVBS output, so we can safely decouple it from the hdmi_picture_aspect
+setting.
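+
+The idea behind drm_mode_match() with an explicit flag set can be
+sketched with a small stand-alone program (illustrative struct and
+flags, not the DRM API): only the requested fields are compared, so a
+field one side never fills in - the aspect ratio here - cannot make
+the lookup fail.
+
+  /* mode_match.c */
+  #include <stdbool.h>
+  #include <stdio.h>
+
+  #define MATCH_TIMINGS (1u << 0)
+  #define MATCH_CLOCK   (1u << 1)
+  #define MATCH_ASPECT  (1u << 2)   /* left out of the CVBS lookup */
+
+  struct mode {
+      int hdisplay, vdisplay, clock;
+      int aspect;                   /* 0 means "none/unset" */
+  };
+
+  static bool mode_match(const struct mode *a, const struct mode *b,
+                         unsigned flags)
+  {
+      if ((flags & MATCH_TIMINGS) &&
+          (a->hdisplay != b->hdisplay || a->vdisplay != b->vdisplay))
+          return false;
+      if ((flags & MATCH_CLOCK) && a->clock != b->clock)
+          return false;
+      if ((flags & MATCH_ASPECT) && a->aspect != b->aspect)
+          return false;
+      return true;
+  }
+
+  int main(void)
+  {
+      struct mode table = { 720, 576, 27000, 4 };  /* driver sets 4:3  */
+      struct mode req   = { 720, 576, 27000, 0 };  /* aspect left unset */
+
+      printf("with aspect:    %d\n", mode_match(&req, &table,
+             MATCH_TIMINGS | MATCH_CLOCK | MATCH_ASPECT));
+      printf("without aspect: %d\n", mode_match(&req, &table,
+             MATCH_TIMINGS | MATCH_CLOCK));
+      return 0;
+  }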
+
+Cc: <stable@vger.kernel.org>
+Fixes: 222ec1618c3ace ("drm: Add aspect ratio parsing in DRM layer")
+Fixes: bbbe775ec5b5da ("drm: Add support for Amlogic Meson Graphic Controller")
+Signed-off-by: Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+Acked-by: Neil Armstrong <narmstrong@baylibre.com>
+[narmstrong: squashed with drm: meson: venc: cvbs: deduplicate the meson_cvbs_mode lookup code]
+Signed-off-by: Neil Armstrong <narmstrong@baylibre.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20191208171832.1064772-3-martin.blumenstingl@googlemail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/meson/meson_venc_cvbs.c | 48 ++++++++++++++++++--------------
+ 1 file changed, 27 insertions(+), 21 deletions(-)
+
+--- a/drivers/gpu/drm/meson/meson_venc_cvbs.c
++++ b/drivers/gpu/drm/meson/meson_venc_cvbs.c
+@@ -75,6 +75,25 @@ struct meson_cvbs_mode meson_cvbs_modes[
+ },
+ };
+
++static const struct meson_cvbs_mode *
++meson_cvbs_get_mode(const struct drm_display_mode *req_mode)
++{
++ int i;
++
++ for (i = 0; i < MESON_CVBS_MODES_COUNT; ++i) {
++ struct meson_cvbs_mode *meson_mode = &meson_cvbs_modes[i];
++
++ if (drm_mode_match(req_mode, &meson_mode->mode,
++ DRM_MODE_MATCH_TIMINGS |
++ DRM_MODE_MATCH_CLOCK |
++ DRM_MODE_MATCH_FLAGS |
++ DRM_MODE_MATCH_3D_FLAGS))
++ return meson_mode;
++ }
++
++ return NULL;
++}
++
+ /* Connector */
+
+ static void meson_cvbs_connector_destroy(struct drm_connector *connector)
+@@ -147,14 +166,8 @@ static int meson_venc_cvbs_encoder_atomi
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+ {
+- int i;
+-
+- for (i = 0; i < MESON_CVBS_MODES_COUNT; ++i) {
+- struct meson_cvbs_mode *meson_mode = &meson_cvbs_modes[i];
+-
+- if (drm_mode_equal(&crtc_state->mode, &meson_mode->mode))
+- return 0;
+- }
++ if (meson_cvbs_get_mode(&crtc_state->mode))
++ return 0;
+
+ return -EINVAL;
+ }
+@@ -192,24 +205,17 @@ static void meson_venc_cvbs_encoder_mode
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+ {
++ const struct meson_cvbs_mode *meson_mode = meson_cvbs_get_mode(mode);
+ struct meson_venc_cvbs *meson_venc_cvbs =
+ encoder_to_meson_venc_cvbs(encoder);
+ struct meson_drm *priv = meson_venc_cvbs->priv;
+- int i;
+
+- for (i = 0; i < MESON_CVBS_MODES_COUNT; ++i) {
+- struct meson_cvbs_mode *meson_mode = &meson_cvbs_modes[i];
++ if (meson_mode) {
++ meson_venci_cvbs_mode_set(priv, meson_mode->enci);
+
+- if (drm_mode_equal(mode, &meson_mode->mode)) {
+- meson_venci_cvbs_mode_set(priv,
+- meson_mode->enci);
+-
+- /* Setup 27MHz vclk2 for ENCI and VDAC */
+- meson_vclk_setup(priv, MESON_VCLK_TARGET_CVBS,
+- MESON_VCLK_CVBS, MESON_VCLK_CVBS,
+- MESON_VCLK_CVBS, true);
+- break;
+- }
++ /* Setup 27MHz vclk2 for ENCI and VDAC */
++ meson_vclk_setup(priv, MESON_VCLK_TARGET_CVBS, MESON_VCLK_CVBS,
++ MESON_VCLK_CVBS, MESON_VCLK_CVBS, true);
+ }
+ }
+
--- /dev/null
+From 008037d4d972c9c47b273e40e52ae34f9d9e33e7 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Tue, 26 Nov 2019 09:41:46 -0500
+Subject: drm/radeon: fix r1xx/r2xx register checker for POT textures
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+commit 008037d4d972c9c47b273e40e52ae34f9d9e33e7 upstream.
+
+Shift and mask were reversed. Noticed by chance.
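+
+A stand-alone sketch of why the order matters, with an assumed field
+layout (a 4-bit width field at bits 8..11, not the exact Radeon
+register layout): the masks are in-place masks, so the value has to be
+masked first and shifted second.
+
+  /* mask_shift.c */
+  #include <stdint.h>
+  #include <stdio.h>
+
+  #define WIDTH_SHIFT 8
+  #define WIDTH_MASK  (0xf << WIDTH_SHIFT)   /* in-place mask */
+
+  int main(void)
+  {
+      uint32_t reg = 0xabcd0a42;   /* width field (bits 8..11) holds 0xa */
+
+      uint32_t wrong = (reg >> WIDTH_SHIFT) & WIDTH_MASK;  /* mask misplaced */
+      uint32_t right = (reg & WIDTH_MASK) >> WIDTH_SHIFT;  /* mask, then shift */
+
+      printf("wrong field value: %#x\n", wrong);   /* bogus, far too large */
+      printf("right field value: %#x -> width %u texels\n", right, 1u << right);
+      return 0;
+  }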
+
+Tested-by: Meelis Roos <mroos@linux.ee>
+Reviewed-by: Michel Dänzer <mdaenzer@redhat.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/radeon/r100.c | 4 ++--
+ drivers/gpu/drm/radeon/r200.c | 4 ++--
+ 2 files changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/gpu/drm/radeon/r100.c
++++ b/drivers/gpu/drm/radeon/r100.c
+@@ -1820,8 +1820,8 @@ static int r100_packet0_check(struct rad
+ track->textures[i].use_pitch = 1;
+ } else {
+ track->textures[i].use_pitch = 0;
+- track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
+- track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
++ track->textures[i].width = 1 << ((idx_value & RADEON_TXFORMAT_WIDTH_MASK) >> RADEON_TXFORMAT_WIDTH_SHIFT);
++ track->textures[i].height = 1 << ((idx_value & RADEON_TXFORMAT_HEIGHT_MASK) >> RADEON_TXFORMAT_HEIGHT_SHIFT);
+ }
+ if (idx_value & RADEON_TXFORMAT_CUBIC_MAP_ENABLE)
+ track->textures[i].tex_coord_type = 2;
+--- a/drivers/gpu/drm/radeon/r200.c
++++ b/drivers/gpu/drm/radeon/r200.c
+@@ -476,8 +476,8 @@ int r200_packet0_check(struct radeon_cs_
+ track->textures[i].use_pitch = 1;
+ } else {
+ track->textures[i].use_pitch = 0;
+- track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
+- track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
++ track->textures[i].width = 1 << ((idx_value & RADEON_TXFORMAT_WIDTH_MASK) >> RADEON_TXFORMAT_WIDTH_SHIFT);
++ track->textures[i].height = 1 << ((idx_value & RADEON_TXFORMAT_HEIGHT_MASK) >> RADEON_TXFORMAT_HEIGHT_SHIFT);
+ }
+ if (idx_value & R200_TXFORMAT_LOOKUP_DISABLE)
+ track->textures[i].lookup_disable = true;
--- /dev/null
+From c3dadc19b7564c732598b30d637c6f275c3b77b6 Mon Sep 17 00:00:00 2001
+From: Bjorn Andersson <bjorn.andersson@linaro.org>
+Date: Fri, 4 Oct 2019 15:27:01 -0700
+Subject: rpmsg: glink: Don't send pending rx_done during remove
+
+From: Bjorn Andersson <bjorn.andersson@linaro.org>
+
+commit c3dadc19b7564c732598b30d637c6f275c3b77b6 upstream.
+
+Attempting to transmit rx_done messages while the GLINK instance is
+being torn down will cause use-after-free bugs and memory leaks. So
+cancel the intent_work and free up the pending intents.
+
+With this there are no concurrent accessors of the channel left during
+qcom_glink_native_remove() and there is therefore no need to hold the
+spinlock during this operation - which would prohibit the use of
+cancel_work_sync() in the release function. So remove it.
+
+Fixes: 1d2ea36eead9 ("rpmsg: glink: Add rx done command")
+Cc: stable@vger.kernel.org
+Acked-by: Chris Lew <clew@codeaurora.org>
+Tested-by: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+Signed-off-by: Bjorn Andersson <bjorn.andersson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/rpmsg/qcom_glink_native.c | 15 ++++++++++++---
+ 1 file changed, 12 insertions(+), 3 deletions(-)
+
+--- a/drivers/rpmsg/qcom_glink_native.c
++++ b/drivers/rpmsg/qcom_glink_native.c
+@@ -243,11 +243,23 @@ static void qcom_glink_channel_release(s
+ {
+ struct glink_channel *channel = container_of(ref, struct glink_channel,
+ refcount);
++ struct glink_core_rx_intent *intent;
+ struct glink_core_rx_intent *tmp;
+ unsigned long flags;
+ int iid;
+
++ /* cancel pending rx_done work */
++ cancel_work_sync(&channel->intent_work);
++
+ spin_lock_irqsave(&channel->intent_lock, flags);
++ /* Free all non-reuse intents pending rx_done work */
++ list_for_each_entry_safe(intent, tmp, &channel->done_intents, node) {
++ if (!intent->reuse) {
++ kfree(intent->data);
++ kfree(intent);
++ }
++ }
++
+ idr_for_each_entry(&channel->liids, tmp, iid) {
+ kfree(tmp->data);
+ kfree(tmp);
+@@ -1597,7 +1609,6 @@ void qcom_glink_native_remove(struct qco
+ struct glink_channel *channel;
+ int cid;
+ int ret;
+- unsigned long flags;
+
+ disable_irq(glink->irq);
+ cancel_work_sync(&glink->rx_work);
+@@ -1606,7 +1617,6 @@ void qcom_glink_native_remove(struct qco
+ if (ret)
+ dev_warn(glink->dev, "Can't remove GLINK devices: %d\n", ret);
+
+- spin_lock_irqsave(&glink->idr_lock, flags);
+ /* Release any defunct local channels, waiting for close-ack */
+ idr_for_each_entry(&glink->lcids, channel, cid)
+ kref_put(&channel->refcount, qcom_glink_channel_release);
+@@ -1617,7 +1627,6 @@ void qcom_glink_native_remove(struct qco
+
+ idr_destroy(&glink->lcids);
+ idr_destroy(&glink->rcids);
+- spin_unlock_irqrestore(&glink->idr_lock, flags);
+ mbox_free_channel(glink->mbox_chan);
+ }
+ EXPORT_SYMBOL_GPL(qcom_glink_native_remove);
--- /dev/null
+From b85f6b601407347f5425c4c058d1b7871f5bf4f0 Mon Sep 17 00:00:00 2001
+From: Arun Kumar Neelakantam <aneela@codeaurora.org>
+Date: Fri, 4 Oct 2019 15:26:57 -0700
+Subject: rpmsg: glink: Fix reuse intents memory leak issue
+
+From: Arun Kumar Neelakantam <aneela@codeaurora.org>
+
+commit b85f6b601407347f5425c4c058d1b7871f5bf4f0 upstream.
+
+Memory allocated for re-usable intents is not freed during channel
+cleanup, which causes a memory leak in the system.
+
+Check and free all re-usable memory to avoid the memory leak.
+
+Fixes: 933b45da5d1d ("rpmsg: glink: Add support for TX intents")
+Cc: stable@vger.kernel.org
+Acked-By: Chris Lew <clew@codeaurora.org>
+Tested-by: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+Signed-off-by: Arun Kumar Neelakantam <aneela@codeaurora.org>
+Reported-by: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+Signed-off-by: Bjorn Andersson <bjorn.andersson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/rpmsg/qcom_glink_native.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/drivers/rpmsg/qcom_glink_native.c
++++ b/drivers/rpmsg/qcom_glink_native.c
+@@ -243,10 +243,19 @@ static void qcom_glink_channel_release(s
+ {
+ struct glink_channel *channel = container_of(ref, struct glink_channel,
+ refcount);
++ struct glink_core_rx_intent *tmp;
+ unsigned long flags;
++ int iid;
+
+ spin_lock_irqsave(&channel->intent_lock, flags);
++ idr_for_each_entry(&channel->liids, tmp, iid) {
++ kfree(tmp->data);
++ kfree(tmp);
++ }
+ idr_destroy(&channel->liids);
++
++ idr_for_each_entry(&channel->riids, tmp, iid)
++ kfree(tmp);
+ idr_destroy(&channel->riids);
+ spin_unlock_irqrestore(&channel->intent_lock, flags);
+
--- /dev/null
+From f7e714988edaffe6ac578318e99501149b067ba0 Mon Sep 17 00:00:00 2001
+From: Chris Lew <clew@codeaurora.org>
+Date: Fri, 4 Oct 2019 15:27:00 -0700
+Subject: rpmsg: glink: Fix rpmsg_register_device err handling
+
+From: Chris Lew <clew@codeaurora.org>
+
+commit f7e714988edaffe6ac578318e99501149b067ba0 upstream.
+
+The device release function is set before registering with rpmsg. If
+rpmsg registration fails, the framework will call put_device(), which
+invokes the release function. The channel create logic does not need to
+free rpdev if rpmsg_register_device() fails and release is called.
+
+Fixes: b4f8e52b89f6 ("rpmsg: Introduce Qualcomm RPM glink driver")
+Cc: stable@vger.kernel.org
+Tested-by: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+Signed-off-by: Chris Lew <clew@codeaurora.org>
+Signed-off-by: Bjorn Andersson <bjorn.andersson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/rpmsg/qcom_glink_native.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+--- a/drivers/rpmsg/qcom_glink_native.c
++++ b/drivers/rpmsg/qcom_glink_native.c
+@@ -1400,15 +1400,13 @@ static int qcom_glink_rx_open(struct qco
+
+ ret = rpmsg_register_device(rpdev);
+ if (ret)
+- goto free_rpdev;
++ goto rcid_remove;
+
+ channel->rpdev = rpdev;
+ }
+
+ return 0;
+
+-free_rpdev:
+- kfree(rpdev);
+ rcid_remove:
+ spin_lock_irqsave(&glink->idr_lock, flags);
+ idr_remove(&glink->rcids, channel->rcid);
--- /dev/null
+From ac74ea01860170699fb3b6ea80c0476774c8e94f Mon Sep 17 00:00:00 2001
+From: Arun Kumar Neelakantam <aneela@codeaurora.org>
+Date: Fri, 4 Oct 2019 15:26:58 -0700
+Subject: rpmsg: glink: Fix use after free in open_ack TIMEOUT case
+
+From: Arun Kumar Neelakantam <aneela@codeaurora.org>
+
+commit ac74ea01860170699fb3b6ea80c0476774c8e94f upstream.
+
+An extra channel reference put when the remote sends OPEN_ACK after a
+timeout causes a use-after-free while handling the next remote CLOSE
+command.
+
+Remove the extra reference put in the timeout case to avoid the
+use-after-free.
+
+Fixes: b4f8e52b89f6 ("rpmsg: Introduce Qualcomm RPM glink driver")
+Cc: stable@vger.kernel.org
+Tested-by: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+Signed-off-by: Arun Kumar Neelakantam <aneela@codeaurora.org>
+Signed-off-by: Bjorn Andersson <bjorn.andersson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/rpmsg/qcom_glink_native.c | 7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+--- a/drivers/rpmsg/qcom_glink_native.c
++++ b/drivers/rpmsg/qcom_glink_native.c
+@@ -1104,13 +1104,12 @@ static int qcom_glink_create_remote(stru
+ close_link:
+ /*
+ * Send a close request to "undo" our open-ack. The close-ack will
+- * release the last reference.
++ * release qcom_glink_send_open_req() reference and the last reference
++ * will be relesed after receiving remote_close or transport unregister
++ * by calling qcom_glink_native_remove().
+ */
+ qcom_glink_send_close_req(glink, channel);
+
+- /* Release qcom_glink_send_open_req() reference */
+- kref_put(&channel->refcount, qcom_glink_channel_release);
+-
+ return ret;
+ }
+
--- /dev/null
+From 278bcb7300f61785dba63840bd2a8cf79f14554c Mon Sep 17 00:00:00 2001
+From: Bjorn Andersson <bjorn.andersson@linaro.org>
+Date: Fri, 4 Oct 2019 15:27:02 -0700
+Subject: rpmsg: glink: Free pending deferred work on remove
+
+From: Bjorn Andersson <bjorn.andersson@linaro.org>
+
+commit 278bcb7300f61785dba63840bd2a8cf79f14554c upstream.
+
+Just cancelling the deferred rx worker during GLINK instance teardown
+leaks any pending deferred commands, so free them.
+
+Fixes: b4f8e52b89f6 ("rpmsg: Introduce Qualcomm RPM glink driver")
+Cc: stable@vger.kernel.org
+Acked-by: Chris Lew <clew@codeaurora.org>
+Tested-by: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+Signed-off-by: Bjorn Andersson <bjorn.andersson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/rpmsg/qcom_glink_native.c | 14 +++++++++++++-
+ 1 file changed, 13 insertions(+), 1 deletion(-)
+
+--- a/drivers/rpmsg/qcom_glink_native.c
++++ b/drivers/rpmsg/qcom_glink_native.c
+@@ -1539,6 +1539,18 @@ static void qcom_glink_work(struct work_
+ }
+ }
+
++static void qcom_glink_cancel_rx_work(struct qcom_glink *glink)
++{
++ struct glink_defer_cmd *dcmd;
++ struct glink_defer_cmd *tmp;
++
++ /* cancel any pending deferred rx_work */
++ cancel_work_sync(&glink->rx_work);
++
++ list_for_each_entry_safe(dcmd, tmp, &glink->rx_queue, node)
++ kfree(dcmd);
++}
++
+ struct qcom_glink *qcom_glink_native_probe(struct device *dev,
+ unsigned long features,
+ struct qcom_glink_pipe *rx,
+@@ -1611,7 +1623,7 @@ void qcom_glink_native_remove(struct qco
+ int ret;
+
+ disable_irq(glink->irq);
+- cancel_work_sync(&glink->rx_work);
++ qcom_glink_cancel_rx_work(glink);
+
+ ret = device_for_each_child(glink->dev, NULL, qcom_glink_remove_device);
+ if (ret)
--- /dev/null
+From b646293e272816dd0719529dcebbd659de0722f7 Mon Sep 17 00:00:00 2001
+From: Chris Lew <clew@codeaurora.org>
+Date: Fri, 4 Oct 2019 15:26:59 -0700
+Subject: rpmsg: glink: Put an extra reference during cleanup
+
+From: Chris Lew <clew@codeaurora.org>
+
+commit b646293e272816dd0719529dcebbd659de0722f7 upstream.
+
+In a remote processor crash scenario, there is no guarantee the remote
+processor sent close requests before it went into a bad state. Remove
+the reference that is normally handled by the close command so that
+the channel resources can be released.
+
+Fixes: b4f8e52b89f6 ("rpmsg: Introduce Qualcomm RPM glink driver")
+Cc: stable@vger.kernel.org
+Tested-by: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+Signed-off-by: Chris Lew <clew@codeaurora.org>
+Reported-by: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+Signed-off-by: Bjorn Andersson <bjorn.andersson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/rpmsg/qcom_glink_native.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/rpmsg/qcom_glink_native.c
++++ b/drivers/rpmsg/qcom_glink_native.c
+@@ -1613,6 +1613,10 @@ void qcom_glink_native_remove(struct qco
+ idr_for_each_entry(&glink->lcids, channel, cid)
+ kref_put(&channel->refcount, qcom_glink_channel_release);
+
++ /* Release any defunct local channels, waiting for close-req */
++ idr_for_each_entry(&glink->rcids, channel, cid)
++ kref_put(&channel->refcount, qcom_glink_channel_release);
++
+ idr_destroy(&glink->lcids);
+ idr_destroy(&glink->rcids);
+ spin_unlock_irqrestore(&glink->idr_lock, flags);
--- /dev/null
+From 4623e8bf1de0b86e23a56cdb39a72f054e89c3bd Mon Sep 17 00:00:00 2001
+From: Chris Lew <clew@codeaurora.org>
+Date: Wed, 27 Jun 2018 18:19:57 -0700
+Subject: rpmsg: glink: Set tail pointer to 0 at end of FIFO
+
+From: Chris Lew <clew@codeaurora.org>
+
+commit 4623e8bf1de0b86e23a56cdb39a72f054e89c3bd upstream.
+
+When wrapping around the FIFO, the remote expects the tail pointer to
+be reset to 0 on the edge case where the tail equals the FIFO length.
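+
+The wrap-around arithmetic in isolation (a stand-alone sketch with an
+arbitrary FIFO length, not the glink code): with the old '>' test a
+tail that lands exactly on the FIFO length stays there instead of
+wrapping to 0, which is the edge case the remote side expects.
+
+  /* fifo_wrap.c */
+  #include <stdio.h>
+
+  #define FIFO_LEN 1024u   /* arbitrary length for the demo */
+
+  static unsigned advance_old(unsigned tail, unsigned count)
+  {
+      tail += count;
+      if (tail > FIFO_LEN)       /* misses tail == FIFO_LEN */
+          tail -= FIFO_LEN;
+      return tail;
+  }
+
+  static unsigned advance_new(unsigned tail, unsigned count)
+  {
+      tail += count;
+      if (tail >= FIFO_LEN)      /* wraps exactly at the end */
+          tail -= FIFO_LEN;
+      return tail;
+  }
+
+  int main(void)
+  {
+      unsigned tail = FIFO_LEN - 16;   /* 16 bytes short of the end */
+
+      printf("old: %u (remote expects 0)\n", advance_old(tail, 16));
+      printf("new: %u\n", advance_new(tail, 16));
+      return 0;
+  }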
+
+Fixes: caf989c350e8 ("rpmsg: glink: Introduce glink smem based transport")
+Cc: stable@vger.kernel.org
+Signed-off-by: Chris Lew <clew@codeaurora.org>
+Signed-off-by: Bjorn Andersson <bjorn.andersson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/rpmsg/qcom_glink_smem.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/rpmsg/qcom_glink_smem.c
++++ b/drivers/rpmsg/qcom_glink_smem.c
+@@ -119,7 +119,7 @@ static void glink_smem_rx_advance(struct
+ tail = le32_to_cpu(*pipe->tail);
+
+ tail += count;
+- if (tail > pipe->native.length)
++ if (tail >= pipe->native.length)
+ tail -= pipe->native.length;
+
+ *pipe->tail = cpu_to_le32(tail);
--- /dev/null
+From 5480e299b5ae57956af01d4839c9fc88a465eeab Mon Sep 17 00:00:00 2001
+From: Bart Van Assche <bvanassche@acm.org>
+Date: Mon, 9 Dec 2019 09:34:57 -0800
+Subject: scsi: iscsi: Fix a potential deadlock in the timeout handler
+
+From: Bart Van Assche <bvanassche@acm.org>
+
+commit 5480e299b5ae57956af01d4839c9fc88a465eeab upstream.
+
+Some time ago the block layer was modified such that timeout handlers are
+called from thread context instead of interrupt context. Make it safe to
+run the iSCSI timeout handler in thread context. This patch fixes the
+following lockdep complaint:
+
+================================
+WARNING: inconsistent lock state
+5.5.1-dbg+ #11 Not tainted
+--------------------------------
+inconsistent {IN-SOFTIRQ-W} -> {SOFTIRQ-ON-W} usage.
+kworker/7:1H/206 [HC0[0]:SC0[0]:HE1:SE1] takes:
+ffff88802d9827e8 (&(&session->frwd_lock)->rlock){+.?.}, at: iscsi_eh_cmd_timed_out+0xa6/0x6d0 [libiscsi]
+{IN-SOFTIRQ-W} state was registered at:
+ lock_acquire+0x106/0x240
+ _raw_spin_lock+0x38/0x50
+ iscsi_check_transport_timeouts+0x3e/0x210 [libiscsi]
+ call_timer_fn+0x132/0x470
+ __run_timers.part.0+0x39f/0x5b0
+ run_timer_softirq+0x63/0xc0
+ __do_softirq+0x12d/0x5fd
+ irq_exit+0xb3/0x110
+ smp_apic_timer_interrupt+0x131/0x3d0
+ apic_timer_interrupt+0xf/0x20
+ default_idle+0x31/0x230
+ arch_cpu_idle+0x13/0x20
+ default_idle_call+0x53/0x60
+ do_idle+0x38a/0x3f0
+ cpu_startup_entry+0x24/0x30
+ start_secondary+0x222/0x290
+ secondary_startup_64+0xa4/0xb0
+irq event stamp: 1383705
+hardirqs last enabled at (1383705): [<ffffffff81aace5c>] _raw_spin_unlock_irq+0x2c/0x50
+hardirqs last disabled at (1383704): [<ffffffff81aacb98>] _raw_spin_lock_irq+0x18/0x50
+softirqs last enabled at (1383690): [<ffffffffa0e2efea>] iscsi_queuecommand+0x76a/0xa20 [libiscsi]
+softirqs last disabled at (1383682): [<ffffffffa0e2e998>] iscsi_queuecommand+0x118/0xa20 [libiscsi]
+
+other info that might help us debug this:
+ Possible unsafe locking scenario:
+
+ CPU0
+ ----
+ lock(&(&session->frwd_lock)->rlock);
+ <Interrupt>
+ lock(&(&session->frwd_lock)->rlock);
+
+ *** DEADLOCK ***
+
+2 locks held by kworker/7:1H/206:
+ #0: ffff8880d57bf928 ((wq_completion)kblockd){+.+.}, at: process_one_work+0x472/0xab0
+ #1: ffff88802b9c7de8 ((work_completion)(&q->timeout_work)){+.+.}, at: process_one_work+0x476/0xab0
+
+stack backtrace:
+CPU: 7 PID: 206 Comm: kworker/7:1H Not tainted 5.5.1-dbg+ #11
+Hardware name: Bochs Bochs, BIOS Bochs 01/01/2011
+Workqueue: kblockd blk_mq_timeout_work
+Call Trace:
+ dump_stack+0xa5/0xe6
+ print_usage_bug.cold+0x232/0x23b
+ mark_lock+0x8dc/0xa70
+ __lock_acquire+0xcea/0x2af0
+ lock_acquire+0x106/0x240
+ _raw_spin_lock+0x38/0x50
+ iscsi_eh_cmd_timed_out+0xa6/0x6d0 [libiscsi]
+ scsi_times_out+0xf4/0x440 [scsi_mod]
+ scsi_timeout+0x1d/0x20 [scsi_mod]
+ blk_mq_check_expired+0x365/0x3a0
+ bt_iter+0xd6/0xf0
+ blk_mq_queue_tag_busy_iter+0x3de/0x650
+ blk_mq_timeout_work+0x1af/0x380
+ process_one_work+0x56d/0xab0
+ worker_thread+0x7a/0x5d0
+ kthread+0x1bc/0x210
+ ret_from_fork+0x24/0x30
+
+Fixes: 287922eb0b18 ("block: defer timeouts to a workqueue")
+Cc: Christoph Hellwig <hch@lst.de>
+Cc: Keith Busch <keith.busch@intel.com>
+Cc: Lee Duncan <lduncan@suse.com>
+Cc: Chris Leech <cleech@redhat.com>
+Cc: <stable@vger.kernel.org>
+Link: https://lore.kernel.org/r/20191209173457.187370-1-bvanassche@acm.org
+Signed-off-by: Bart Van Assche <bvanassche@acm.org>
+Reviewed-by: Lee Duncan <lduncan@suse.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/scsi/libiscsi.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/scsi/libiscsi.c
++++ b/drivers/scsi/libiscsi.c
+@@ -1983,7 +1983,7 @@ enum blk_eh_timer_return iscsi_eh_cmd_ti
+
+ ISCSI_DBG_EH(session, "scsi cmd %p timedout\n", sc);
+
+- spin_lock(&session->frwd_lock);
++ spin_lock_bh(&session->frwd_lock);
+ task = (struct iscsi_task *)sc->SCp.ptr;
+ if (!task) {
+ /*
+@@ -2110,7 +2110,7 @@ enum blk_eh_timer_return iscsi_eh_cmd_ti
+ done:
+ if (task)
+ task->last_timeout = jiffies;
+- spin_unlock(&session->frwd_lock);
++ spin_unlock_bh(&session->frwd_lock);
+ ISCSI_DBG_EH(session, "return %s\n", rc == BLK_EH_RESET_TIMER ?
+ "timer reset" : "shutdown or nh");
+ return rc;
pci-fix-intel-acs-quirk-updcr-register-address.patch
pci-msi-fix-incorrect-msi-x-masking-on-resume.patch
pci-apply-cavium-acs-quirk-to-thunderx2-and-thunderx3.patch
+xtensa-fix-tlb-sanity-checker.patch
+rpmsg-glink-set-tail-pointer-to-0-at-end-of-fifo.patch
+rpmsg-glink-fix-reuse-intents-memory-leak-issue.patch
+rpmsg-glink-fix-use-after-free-in-open_ack-timeout-case.patch
+rpmsg-glink-put-an-extra-reference-during-cleanup.patch
+rpmsg-glink-fix-rpmsg_register_device-err-handling.patch
+rpmsg-glink-don-t-send-pending-rx_done-during-remove.patch
+rpmsg-glink-free-pending-deferred-work-on-remove.patch
+cifs-respect-o_sync-and-o_direct-flags-during-reconnect.patch
+arm-dts-s3c64xx-fix-init-order-of-clock-providers.patch
+arm-tegra-fix-flow_ctlr_halt-register-clobbering-by-tegra_resume.patch
+vfio-pci-call-irq_bypass_unregister_producer-before-freeing-irq.patch
+dma-buf-fix-memory-leak-in-sync_file_merge.patch
+drm-meson-venc-cvbs-fix-cvbs-mode-matching.patch
+dm-btree-increase-rebalance-threshold-in-__rebalance2.patch
+dm-thin-metadata-add-support-for-a-pre-commit-callback.patch
+dm-thin-flush-data-device-before-committing-metadata.patch
+scsi-iscsi-fix-a-potential-deadlock-in-the-timeout-handler.patch
+drm-radeon-fix-r1xx-r2xx-register-checker-for-pot-textures.patch
--- /dev/null
+From d567fb8819162099035e546b11a736e29c2af0ea Mon Sep 17 00:00:00 2001
+From: Jiang Yi <giangyi@amazon.com>
+Date: Wed, 27 Nov 2019 17:49:10 +0100
+Subject: vfio/pci: call irq_bypass_unregister_producer() before freeing irq
+
+From: Jiang Yi <giangyi@amazon.com>
+
+commit d567fb8819162099035e546b11a736e29c2af0ea upstream.
+
+Since irq_bypass_register_producer() is called after request_irq(), we
+should do tear-down in reverse order: irq_bypass_unregister_producer()
+then free_irq().
+
+Specifically, free_irq() may release resources required by the
+irqbypass del_producer() callback. Notably, Marc Zyngier provided an
+example on arm64 with GICv4 that he indicates has the potential to
+wedge the hardware:
+
+ free_irq(irq)
+ __free_irq(irq)
+ irq_domain_deactivate_irq(irq)
+ its_irq_domain_deactivate()
+ [unmap the VLPI from the ITS]
+
+ kvm_arch_irq_bypass_del_producer(cons, prod)
+ kvm_vgic_v4_unset_forwarding(kvm, irq, ...)
+ its_unmap_vlpi(irq)
+ [Unmap the VLPI from the ITS (again), remap the original LPI]
+
+Signed-off-by: Jiang Yi <giangyi@amazon.com>
+Cc: stable@vger.kernel.org # v4.4+
+Fixes: 6d7425f109d26 ("vfio: Register/unregister irq_bypass_producer")
+Link: https://lore.kernel.org/kvm/20191127164910.15888-1-giangyi@amazon.com
+Reviewed-by: Marc Zyngier <maz@kernel.org>
+Reviewed-by: Eric Auger <eric.auger@redhat.com>
+[aw: commit log]
+Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/vfio/pci/vfio_pci_intrs.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/vfio/pci/vfio_pci_intrs.c
++++ b/drivers/vfio/pci/vfio_pci_intrs.c
+@@ -297,8 +297,8 @@ static int vfio_msi_set_vector_signal(st
+ irq = pci_irq_vector(pdev, vector);
+
+ if (vdev->ctx[vector].trigger) {
+- free_irq(irq, vdev->ctx[vector].trigger);
+ irq_bypass_unregister_producer(&vdev->ctx[vector].producer);
++ free_irq(irq, vdev->ctx[vector].trigger);
+ kfree(vdev->ctx[vector].name);
+ eventfd_ctx_put(vdev->ctx[vector].trigger);
+ vdev->ctx[vector].trigger = NULL;
--- /dev/null
+From 36de10c4788efc6efe6ff9aa10d38cb7eea4c818 Mon Sep 17 00:00:00 2001
+From: Max Filippov <jcmvbkbc@gmail.com>
+Date: Wed, 13 Nov 2019 13:18:31 -0800
+Subject: xtensa: fix TLB sanity checker
+
+From: Max Filippov <jcmvbkbc@gmail.com>
+
+commit 36de10c4788efc6efe6ff9aa10d38cb7eea4c818 upstream.
+
+Virtual and translated addresses retrieved by the xtensa TLB sanity
+checker must be consistent, i.e. correspond to the same state of the
+checked TLB entry. KASAN shadow memory is mapped dynamically using
+auto-refill TLB entries and thus may change TLB state between the
+virtual and translated address retrieval, resulting in false TLB
+insanity report.
+Move read_xtlb_translation close to read_xtlb_virtual to make sure that
+read values are consistent.
+
+Cc: stable@vger.kernel.org
+Fixes: a99e07ee5e88 ("xtensa: check TLB sanity on return to userspace")
+Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/xtensa/mm/tlb.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/xtensa/mm/tlb.c
++++ b/arch/xtensa/mm/tlb.c
+@@ -218,6 +218,8 @@ static int check_tlb_entry(unsigned w, u
+ unsigned tlbidx = w | (e << PAGE_SHIFT);
+ unsigned r0 = dtlb ?
+ read_dtlb_virtual(tlbidx) : read_itlb_virtual(tlbidx);
++ unsigned r1 = dtlb ?
++ read_dtlb_translation(tlbidx) : read_itlb_translation(tlbidx);
+ unsigned vpn = (r0 & PAGE_MASK) | (e << PAGE_SHIFT);
+ unsigned pte = get_pte_for_vaddr(vpn);
+ unsigned mm_asid = (get_rasid_register() >> 8) & ASID_MASK;
+@@ -233,8 +235,6 @@ static int check_tlb_entry(unsigned w, u
+ }
+
+ if (tlb_asid == mm_asid) {
+- unsigned r1 = dtlb ? read_dtlb_translation(tlbidx) :
+- read_itlb_translation(tlbidx);
+ if ((pte ^ r1) & PAGE_MASK) {
+ pr_err("%cTLB: way: %u, entry: %u, mapping: %08x->%08x, PTE: %08x\n",
+ dtlb ? 'D' : 'I', w, e, r0, r1, pte);