--- /dev/null
+From 800a7340ab7dd667edf95e74d8e4f23a17e87076 Mon Sep 17 00:00:00 2001
+From: Wenwen Wang <wang6495@umn.edu>
+Date: Wed, 3 Oct 2018 11:43:59 -0500
+Subject: dm ioctl: harden copy_params()'s copy_from_user() from malicious users
+
+From: Wenwen Wang <wang6495@umn.edu>
+
+commit 800a7340ab7dd667edf95e74d8e4f23a17e87076 upstream.
+
+In copy_params(), the struct 'dm_ioctl' is first copied from the user
+space buffer 'user' to 'param_kernel' and the field 'data_size' is
+checked against 'minimum_data_size' (size of 'struct dm_ioctl' payload
+up to its 'data' member). If the check fails, an error code EINVAL will be
+returned. Otherwise, param_kernel->data_size is used to do a second copy,
+which copies from the same user-space buffer to 'dmi'. After the second
+copy, only 'dmi->data_size' is checked against 'param_kernel->data_size'.
+Given that the buffer 'user' resides in the user space, a malicious
+user-space process can race to change the content in the buffer between
+the two copies. This way, the attacker can inject inconsistent data
+into 'dmi' (versus previously validated 'param_kernel').
+
+Fix redundant copying of 'minimum_data_size' from user-space buffer by
+using the first copy stored in 'param_kernel'. Also remove the
+'data_size' check after the second copy because it is now unnecessary.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Wenwen Wang <wang6495@umn.edu>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-ioctl.c | 18 ++++++------------
+ 1 file changed, 6 insertions(+), 12 deletions(-)
+
+--- a/drivers/md/dm-ioctl.c
++++ b/drivers/md/dm-ioctl.c
+@@ -1719,8 +1719,7 @@ static void free_params(struct dm_ioctl
+ }
+
+ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kernel,
+- int ioctl_flags,
+- struct dm_ioctl **param, int *param_flags)
++ int ioctl_flags, struct dm_ioctl **param, int *param_flags)
+ {
+ struct dm_ioctl *dmi;
+ int secure_data;
+@@ -1761,18 +1760,13 @@ static int copy_params(struct dm_ioctl _
+
+ *param_flags |= DM_PARAMS_MALLOC;
+
+- if (copy_from_user(dmi, user, param_kernel->data_size))
+- goto bad;
++ /* Copy from param_kernel (which was already copied from user) */
++ memcpy(dmi, param_kernel, minimum_data_size);
+
+-data_copied:
+- /*
+- * Abort if something changed the ioctl data while it was being copied.
+- */
+- if (dmi->data_size != param_kernel->data_size) {
+- DMERR("rejecting ioctl: data size modified while processing parameters");
++ if (copy_from_user(&dmi->data, (char __user *)user + minimum_data_size,
++ param_kernel->data_size - minimum_data_size))
+ goto bad;
+- }
+-
++data_copied:
+ /* Wipe the user buffer so we do not return it to userspace */
+ if (secure_data && clear_user(user, param_kernel->data_size))
+ goto bad;
--- /dev/null
+From 33c2865f8d011a2ca9f67124ddab9dc89382e9f1 Mon Sep 17 00:00:00 2001
+From: Damien Le Moal <damien.lemoal@wdc.com>
+Date: Wed, 17 Oct 2018 18:05:07 +0900
+Subject: dm zoned: fix metadata block ref counting
+
+From: Damien Le Moal <damien.lemoal@wdc.com>
+
+commit 33c2865f8d011a2ca9f67124ddab9dc89382e9f1 upstream.
+
+Since the ref field of struct dmz_mblock is always used with the
+spinlock of struct dmz_metadata locked, there is no need to use an
+atomic_t type. Change the type of the ref field to an unsigned
+integer.
+
+Fixes: 3b1a94c88b79 ("dm zoned: drive-managed zoned block device target")
+Cc: stable@vger.kernel.org
+Signed-off-by: Damien Le Moal <damien.lemoal@wdc.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-zoned-metadata.c | 20 +++++++++++---------
+ 1 file changed, 11 insertions(+), 9 deletions(-)
+
+--- a/drivers/md/dm-zoned-metadata.c
++++ b/drivers/md/dm-zoned-metadata.c
+@@ -99,7 +99,7 @@ struct dmz_mblock {
+ struct rb_node node;
+ struct list_head link;
+ sector_t no;
+- atomic_t ref;
++ unsigned int ref;
+ unsigned long state;
+ struct page *page;
+ void *data;
+@@ -296,7 +296,7 @@ static struct dmz_mblock *dmz_alloc_mblo
+
+ RB_CLEAR_NODE(&mblk->node);
+ INIT_LIST_HEAD(&mblk->link);
+- atomic_set(&mblk->ref, 0);
++ mblk->ref = 0;
+ mblk->state = 0;
+ mblk->no = mblk_no;
+ mblk->data = page_address(mblk->page);
+@@ -397,7 +397,7 @@ static struct dmz_mblock *dmz_fetch_mblo
+ return NULL;
+
+ spin_lock(&zmd->mblk_lock);
+- atomic_inc(&mblk->ref);
++ mblk->ref++;
+ set_bit(DMZ_META_READING, &mblk->state);
+ dmz_insert_mblock(zmd, mblk);
+ spin_unlock(&zmd->mblk_lock);
+@@ -484,7 +484,8 @@ static void dmz_release_mblock(struct dm
+
+ spin_lock(&zmd->mblk_lock);
+
+- if (atomic_dec_and_test(&mblk->ref)) {
++ mblk->ref--;
++ if (mblk->ref == 0) {
+ if (test_bit(DMZ_META_ERROR, &mblk->state)) {
+ rb_erase(&mblk->node, &zmd->mblk_rbtree);
+ dmz_free_mblock(zmd, mblk);
+@@ -511,7 +512,8 @@ static struct dmz_mblock *dmz_get_mblock
+ mblk = dmz_lookup_mblock(zmd, mblk_no);
+ if (mblk) {
+ /* Cache hit: remove block from LRU list */
+- if (atomic_inc_return(&mblk->ref) == 1 &&
++ mblk->ref++;
++ if (mblk->ref == 1 &&
+ !test_bit(DMZ_META_DIRTY, &mblk->state))
+ list_del_init(&mblk->link);
+ }
+@@ -753,7 +755,7 @@ int dmz_flush_metadata(struct dmz_metada
+
+ spin_lock(&zmd->mblk_lock);
+ clear_bit(DMZ_META_DIRTY, &mblk->state);
+- if (atomic_read(&mblk->ref) == 0)
++ if (mblk->ref == 0)
+ list_add_tail(&mblk->link, &zmd->mblk_lru_list);
+ spin_unlock(&zmd->mblk_lock);
+ }
+@@ -2308,7 +2310,7 @@ static void dmz_cleanup_metadata(struct
+ mblk = list_first_entry(&zmd->mblk_dirty_list,
+ struct dmz_mblock, link);
+ dmz_dev_warn(zmd->dev, "mblock %llu still in dirty list (ref %u)",
+- (u64)mblk->no, atomic_read(&mblk->ref));
++ (u64)mblk->no, mblk->ref);
+ list_del_init(&mblk->link);
+ rb_erase(&mblk->node, &zmd->mblk_rbtree);
+ dmz_free_mblock(zmd, mblk);
+@@ -2326,8 +2328,8 @@ static void dmz_cleanup_metadata(struct
+ root = &zmd->mblk_rbtree;
+ rbtree_postorder_for_each_entry_safe(mblk, next, root, node) {
+ dmz_dev_warn(zmd->dev, "mblock %llu ref %u still in rbtree",
+- (u64)mblk->no, atomic_read(&mblk->ref));
+- atomic_set(&mblk->ref, 0);
++ (u64)mblk->no, mblk->ref);
++ mblk->ref = 0;
+ dmz_free_mblock(zmd, mblk);
+ }
+
--- /dev/null
+From 3d4e738311327bc4ba1d55fbe2f1da3de4a475f9 Mon Sep 17 00:00:00 2001
+From: Damien Le Moal <damien.lemoal@wdc.com>
+Date: Wed, 17 Oct 2018 18:05:08 +0900
+Subject: dm zoned: fix various dmz_get_mblock() issues
+
+From: Damien Le Moal <damien.lemoal@wdc.com>
+
+commit 3d4e738311327bc4ba1d55fbe2f1da3de4a475f9 upstream.
+
+dmz_fetch_mblock() called from dmz_get_mblock() has a race since the
+allocation of the new metadata block descriptor and its insertion in
+the cache rbtree with the READING state is not atomic. Two different
+contexts requesting the same block may end up each adding two different
+descriptors of the same block to the cache.
+
+Another problem for this function is that the BIO for processing the
+block read is allocated after the metadata block descriptor is inserted
+in the cache rbtree. If the BIO allocation fails, the metadata block
+descriptor is freed without first being removed from the rbtree.
+
+Fix the first problem by checking again if the requested block is not in
+the cache right before inserting the newly allocated descriptor,
+atomically under the mblk_lock spinlock. The second problem is fixed by
+simply allocating the BIO before inserting the new block in the cache.
+
+Finally, since dmz_fetch_mblock() also increments a block reference
+counter, rename the function to dmz_get_mblock_slow(). To be symmetric
+and clear, also rename dmz_lookup_mblock() to dmz_get_mblock_fast() and
+increment the block reference counter directly in that function rather
+than in dmz_get_mblock().
+
+Fixes: 3b1a94c88b79 ("dm zoned: drive-managed zoned block device target")
+Cc: stable@vger.kernel.org
+Signed-off-by: Damien Le Moal <damien.lemoal@wdc.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-zoned-metadata.c | 66 ++++++++++++++++++++++++++---------------
+ 1 file changed, 42 insertions(+), 24 deletions(-)
+
+--- a/drivers/md/dm-zoned-metadata.c
++++ b/drivers/md/dm-zoned-metadata.c
+@@ -339,10 +339,11 @@ static void dmz_insert_mblock(struct dmz
+ }
+
+ /*
+- * Lookup a metadata block in the rbtree.
++ * Lookup a metadata block in the rbtree. If the block is found, increment
++ * its reference count.
+ */
+-static struct dmz_mblock *dmz_lookup_mblock(struct dmz_metadata *zmd,
+- sector_t mblk_no)
++static struct dmz_mblock *dmz_get_mblock_fast(struct dmz_metadata *zmd,
++ sector_t mblk_no)
+ {
+ struct rb_root *root = &zmd->mblk_rbtree;
+ struct rb_node *node = root->rb_node;
+@@ -350,8 +351,17 @@ static struct dmz_mblock *dmz_lookup_mbl
+
+ while (node) {
+ mblk = container_of(node, struct dmz_mblock, node);
+- if (mblk->no == mblk_no)
++ if (mblk->no == mblk_no) {
++ /*
++ * If this is the first reference to the block,
++ * remove it from the LRU list.
++ */
++ mblk->ref++;
++ if (mblk->ref == 1 &&
++ !test_bit(DMZ_META_DIRTY, &mblk->state))
++ list_del_init(&mblk->link);
+ return mblk;
++ }
+ node = (mblk->no < mblk_no) ? node->rb_left : node->rb_right;
+ }
+
+@@ -382,32 +392,47 @@ static void dmz_mblock_bio_end_io(struct
+ }
+
+ /*
+- * Read a metadata block from disk.
++ * Read an uncached metadata block from disk and add it to the cache.
+ */
+-static struct dmz_mblock *dmz_fetch_mblock(struct dmz_metadata *zmd,
+- sector_t mblk_no)
++static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd,
++ sector_t mblk_no)
+ {
+- struct dmz_mblock *mblk;
++ struct dmz_mblock *mblk, *m;
+ sector_t block = zmd->sb[zmd->mblk_primary].block + mblk_no;
+ struct bio *bio;
+
+- /* Get block and insert it */
++ /* Get a new block and a BIO to read it */
+ mblk = dmz_alloc_mblock(zmd, mblk_no);
+ if (!mblk)
+ return NULL;
+
+- spin_lock(&zmd->mblk_lock);
+- mblk->ref++;
+- set_bit(DMZ_META_READING, &mblk->state);
+- dmz_insert_mblock(zmd, mblk);
+- spin_unlock(&zmd->mblk_lock);
+-
+ bio = bio_alloc(GFP_NOIO, 1);
+ if (!bio) {
+ dmz_free_mblock(zmd, mblk);
+ return NULL;
+ }
+
++ spin_lock(&zmd->mblk_lock);
++
++ /*
++ * Make sure that another context did not start reading
++ * the block already.
++ */
++ m = dmz_get_mblock_fast(zmd, mblk_no);
++ if (m) {
++ spin_unlock(&zmd->mblk_lock);
++ dmz_free_mblock(zmd, mblk);
++ bio_put(bio);
++ return m;
++ }
++
++ mblk->ref++;
++ set_bit(DMZ_META_READING, &mblk->state);
++ dmz_insert_mblock(zmd, mblk);
++
++ spin_unlock(&zmd->mblk_lock);
++
++ /* Submit read BIO */
+ bio->bi_iter.bi_sector = dmz_blk2sect(block);
+ bio_set_dev(bio, zmd->dev->bdev);
+ bio->bi_private = mblk;
+@@ -509,19 +534,12 @@ static struct dmz_mblock *dmz_get_mblock
+
+ /* Check rbtree */
+ spin_lock(&zmd->mblk_lock);
+- mblk = dmz_lookup_mblock(zmd, mblk_no);
+- if (mblk) {
+- /* Cache hit: remove block from LRU list */
+- mblk->ref++;
+- if (mblk->ref == 1 &&
+- !test_bit(DMZ_META_DIRTY, &mblk->state))
+- list_del_init(&mblk->link);
+- }
++ mblk = dmz_get_mblock_fast(zmd, mblk_no);
+ spin_unlock(&zmd->mblk_lock);
+
+ if (!mblk) {
+ /* Cache miss: read the block from disk */
+- mblk = dmz_fetch_mblock(zmd, mblk_no);
++ mblk = dmz_get_mblock_slow(zmd, mblk_no);
+ if (!mblk)
+ return ERR_PTR(-ENOMEM);
+ }
--- /dev/null
+From 721fb6fbfd2132164c2e8777cc837f9b2c1794dc Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Wed, 17 Oct 2018 13:07:05 +0200
+Subject: fsnotify: Fix busy inodes during unmount
+
+From: Jan Kara <jack@suse.cz>
+
+commit 721fb6fbfd2132164c2e8777cc837f9b2c1794dc upstream.
+
+Detaching of mark connector from fsnotify_put_mark() can race with
+unmounting of the filesystem like:
+
+ CPU1 CPU2
+fsnotify_put_mark()
+ spin_lock(&conn->lock);
+ ...
+ inode = fsnotify_detach_connector_from_object(conn)
+ spin_unlock(&conn->lock);
+ generic_shutdown_super()
+ fsnotify_unmount_inodes()
+ sees connector detached for inode
+ -> nothing to do
+ evict_inode()
+ barfs on pending inode reference
+ iput(inode);
+
+Resulting in "Busy inodes after unmount" message and possible kernel
+oops. Make fsnotify_unmount_inodes() properly wait for outstanding inode
+references from detached connectors.
+
+Note that the accounting of outstanding inode references in the
+superblock can cause some cacheline contention on the counter. OTOH it
+happens only during deletion of the last notification mark from an inode
+(or during unlinking of watched inode) and that is not too bad. I have
+measured time to create & delete inotify watch 100000 times from 64
+processes in parallel (each process having its own inotify group and its
+own file on a shared superblock) on a 64 CPU machine. Average and
+standard deviation of 15 runs look like:
+
+ Avg Stddev
+Vanilla 9.817400 0.276165
+Fixed 9.710467 0.228294
+
+So there's no statistically significant difference.
+
+Fixes: 6b3f05d24d35 ("fsnotify: Detach mark from object list when last reference is dropped")
+CC: stable@vger.kernel.org
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/notify/fsnotify.c | 3 +++
+ fs/notify/mark.c | 39 +++++++++++++++++++++++++++++++--------
+ include/linux/fs.h | 3 +++
+ 3 files changed, 37 insertions(+), 8 deletions(-)
+
+--- a/fs/notify/fsnotify.c
++++ b/fs/notify/fsnotify.c
+@@ -96,6 +96,9 @@ void fsnotify_unmount_inodes(struct supe
+
+ if (iput_inode)
+ iput(iput_inode);
++ /* Wait for outstanding inode references from connectors */
++ wait_var_event(&sb->s_fsnotify_inode_refs,
++ !atomic_long_read(&sb->s_fsnotify_inode_refs));
+ }
+
+ /*
+--- a/fs/notify/mark.c
++++ b/fs/notify/mark.c
+@@ -161,15 +161,18 @@ static void fsnotify_connector_destroy_w
+ }
+ }
+
+-static struct inode *fsnotify_detach_connector_from_object(
+- struct fsnotify_mark_connector *conn)
++static void *fsnotify_detach_connector_from_object(
++ struct fsnotify_mark_connector *conn,
++ unsigned int *type)
+ {
+ struct inode *inode = NULL;
+
++ *type = conn->type;
+ if (conn->flags & FSNOTIFY_OBJ_TYPE_INODE) {
+ inode = conn->inode;
+ rcu_assign_pointer(inode->i_fsnotify_marks, NULL);
+ inode->i_fsnotify_mask = 0;
++ atomic_long_inc(&inode->i_sb->s_fsnotify_inode_refs);
+ conn->inode = NULL;
+ conn->flags &= ~FSNOTIFY_OBJ_TYPE_INODE;
+ } else if (conn->flags & FSNOTIFY_OBJ_TYPE_VFSMOUNT) {
+@@ -193,10 +196,29 @@ static void fsnotify_final_mark_destroy(
+ fsnotify_put_group(group);
+ }
+
++/* Drop object reference originally held by a connector */
++static void fsnotify_drop_object(unsigned int type, void *objp)
++{
++ struct inode *inode;
++ struct super_block *sb;
++
++ if (!objp)
++ return;
++ /* Currently only inode references are passed to be dropped */
++ if (WARN_ON_ONCE(type != FSNOTIFY_OBJ_TYPE_INODE))
++ return;
++ inode = objp;
++ sb = inode->i_sb;
++ iput(inode);
++ if (atomic_long_dec_and_test(&sb->s_fsnotify_inode_refs))
++ wake_up_var(&sb->s_fsnotify_inode_refs);
++}
++
+ void fsnotify_put_mark(struct fsnotify_mark *mark)
+ {
+ struct fsnotify_mark_connector *conn;
+- struct inode *inode = NULL;
++ void *objp = NULL;
++ unsigned int type = FSNOTIFY_OBJ_TYPE_DETACHED;
+ bool free_conn = false;
+
+ /* Catch marks that were actually never attached to object */
+@@ -216,7 +238,7 @@ void fsnotify_put_mark(struct fsnotify_m
+ conn = mark->connector;
+ hlist_del_init_rcu(&mark->obj_list);
+ if (hlist_empty(&conn->list)) {
+- inode = fsnotify_detach_connector_from_object(conn);
++ objp = fsnotify_detach_connector_from_object(conn, &type);
+ free_conn = true;
+ } else {
+ __fsnotify_recalc_mask(conn);
+@@ -224,7 +246,7 @@ void fsnotify_put_mark(struct fsnotify_m
+ mark->connector = NULL;
+ spin_unlock(&conn->lock);
+
+- iput(inode);
++ fsnotify_drop_object(type, objp);
+
+ if (free_conn) {
+ spin_lock(&destroy_lock);
+@@ -694,7 +716,8 @@ void fsnotify_destroy_marks(struct fsnot
+ {
+ struct fsnotify_mark_connector *conn;
+ struct fsnotify_mark *mark, *old_mark = NULL;
+- struct inode *inode;
++ void *objp;
++ unsigned int type;
+
+ conn = fsnotify_grab_connector(connp);
+ if (!conn)
+@@ -720,11 +743,11 @@ void fsnotify_destroy_marks(struct fsnot
+ * mark references get dropped. It would lead to strange results such
+ * as delaying inode deletion or blocking unmount.
+ */
+- inode = fsnotify_detach_connector_from_object(conn);
++ objp = fsnotify_detach_connector_from_object(conn, &type);
+ spin_unlock(&conn->lock);
+ if (old_mark)
+ fsnotify_put_mark(old_mark);
+- iput(inode);
++ fsnotify_drop_object(type, objp);
+ }
+
+ /*
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -1406,6 +1406,9 @@ struct super_block {
+ /* Number of inodes with nlink == 0 but still referenced */
+ atomic_long_t s_remove_count;
+
++ /* Pending fsnotify inode refs */
++ atomic_long_t s_fsnotify_inode_refs;
++
+ /* Being remounted read-only */
+ int s_readonly_remount;
+
--- /dev/null
+From 7d321bd3542500caf125249f44dc37cb4e738013 Mon Sep 17 00:00:00 2001
+From: Will Deacon <will.deacon@arm.com>
+Date: Mon, 1 Oct 2018 12:42:49 +0100
+Subject: iommu/arm-smmu: Ensure that page-table updates are visible before TLBI
+
+From: Will Deacon <will.deacon@arm.com>
+
+commit 7d321bd3542500caf125249f44dc37cb4e738013 upstream.
+
+The IO-pgtable code relies on the driver TLB invalidation callbacks to
+ensure that all page-table updates are visible to the IOMMU page-table
+walker.
+
+In the case that the page-table walker is cache-coherent, we cannot rely
+on an implicit DSB from the DMA-mapping code, so we must ensure that we
+execute a DSB in our tlb_add_flush() callback prior to triggering the
+invalidation.
+
+Cc: <stable@vger.kernel.org>
+Cc: Robin Murphy <robin.murphy@arm.com>
+Fixes: 2df7a25ce4a7 ("iommu/arm-smmu: Clean up DMA API usage")
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/iommu/arm-smmu.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/drivers/iommu/arm-smmu.c
++++ b/drivers/iommu/arm-smmu.c
+@@ -475,6 +475,9 @@ static void arm_smmu_tlb_inv_range_nosyn
+ bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
+ void __iomem *reg = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);
+
++ if (smmu_domain->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
++ wmb();
++
+ if (stage1) {
+ reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
+
+@@ -516,6 +519,9 @@ static void arm_smmu_tlb_inv_vmid_nosync
+ struct arm_smmu_domain *smmu_domain = cookie;
+ void __iomem *base = ARM_SMMU_GR0(smmu_domain->smmu);
+
++ if (smmu_domain->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
++ wmb();
++
+ writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
+ }
+
--- /dev/null
+From 93f38b6fae0ea8987e22d9e6c38f8dfdccd867ee Mon Sep 17 00:00:00 2001
+From: Amir Goldstein <amir73il@gmail.com>
+Date: Fri, 28 Sep 2018 20:41:48 +0300
+Subject: lockd: fix access beyond unterminated strings in prints
+
+From: Amir Goldstein <amir73il@gmail.com>
+
+commit 93f38b6fae0ea8987e22d9e6c38f8dfdccd867ee upstream.
+
+printk format used %*s instead of %.*s, so hostname_len does not limit
+the number of bytes accessed from hostname.
+
+Signed-off-by: Amir Goldstein <amir73il@gmail.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: J. Bruce Fields <bfields@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/lockd/host.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/lockd/host.c
++++ b/fs/lockd/host.c
+@@ -341,7 +341,7 @@ struct nlm_host *nlmsvc_lookup_host(cons
+ };
+ struct lockd_net *ln = net_generic(net, lockd_net_id);
+
+- dprintk("lockd: %s(host='%*s', vers=%u, proto=%s)\n", __func__,
++ dprintk("lockd: %s(host='%.*s', vers=%u, proto=%s)\n", __func__,
+ (int)hostname_len, hostname, rqstp->rq_vers,
+ (rqstp->rq_prot == IPPROTO_UDP ? "udp" : "tcp"));
+
--- /dev/null
+From 250854eed5d45a73d81e4137dfd85180af6f2ec3 Mon Sep 17 00:00:00 2001
+From: Hans Verkuil <hverkuil@xs4all.nl>
+Date: Mon, 8 Oct 2018 15:08:27 -0400
+Subject: media: v4l2-tpg: fix kernel oops when enabling HFLIP and OSD
+
+From: Hans Verkuil <hverkuil@xs4all.nl>
+
+commit 250854eed5d45a73d81e4137dfd85180af6f2ec3 upstream.
+
+When the OSD is on (i.e. vivid displays text on top of the test pattern), and
+you enable hflip, then the driver crashes.
+
+The cause turned out to be a division of a negative number by an unsigned value.
+You expect that -8 / 2U would be -4, but in reality it is 2147483644 :-(
+
+Fixes: 3e14e7a82c1ef ("vivid-tpg: add hor/vert downsampling support to tpg_gen_text")
+
+Signed-off-by: Hans Verkuil <hans.verkuil@cisco.com>
+Reported-by: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
+Cc: <stable@vger.kernel.org> # for v4.1 and up
+Signed-off-by: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/media/common/v4l2-tpg/v4l2-tpg-core.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
++++ b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
+@@ -1765,7 +1765,7 @@ typedef struct { u16 __; u8 _; } __packe
+ pos[7] = (chr & (0x01 << 0) ? fg : bg); \
+ } \
+ \
+- pos += (tpg->hflip ? -8 : 8) / hdiv; \
++ pos += (tpg->hflip ? -8 : 8) / (int)hdiv; \
+ } \
+ } \
+ } while (0)
--- /dev/null
+From c0fae7e2452b90c31edd2d25eb3baf0c76b400ca Mon Sep 17 00:00:00 2001
+From: Aaro Koskinen <aaro.koskinen@iki.fi>
+Date: Sat, 27 Oct 2018 01:46:34 +0300
+Subject: MIPS: OCTEON: fix out of bounds array access on CN68XX
+
+From: Aaro Koskinen <aaro.koskinen@iki.fi>
+
+commit c0fae7e2452b90c31edd2d25eb3baf0c76b400ca upstream.
+
+The maximum number of interfaces is returned by
+cvmx_helper_get_number_of_interfaces(), and the value is used to access
+interface_port_count[]. When CN68XX support was added, we forgot
+to increase the array size. Fix that.
+
+Fixes: 2c8c3f0201333 ("MIPS: Octeon: Support additional interfaces on CN68XX")
+Signed-off-by: Aaro Koskinen <aaro.koskinen@iki.fi>
+Signed-off-by: Paul Burton <paul.burton@mips.com>
+Patchwork: https://patchwork.linux-mips.org/patch/20949/
+Cc: Ralf Baechle <ralf@linux-mips.org>
+Cc: linux-mips@linux-mips.org
+Cc: linux-kernel@vger.kernel.org
+Cc: stable@vger.kernel.org # v4.3+
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/cavium-octeon/executive/cvmx-helper.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/mips/cavium-octeon/executive/cvmx-helper.c
++++ b/arch/mips/cavium-octeon/executive/cvmx-helper.c
+@@ -67,7 +67,7 @@ void (*cvmx_override_pko_queue_priority)
+ void (*cvmx_override_ipd_port_setup) (int ipd_port);
+
+ /* Port count per interface */
+-static int interface_port_count[5];
++static int interface_port_count[9];
+
+ /**
+ * Return the number of interfaces the chip has. Each interface
--- /dev/null
+From d397dbe606120a1ea1b11b0020c3f7a3852da5ac Mon Sep 17 00:00:00 2001
+From: Johan Hovold <johan@kernel.org>
+Date: Mon, 27 Aug 2018 10:21:50 +0200
+Subject: net: bcmgenet: fix OF child-node lookup
+
+From: Johan Hovold <johan@kernel.org>
+
+commit d397dbe606120a1ea1b11b0020c3f7a3852da5ac upstream.
+
+Use the new of_get_compatible_child() helper to lookup the mdio child
+node instead of using of_find_compatible_node(), which searches the
+entire tree from a given start node and thus can return an unrelated
+(i.e. non-child) node.
+
+This also addresses a potential use-after-free (e.g. after probe
+deferral) as the tree-wide helper drops a reference to its first
+argument (i.e. the node of the device being probed).
+
+Fixes: aa09677cba42 ("net: bcmgenet: add MDIO routines")
+Cc: stable <stable@vger.kernel.org> # 3.15
+Cc: David S. Miller <davem@davemloft.net>
+Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Signed-off-by: Rob Herring <robh@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/ethernet/broadcom/genet/bcmmii.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
++++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
+@@ -360,7 +360,7 @@ static struct device_node *bcmgenet_mii_
+ if (!compat)
+ return NULL;
+
+- priv->mdio_dn = of_find_compatible_node(dn, NULL, compat);
++ priv->mdio_dn = of_get_compatible_child(dn, compat);
+ kfree(compat);
+ if (!priv->mdio_dn) {
+ dev_err(kdev, "unable to find MDIO bus node\n");
--- /dev/null
+From 5bf59773aaf36dd62117dc83d50e1bbf9ef432da Mon Sep 17 00:00:00 2001
+From: Johan Hovold <johan@kernel.org>
+Date: Mon, 27 Aug 2018 10:21:52 +0200
+Subject: NFC: nfcmrvl_uart: fix OF child-node lookup
+
+From: Johan Hovold <johan@kernel.org>
+
+commit 5bf59773aaf36dd62117dc83d50e1bbf9ef432da upstream.
+
+Use the new of_get_compatible_child() helper to lookup the nfc child
+node instead of using of_find_compatible_node(), which searches the
+entire tree from a given start node and thus can return an unrelated
+(i.e. non-child) node.
+
+This also addresses a potential use-after-free (e.g. after probe
+deferral) as the tree-wide helper drops a reference to its first
+argument (i.e. the parent node).
+
+Fixes: e097dc624f78 ("NFC: nfcmrvl: add UART driver")
+Fixes: d8e018c0b321 ("NFC: nfcmrvl: update device tree bindings for Marvell NFC")
+Cc: stable <stable@vger.kernel.org> # 4.2
+Cc: Vincent Cuissard <cuissard@marvell.com>
+Cc: Samuel Ortiz <sameo@linux.intel.com>
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Signed-off-by: Rob Herring <robh@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/nfc/nfcmrvl/uart.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+--- a/drivers/nfc/nfcmrvl/uart.c
++++ b/drivers/nfc/nfcmrvl/uart.c
+@@ -73,10 +73,9 @@ static int nfcmrvl_uart_parse_dt(struct
+ struct device_node *matched_node;
+ int ret;
+
+- matched_node = of_find_compatible_node(node, NULL, "marvell,nfc-uart");
++ matched_node = of_get_compatible_child(node, "marvell,nfc-uart");
+ if (!matched_node) {
+- matched_node = of_find_compatible_node(node, NULL,
+- "mrvl,nfc-uart");
++ matched_node = of_get_compatible_child(node, "mrvl,nfc-uart");
+ if (!matched_node)
+ return -ENODEV;
+ }
--- /dev/null
+From fdbd1a2e4a71adcb1ae219fcfd964930d77a7f84 Mon Sep 17 00:00:00 2001
+From: Benjamin Coddington <bcodding@redhat.com>
+Date: Thu, 18 Oct 2018 15:01:48 -0400
+Subject: nfs: Fix a missed page unlock after pg_doio()
+
+From: Benjamin Coddington <bcodding@redhat.com>
+
+commit fdbd1a2e4a71adcb1ae219fcfd964930d77a7f84 upstream.
+
+We must check pg_error and call error_cleanup after any call to pg_doio.
+Currently, we are skipping the unlock of a page if we encounter an error in
+nfs_pageio_complete() before handing off the work to the RPC layer.
+
+Signed-off-by: Benjamin Coddington <bcodding@redhat.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/nfs/pagelist.c | 40 +++++++++++++++++++++-------------------
+ 1 file changed, 21 insertions(+), 19 deletions(-)
+
+--- a/fs/nfs/pagelist.c
++++ b/fs/nfs/pagelist.c
+@@ -1110,6 +1110,20 @@ static int nfs_pageio_add_request_mirror
+ return ret;
+ }
+
++static void nfs_pageio_error_cleanup(struct nfs_pageio_descriptor *desc)
++{
++ u32 midx;
++ struct nfs_pgio_mirror *mirror;
++
++ if (!desc->pg_error)
++ return;
++
++ for (midx = 0; midx < desc->pg_mirror_count; midx++) {
++ mirror = &desc->pg_mirrors[midx];
++ desc->pg_completion_ops->error_cleanup(&mirror->pg_list);
++ }
++}
++
+ int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
+ struct nfs_page *req)
+ {
+@@ -1160,25 +1174,11 @@ int nfs_pageio_add_request(struct nfs_pa
+ return 1;
+
+ out_failed:
+- /*
+- * We might have failed before sending any reqs over wire.
+- * Clean up rest of the reqs in mirror pg_list.
+- */
+- if (desc->pg_error) {
+- struct nfs_pgio_mirror *mirror;
+- void (*func)(struct list_head *);
+-
+- /* remember fatal errors */
+- if (nfs_error_is_fatal(desc->pg_error))
+- nfs_context_set_write_error(req->wb_context,
+- desc->pg_error);
+-
+- func = desc->pg_completion_ops->error_cleanup;
+- for (midx = 0; midx < desc->pg_mirror_count; midx++) {
+- mirror = &desc->pg_mirrors[midx];
+- func(&mirror->pg_list);
+- }
+- }
++ /* remember fatal errors */
++ if (nfs_error_is_fatal(desc->pg_error))
++ nfs_context_set_write_error(req->wb_context,
++ desc->pg_error);
++ nfs_pageio_error_cleanup(desc);
+ return 0;
+ }
+
+@@ -1250,6 +1250,8 @@ void nfs_pageio_complete(struct nfs_page
+ for (midx = 0; midx < desc->pg_mirror_count; midx++)
+ nfs_pageio_complete_mirror(desc, midx);
+
++ if (desc->pg_error < 0)
++ nfs_pageio_error_cleanup(desc);
+ if (desc->pg_ops->pg_cleanup)
+ desc->pg_ops->pg_cleanup(desc);
+ nfs_pageio_cleanup_mirroring(desc);
--- /dev/null
+From bb6ad5572c0022e17e846b382d7413cdcf8055be Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <trondmy@gmail.com>
+Date: Tue, 9 Oct 2018 15:54:15 -0400
+Subject: nfsd: Fix an Oops in free_session()
+
+From: Trond Myklebust <trondmy@gmail.com>
+
+commit bb6ad5572c0022e17e846b382d7413cdcf8055be upstream.
+
+In call_xpt_users(), we delete the entry from the list, but we
+do not reinitialise it. This triggers the list poisoning when
+we later call unregister_xpt_user() in nfsd4_del_conns().
+
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: J. Bruce Fields <bfields@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/sunrpc/svc_xprt.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/sunrpc/svc_xprt.c
++++ b/net/sunrpc/svc_xprt.c
+@@ -1040,7 +1040,7 @@ static void call_xpt_users(struct svc_xp
+ spin_lock(&xprt->xpt_lock);
+ while (!list_empty(&xprt->xpt_users)) {
+ u = list_first_entry(&xprt->xpt_users, struct svc_xpt_user, list);
+- list_del(&u->list);
++ list_del_init(&u->list);
+ u->callback(u);
+ }
+ spin_unlock(&xprt->xpt_lock);
--- /dev/null
+From 943cff67b842839f4f35364ba2db5c2d3f025d94 Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+Date: Tue, 18 Sep 2018 10:07:44 -0400
+Subject: NFSv4.1: Fix the r/wsize checking
+
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+
+commit 943cff67b842839f4f35364ba2db5c2d3f025d94 upstream.
+
+The intention of nfs4_session_set_rwsize() was to cap the r/wsize to the
+buffer sizes negotiated by the CREATE_SESSION. The initial code had a
+bug whereby we would not check the values negotiated by nfs_probe_fsinfo()
+(the assumption being that CREATE_SESSION will always negotiate buffer values
+that are sane w.r.t. the server's preferred r/wsizes) but would only check
+values set by the user in the 'mount' command.
+
+The code was changed in 4.11 to _always_ set the r/wsize, meaning that we
+now never use the server preferred r/wsizes. This is the regression that
+this patch fixes.
+Also rename the function to nfs4_session_limit_rwsize() in order to avoid
+future confusion.
+
+Fixes: 033853325fe3 ("NFSv4.1 respect server's max size in CREATE_SESSION")
+Cc: stable@vger.kernel.org # v4.11+
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/nfs/nfs4client.c | 16 +++++++++-------
+ 1 file changed, 9 insertions(+), 7 deletions(-)
+
+--- a/fs/nfs/nfs4client.c
++++ b/fs/nfs/nfs4client.c
+@@ -925,10 +925,10 @@ EXPORT_SYMBOL_GPL(nfs4_set_ds_client);
+
+ /*
+ * Session has been established, and the client marked ready.
+- * Set the mount rsize and wsize with negotiated fore channel
+- * attributes which will be bound checked in nfs_server_set_fsinfo.
++ * Limit the mount rsize, wsize and dtsize using negotiated fore
++ * channel attributes.
+ */
+-static void nfs4_session_set_rwsize(struct nfs_server *server)
++static void nfs4_session_limit_rwsize(struct nfs_server *server)
+ {
+ #ifdef CONFIG_NFS_V4_1
+ struct nfs4_session *sess;
+@@ -941,9 +941,11 @@ static void nfs4_session_set_rwsize(stru
+ server_resp_sz = sess->fc_attrs.max_resp_sz - nfs41_maxread_overhead;
+ server_rqst_sz = sess->fc_attrs.max_rqst_sz - nfs41_maxwrite_overhead;
+
+- if (!server->rsize || server->rsize > server_resp_sz)
++ if (server->dtsize > server_resp_sz)
++ server->dtsize = server_resp_sz;
++ if (server->rsize > server_resp_sz)
+ server->rsize = server_resp_sz;
+- if (!server->wsize || server->wsize > server_rqst_sz)
++ if (server->wsize > server_rqst_sz)
+ server->wsize = server_rqst_sz;
+ #endif /* CONFIG_NFS_V4_1 */
+ }
+@@ -990,12 +992,12 @@ static int nfs4_server_common_setup(stru
+ (unsigned long long) server->fsid.minor);
+ nfs_display_fhandle(mntfh, "Pseudo-fs root FH");
+
+- nfs4_session_set_rwsize(server);
+-
+ error = nfs_probe_fsinfo(server, mntfh, fattr);
+ if (error < 0)
+ goto out;
+
++ nfs4_session_limit_rwsize(server);
++
+ if (server->namelen == 0 || server->namelen > NFS4_MAXNAMLEN)
+ server->namelen = NFS4_MAXNAMLEN;
+
--- /dev/null
+From 9844fb2e351311210e6660a9a1c62d17424a6145 Mon Sep 17 00:00:00 2001
+From: Johan Hovold <johan@kernel.org>
+Date: Mon, 27 Aug 2018 10:21:53 +0200
+Subject: power: supply: twl4030-charger: fix OF sibling-node lookup
+
+From: Johan Hovold <johan@kernel.org>
+
+commit 9844fb2e351311210e6660a9a1c62d17424a6145 upstream.
+
+Use the new of_get_compatible_child() helper to lookup the usb sibling
+node instead of using of_find_compatible_node(), which searches the
+entire tree from a given start node and thus can return an unrelated
+(non-sibling) node.
+
+This also addresses a potential use-after-free (e.g. after probe
+deferral) as the tree-wide helper drops a reference to its first
+argument (i.e. the parent device node).
+
+While at it, also fix the related phy-node reference leak.
+
+Fixes: f5e4edb8c888 ("power: twl4030_charger: find associated phy by more reliable means.")
+Cc: stable <stable@vger.kernel.org> # 4.2
+Cc: NeilBrown <neilb@suse.de>
+Cc: Felipe Balbi <felipe.balbi@linux.intel.com>
+Cc: Sebastian Reichel <sre@kernel.org>
+Reviewed-by: Sebastian Reichel <sre@kernel.org>
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Signed-off-by: Rob Herring <robh@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/power/supply/twl4030_charger.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/drivers/power/supply/twl4030_charger.c
++++ b/drivers/power/supply/twl4030_charger.c
+@@ -996,12 +996,13 @@ static int twl4030_bci_probe(struct plat
+ if (bci->dev->of_node) {
+ struct device_node *phynode;
+
+- phynode = of_find_compatible_node(bci->dev->of_node->parent,
+- NULL, "ti,twl4030-usb");
++ phynode = of_get_compatible_child(bci->dev->of_node->parent,
++ "ti,twl4030-usb");
+ if (phynode) {
+ bci->usb_nb.notifier_call = twl4030_bci_usb_ncb;
+ bci->transceiver = devm_usb_get_phy_by_node(
+ bci->dev, phynode, &bci->usb_nb);
++ of_node_put(phynode);
+ if (IS_ERR(bci->transceiver)) {
+ ret = PTR_ERR(bci->transceiver);
+ if (ret == -EPROBE_DEFER)
--- /dev/null
+From 0f99153def98134403c9149128e59d3e1786cf04 Mon Sep 17 00:00:00 2001
+From: Christophe Leroy <christophe.leroy@c-s.fr>
+Date: Fri, 19 Oct 2018 06:12:50 +0000
+Subject: powerpc/msi: Fix compile error on mpc83xx
+
+From: Christophe Leroy <christophe.leroy@c-s.fr>
+
+commit 0f99153def98134403c9149128e59d3e1786cf04 upstream.
+
+mpic_get_primary_version() is not defined when not using MPIC.
+The compile error log looks like:
+
+arch/powerpc/sysdev/built-in.o: In function `fsl_of_msi_probe':
+fsl_msi.c:(.text+0x150c): undefined reference to `fsl_mpic_primary_get_version'
+
+Signed-off-by: Jia Hongtao <hongtao.jia@freescale.com>
+Signed-off-by: Scott Wood <scottwood@freescale.com>
+Reported-by: Radu Rendec <radu.rendec@gmail.com>
+Fixes: 807d38b73b6 ("powerpc/mpic: Add get_version API both for internal and external use")
+Cc: stable@vger.kernel.org
+Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/include/asm/mpic.h | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/arch/powerpc/include/asm/mpic.h
++++ b/arch/powerpc/include/asm/mpic.h
+@@ -393,7 +393,14 @@ extern struct bus_type mpic_subsys;
+ #define MPIC_REGSET_TSI108 MPIC_REGSET(1) /* Tsi108/109 PIC */
+
+ /* Get the version of primary MPIC */
++#ifdef CONFIG_MPIC
+ extern u32 fsl_mpic_primary_get_version(void);
++#else
++static inline u32 fsl_mpic_primary_get_version(void)
++{
++ return 0;
++}
++#endif
+
+ /* Allocate the controller structure and setup the linux irq descs
+ * for the range if interrupts passed in. No HW initialization is
smb3-on-kerberos-mount-if-server-doesn-t-specify-auth-type-use-krb5.patch
printk-fix-panic-caused-by-passing-log_buf_len-to-command-line.patch
genirq-fix-race-on-spurious-interrupt-detection.patch
+nfc-nfcmrvl_uart-fix-of-child-node-lookup.patch
+nfsv4.1-fix-the-r-wsize-checking.patch
+nfs-fix-a-missed-page-unlock-after-pg_doio.patch
+nfsd-fix-an-oops-in-free_session.patch
+lockd-fix-access-beyond-unterminated-strings-in-prints.patch
+dm-ioctl-harden-copy_params-s-copy_from_user-from-malicious-users.patch
+dm-zoned-fix-metadata-block-ref-counting.patch
+dm-zoned-fix-various-dmz_get_mblock-issues.patch
+fsnotify-fix-busy-inodes-during-unmount.patch
+powerpc-msi-fix-compile-error-on-mpc83xx.patch
+mips-octeon-fix-out-of-bounds-array-access-on-cn68xx.patch
+power-supply-twl4030-charger-fix-of-sibling-node-lookup.patch
+iommu-arm-smmu-ensure-that-page-table-updates-are-visible-before-tlbi.patch
+tc-set-dma-masks-for-devices.patch
+xprtrdma-reset-credit-grant-properly-after-a-disconnect.patch
+net-bcmgenet-fix-of-child-node-lookup.patch
+media-v4l2-tpg-fix-kernel-oops-when-enabling-hflip-and-osd.patch
--- /dev/null
+From 3f2aa244ee1a0d17ed5b6c86564d2c1b24d1c96b Mon Sep 17 00:00:00 2001
+From: "Maciej W. Rozycki" <macro@linux-mips.org>
+Date: Wed, 3 Oct 2018 13:21:07 +0100
+Subject: TC: Set DMA masks for devices
+
+From: Maciej W. Rozycki <macro@linux-mips.org>
+
+commit 3f2aa244ee1a0d17ed5b6c86564d2c1b24d1c96b upstream.
+
+Fix a TURBOchannel support regression with commit 205e1b7f51e4
+("dma-mapping: warn when there is no coherent_dma_mask") that caused
+coherent DMA allocations to produce a warning such as:
+
+defxx: v1.11 2014/07/01 Lawrence V. Stefani and others
+tc1: DEFTA at MMIO addr = 0x1e900000, IRQ = 20, Hardware addr = 08-00-2b-a3-a3-29
+------------[ cut here ]------------
+WARNING: CPU: 0 PID: 1 at ./include/linux/dma-mapping.h:516 dfx_dev_register+0x670/0x678
+Modules linked in:
+CPU: 0 PID: 1 Comm: swapper Not tainted 4.19.0-rc6 #2
+Stack : ffffffff8009ffc0 fffffffffffffec0 0000000000000000 ffffffff80647650
+ 0000000000000000 0000000000000000 ffffffff806f5f80 ffffffffffffffff
+ 0000000000000000 0000000000000000 0000000000000001 ffffffff8065d4e8
+ 98000000031b6300 ffffffff80563478 ffffffff805685b0 ffffffffffffffff
+ 0000000000000000 ffffffff805d6720 0000000000000204 ffffffff80388df8
+ 0000000000000000 0000000000000009 ffffffff8053efd0 ffffffff806657d0
+ 0000000000000000 ffffffff803177f8 0000000000000000 ffffffff806d0000
+ 9800000003078000 980000000307b9e0 000000001e900000 ffffffff80067940
+ 0000000000000000 ffffffff805d6720 0000000000000204 ffffffff80388df8
+ ffffffff805176c0 ffffffff8004dc78 0000000000000000 ffffffff80067940
+ ...
+Call Trace:
+[<ffffffff8004dc78>] show_stack+0xa0/0x130
+[<ffffffff80067940>] __warn+0x128/0x170
+---[ end trace b1d1e094f67f3bb2 ]---
+
+This is because the TURBOchannel bus driver fails to set the coherent
+DMA mask for devices enumerated.
+
+Set the regular and coherent DMA masks for TURBOchannel devices then,
+observing that the bus protocol supports a 34-bit (16GiB) DMA address
+space, by interpreting the value presented in the address cycle across
+the 32 `ad' lines as a 32-bit word rather than byte address[1]. The
+architectural size of the TURBOchannel DMA address space exceeds the
+maximum amount of RAM any actual TURBOchannel system in existence may
+have, hence both masks are the same.
+
+This removes the warning shown above.
+
+References:
+
+[1] "TURBOchannel Hardware Specification", EK-369AA-OD-007B, Digital
+ Equipment Corporation, January 1993, Section "DMA", pp. 1-15 -- 1-17
+
+Signed-off-by: Maciej W. Rozycki <macro@linux-mips.org>
+Signed-off-by: Paul Burton <paul.burton@mips.com>
+Patchwork: https://patchwork.linux-mips.org/patch/20835/
+Fixes: 205e1b7f51e4 ("dma-mapping: warn when there is no coherent_dma_mask")
+Cc: stable@vger.kernel.org # 4.16+
+Cc: Ralf Baechle <ralf@linux-mips.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/tc/tc.c | 8 +++++++-
+ include/linux/tc.h | 1 +
+ 2 files changed, 8 insertions(+), 1 deletion(-)
+
+--- a/drivers/tc/tc.c
++++ b/drivers/tc/tc.c
+@@ -2,7 +2,7 @@
+ * TURBOchannel bus services.
+ *
+ * Copyright (c) Harald Koerfgen, 1998
+- * Copyright (c) 2001, 2003, 2005, 2006 Maciej W. Rozycki
++ * Copyright (c) 2001, 2003, 2005, 2006, 2018 Maciej W. Rozycki
+ * Copyright (c) 2005 James Simmons
+ *
+ * This file is subject to the terms and conditions of the GNU
+@@ -10,6 +10,7 @@
+ * directory of this archive for more details.
+ */
+ #include <linux/compiler.h>
++#include <linux/dma-mapping.h>
+ #include <linux/errno.h>
+ #include <linux/init.h>
+ #include <linux/ioport.h>
+@@ -92,6 +93,11 @@ static void __init tc_bus_add_devices(st
+ tdev->dev.bus = &tc_bus_type;
+ tdev->slot = slot;
+
++ /* TURBOchannel has 34-bit DMA addressing (16GiB space). */
++ tdev->dma_mask = DMA_BIT_MASK(34);
++ tdev->dev.dma_mask = &tdev->dma_mask;
++ tdev->dev.coherent_dma_mask = DMA_BIT_MASK(34);
++
+ for (i = 0; i < 8; i++) {
+ tdev->firmware[i] =
+ readb(module + offset + TC_FIRM_VER + 4 * i);
+--- a/include/linux/tc.h
++++ b/include/linux/tc.h
+@@ -84,6 +84,7 @@ struct tc_dev {
+ device. */
+ struct device dev; /* Generic device interface. */
+ struct resource resource; /* Address space of this device. */
++ u64 dma_mask; /* DMA addressable range. */
+ char vendor[9];
+ char name[9];
+ char firmware[9];
--- /dev/null
+From ef739b2175dde9c05594f768cb78149f1ce2ac36 Mon Sep 17 00:00:00 2001
+From: Chuck Lever <chuck.lever@oracle.com>
+Date: Mon, 1 Oct 2018 14:25:14 -0400
+Subject: xprtrdma: Reset credit grant properly after a disconnect
+
+From: Chuck Lever <chuck.lever@oracle.com>
+
+commit ef739b2175dde9c05594f768cb78149f1ce2ac36 upstream.
+
+On a fresh connection, an RPC/RDMA client is supposed to send only
+one RPC Call until it gets a credit grant in the first RPC Reply
+from the server [RFC 8166, Section 3.3.3].
+
+There is a bug in the Linux client's credit accounting mechanism
+introduced by commit e7ce710a8802 ("xprtrdma: Avoid deadlock when
+credit window is reset"). On connect, it simply dumps all pending
+RPC Calls onto the new connection.
+
+Servers have been tolerant of this bad behavior. Currently no server
+implementation ever changes its credit grant over reconnects, and
+servers always repost enough Receives before connections are fully
+established.
+
+To correct this issue, ensure that the client resets both the credit
+grant _and_ the congestion window when handling a reconnect.
+
+Fixes: e7ce710a8802 ("xprtrdma: Avoid deadlock when credit ... ")
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Cc: stable@kernel.org
+Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/sunrpc/xprtrdma/svc_rdma_backchannel.c | 1 +
+ net/sunrpc/xprtrdma/transport.c | 6 ++++++
+ 2 files changed, 7 insertions(+)
+
+--- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
+@@ -263,6 +263,7 @@ static void
+ xprt_rdma_bc_close(struct rpc_xprt *xprt)
+ {
+ dprintk("svcrdma: %s: xprt %p\n", __func__, xprt);
++ xprt->cwnd = RPC_CWNDSHIFT;
+ }
+
+ static void
+--- a/net/sunrpc/xprtrdma/transport.c
++++ b/net/sunrpc/xprtrdma/transport.c
+@@ -484,6 +484,12 @@ xprt_rdma_close(struct rpc_xprt *xprt)
+ xprt->reestablish_timeout = 0;
+ xprt_disconnect_done(xprt);
+ rpcrdma_ep_disconnect(ep, ia);
++
++ /* Prepare @xprt for the next connection by reinitializing
++ * its credit grant to one (see RFC 8166, Section 3.3.3).
++ */
++ r_xprt->rx_buf.rb_credits = 1;
++ xprt->cwnd = RPC_CWNDSHIFT;
+ }
+
+ static void