--- /dev/null
+From 182b1917109892ab9f26d66bfdcbc4ba6f0a0a65 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Toke=20H=C3=B8iland-J=C3=B8rgensen?= <toke@toke.dk>
+Date: Tue, 27 Feb 2018 19:09:44 +0200
+Subject: ath9k: Protect queue draining by rcu_read_lock()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Toke Høiland-Jørgensen <toke@toke.dk>
+
+commit 182b1917109892ab9f26d66bfdcbc4ba6f0a0a65 upstream.
+
+When ath9k was switched over to use the mac80211 intermediate queues,
+node cleanup now drains the mac80211 queues. However, this call path is
+not protected by rcu_read_lock() as it was previously entirely internal
+to the driver which uses its own locking.
+
+This leads to a possible rcu_dereference() without holding
+rcu_read_lock(); but only if a station is cleaned up while having
+packets queued on the TXQ. Fix this by adding the rcu_read_lock() to the
+caller in ath9k.
+
+Fixes: 50f08edf9809 ("ath9k: Switch to using mac80211 intermediate software queues.")
+Cc: stable@vger.kernel.org
+Reported-by: Ben Greear <greearb@candelatech.com>
+Signed-off-by: Toke Høiland-Jørgensen <toke@toke.dk>
+Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/wireless/ath/ath9k/xmit.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/net/wireless/ath/ath9k/xmit.c
++++ b/drivers/net/wireless/ath/ath9k/xmit.c
+@@ -2892,6 +2892,8 @@ void ath_tx_node_cleanup(struct ath_soft
+ struct ath_txq *txq;
+ int tidno;
+
++ rcu_read_lock();
++
+ for (tidno = 0; tidno < IEEE80211_NUM_TIDS; tidno++) {
+ tid = ath_node_to_tid(an, tidno);
+ txq = tid->txq;
+@@ -2909,6 +2911,8 @@ void ath_tx_node_cleanup(struct ath_soft
+ if (!an->sta)
+ break; /* just one multicast ath_atx_tid */
+ }
++
++ rcu_read_unlock();
+ }
+
+ #ifdef CONFIG_ATH9K_TX99
--- /dev/null
+From bffa9909a6b48d8ca3398dec601bc9162a4020c4 Mon Sep 17 00:00:00 2001
+From: Ming Lei <ming.lei@redhat.com>
+Date: Sun, 8 Apr 2018 17:48:08 +0800
+Subject: blk-mq: don't keep offline CPUs mapped to hctx 0
+
+From: Ming Lei <ming.lei@redhat.com>
+
+commit bffa9909a6b48d8ca3398dec601bc9162a4020c4 upstream.
+
+From commit 4b855ad37194 ("blk-mq: Create hctx for each present CPU"),
+blk-mq doesn't remap queue after CPU topo is changed, that said when
+some of these offline CPUs become online, they are still mapped to
+hctx 0, then hctx 0 may become the bottleneck of IO dispatch and
+completion.
+
+This patch sets up the mapping from the beginning, and aligns to
+queue mapping for PCI device (blk_mq_pci_map_queues()).
+
+Cc: Stefan Haberland <sth@linux.vnet.ibm.com>
+Cc: Keith Busch <keith.busch@intel.com>
+Cc: stable@vger.kernel.org
+Fixes: 4b855ad37194 ("blk-mq: Create hctx for each present CPU")
+Tested-by: Christian Borntraeger <borntraeger@de.ibm.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
+Signed-off-by: Ming Lei <ming.lei@redhat.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ block/blk-mq-cpumap.c | 5 -----
+ 1 file changed, 5 deletions(-)
+
+--- a/block/blk-mq-cpumap.c
++++ b/block/blk-mq-cpumap.c
+@@ -16,11 +16,6 @@
+
+ static int cpu_to_queue_index(unsigned int nr_queues, const int cpu)
+ {
+- /*
+- * Non present CPU will be mapped to queue index 0.
+- */
+- if (!cpu_present(cpu))
+- return 0;
+ return cpu % nr_queues;
+ }
+
--- /dev/null
+From b94929d975c8423defc9aededb0f499ff936b509 Mon Sep 17 00:00:00 2001
+From: Yunlong Song <yunlong.song@huawei.com>
+Date: Mon, 29 Jan 2018 11:37:45 +0800
+Subject: f2fs: fix heap mode to reset it back
+
+From: Yunlong Song <yunlong.song@huawei.com>
+
+commit b94929d975c8423defc9aededb0f499ff936b509 upstream.
+
+Commit 7a20b8a61eff81bdb7097a578752a74860e9d142 ("f2fs: allocate node
+and hot data in the beginning of partition") introduces another mount
+option, heap, to reset it back. But it does not do anything for heap
+mode, so fix it.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Yunlong Song <yunlong.song@huawei.com>
+Reviewed-by: Chao Yu <yuchao0@huawei.com>
+Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/f2fs/gc.c | 5 +++--
+ fs/f2fs/segment.c | 3 ++-
+ 2 files changed, 5 insertions(+), 3 deletions(-)
+
+--- a/fs/f2fs/gc.c
++++ b/fs/f2fs/gc.c
+@@ -191,8 +191,9 @@ static void select_policy(struct f2fs_sb
+ if (gc_type != FG_GC && p->max_search > sbi->max_victim_search)
+ p->max_search = sbi->max_victim_search;
+
+- /* let's select beginning hot/small space first */
+- if (type == CURSEG_HOT_DATA || IS_NODESEG(type))
++ /* let's select beginning hot/small space first in no_heap mode*/
++ if (test_opt(sbi, NOHEAP) &&
++ (type == CURSEG_HOT_DATA || IS_NODESEG(type)))
+ p->offset = 0;
+ else
+ p->offset = SIT_I(sbi)->last_victim[p->gc_mode];
+--- a/fs/f2fs/segment.c
++++ b/fs/f2fs/segment.c
+@@ -1992,7 +1992,8 @@ static unsigned int __get_next_segno(str
+ if (sbi->segs_per_sec != 1)
+ return CURSEG_I(sbi, type)->segno;
+
+- if (type == CURSEG_HOT_DATA || IS_NODESEG(type))
++ if (test_opt(sbi, NOHEAP) &&
++ (type == CURSEG_HOT_DATA || IS_NODESEG(type)))
+ return 0;
+
+ if (SIT_I(sbi)->last_victim[ALLOC_NEXT])
--- /dev/null
+From 5df63c2a149ae65a9ec239e7c2af44efa6f79beb Mon Sep 17 00:00:00 2001
+From: Mike Kravetz <mike.kravetz@oracle.com>
+Date: Thu, 5 Apr 2018 16:18:21 -0700
+Subject: hugetlbfs: fix bug in pgoff overflow checking
+
+From: Mike Kravetz <mike.kravetz@oracle.com>
+
+commit 5df63c2a149ae65a9ec239e7c2af44efa6f79beb upstream.
+
+This is a fix for a regression in 32 bit kernels caused by an invalid
+check for pgoff overflow in hugetlbfs mmap setup. The check incorrectly
+specified that the size of a loff_t was the same as the size of a long.
+The regression prevents mapping hugetlbfs files at offsets greater than
+4GB on 32 bit kernels.
+
+On 32 bit kernels conversion from a page based unsigned long can not
+overflow a loff_t byte offset. Therefore, skip this check if
+sizeof(unsigned long) != sizeof(loff_t).
+
+Link: http://lkml.kernel.org/r/20180330145402.5053-1-mike.kravetz@oracle.com
+Fixes: 63489f8e8211 ("hugetlbfs: check for pgoff value overflow")
+Reported-by: Dan Rue <dan.rue@linaro.org>
+Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
+Tested-by: Anders Roxell <anders.roxell@linaro.org>
+Cc: Michal Hocko <mhocko@kernel.org>
+Cc: Yisheng Xie <xieyisheng1@huawei.com>
+Cc: "Kirill A . Shutemov" <kirill.shutemov@linux.intel.com>
+Cc: Nic Losby <blurbdust@gmail.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/hugetlbfs/inode.c | 10 +++++++---
+ 1 file changed, 7 insertions(+), 3 deletions(-)
+
+--- a/fs/hugetlbfs/inode.c
++++ b/fs/hugetlbfs/inode.c
+@@ -148,10 +148,14 @@ static int hugetlbfs_file_mmap(struct fi
+
+ /*
+ * page based offset in vm_pgoff could be sufficiently large to
+- * overflow a (l)off_t when converted to byte offset.
++ * overflow a loff_t when converted to byte offset. This can
++ * only happen on architectures where sizeof(loff_t) ==
++ * sizeof(unsigned long). So, only check in those instances.
+ */
+- if (vma->vm_pgoff & PGOFF_LOFFT_MAX)
+- return -EINVAL;
++ if (sizeof(unsigned long) == sizeof(loff_t)) {
++ if (vma->vm_pgoff & PGOFF_LOFFT_MAX)
++ return -EINVAL;
++ }
+
+ /* must be huge page aligned */
+ if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
--- /dev/null
+From 8351760ff5b2042039554b4948ddabaac644a976 Mon Sep 17 00:00:00 2001
+From: Yury Norov <ynorov@caviumnetworks.com>
+Date: Thu, 5 Apr 2018 16:18:25 -0700
+Subject: lib: fix stall in __bitmap_parselist()
+
+From: Yury Norov <ynorov@caviumnetworks.com>
+
+commit 8351760ff5b2042039554b4948ddabaac644a976 upstream.
+
+syzbot is catching stalls at __bitmap_parselist()
+(https://syzkaller.appspot.com/bug?id=ad7e0351fbc90535558514a71cd3edc11681997a).
+The trigger is
+
+ unsigned long v = 0;
+ bitmap_parselist("7:,", &v, BITS_PER_LONG);
+
+which results in hitting infinite loop at
+
+ while (a <= b) {
+ off = min(b - a + 1, used_size);
+ bitmap_set(maskp, a, off);
+ a += group_size;
+ }
+
+due to used_size == group_size == 0.
+
+Link: http://lkml.kernel.org/r/20180404162647.15763-1-ynorov@caviumnetworks.com
+Fixes: 0a5ce0831d04382a ("lib/bitmap.c: make bitmap_parselist() thread-safe and much faster")
+Signed-off-by: Yury Norov <ynorov@caviumnetworks.com>
+Reported-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+Reported-by: syzbot <syzbot+6887cbb011c8054e8a3d@syzkaller.appspotmail.com>
+Cc: Noam Camus <noamca@mellanox.com>
+Cc: Rasmus Villemoes <linux@rasmusvillemoes.dk>
+Cc: Matthew Wilcox <mawilcox@microsoft.com>
+Cc: Mauro Carvalho Chehab <mchehab@kernel.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ lib/bitmap.c | 2 +-
+ lib/test_bitmap.c | 4 ++++
+ 2 files changed, 5 insertions(+), 1 deletion(-)
+
+--- a/lib/bitmap.c
++++ b/lib/bitmap.c
+@@ -605,7 +605,7 @@ static int __bitmap_parselist(const char
+ /* if no digit is after '-', it's wrong*/
+ if (at_start && in_range)
+ return -EINVAL;
+- if (!(a <= b) || !(used_size <= group_size))
++ if (!(a <= b) || group_size == 0 || !(used_size <= group_size))
+ return -EINVAL;
+ if (b >= nmaskbits)
+ return -ERANGE;
+--- a/lib/test_bitmap.c
++++ b/lib/test_bitmap.c
+@@ -218,6 +218,10 @@ static const struct test_bitmap_parselis
+ {-EINVAL, "-1", NULL, 8, 0},
+ {-EINVAL, "-0", NULL, 8, 0},
+ {-EINVAL, "10-1", NULL, 8, 0},
++ {-EINVAL, "0-31:", NULL, 8, 0},
++ {-EINVAL, "0-31:0", NULL, 8, 0},
++ {-EINVAL, "0-31:0/0", NULL, 8, 0},
++ {-EINVAL, "0-31:1/0", NULL, 8, 0},
+ {-EINVAL, "0-31:10/1", NULL, 8, 0},
+ };
+
--- /dev/null
+From 880a3a5325489a143269a8e172e7563ebf9897bc Mon Sep 17 00:00:00 2001
+From: "J. Bruce Fields" <bfields@redhat.com>
+Date: Wed, 21 Mar 2018 17:19:02 -0400
+Subject: nfsd: fix incorrect umasks
+
+From: J. Bruce Fields <bfields@redhat.com>
+
+commit 880a3a5325489a143269a8e172e7563ebf9897bc upstream.
+
+We're neglecting to clear the umask after it's set, which can cause a
+later unrelated rpc to (incorrectly) use the same umask if it happens to
+be processed by the same thread.
+
+There's a more subtle problem here too:
+
+An NFSv4 compound request is decoded all in one pass before any
+operations are executed.
+
+Currently we're setting current->fs->umask at the time we decode the
+compound. In theory a single compound could contain multiple creates
+each setting a umask. In that case we'd end up using whichever umask
+was passed in the *last* operation as the umask for all the creates,
+whether that was correct or not.
+
+So, we should just be saving the umask at decode time and waiting to set
+it until we actually process the corresponding operation.
+
+In practice it's unlikely any client would do multiple creates in a
+single compound. And even if it did they'd likely be from the same
+process (hence carry the same umask). So this is a little academic, but
+we should get it right anyway.
+
+Fixes: 47057abde515 (nfsd: add support for the umask attribute)
+Cc: stable@vger.kernel.org
+Reported-by: Lucash Stach <l.stach@pengutronix.de>
+Signed-off-by: J. Bruce Fields <bfields@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/nfsd/nfs4proc.c | 12 ++++++++++--
+ fs/nfsd/nfs4xdr.c | 8 +++-----
+ fs/nfsd/xdr4.h | 2 ++
+ 3 files changed, 15 insertions(+), 7 deletions(-)
+
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -32,6 +32,7 @@
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
++#include <linux/fs_struct.h>
+ #include <linux/file.h>
+ #include <linux/falloc.h>
+ #include <linux/slab.h>
+@@ -252,11 +253,13 @@ do_open_lookup(struct svc_rqst *rqstp, s
+ * Note: create modes (UNCHECKED,GUARDED...) are the same
+ * in NFSv4 as in v3 except EXCLUSIVE4_1.
+ */
++ current->fs->umask = open->op_umask;
+ status = do_nfsd_create(rqstp, current_fh, open->op_fname.data,
+ open->op_fname.len, &open->op_iattr,
+ *resfh, open->op_createmode,
+ (u32 *)open->op_verf.data,
+ &open->op_truncate, &open->op_created);
++ current->fs->umask = 0;
+
+ if (!status && open->op_label.len)
+ nfsd4_security_inode_setsecctx(*resfh, &open->op_label, open->op_bmval);
+@@ -608,6 +611,7 @@ nfsd4_create(struct svc_rqst *rqstp, str
+ if (status)
+ return status;
+
++ current->fs->umask = create->cr_umask;
+ switch (create->cr_type) {
+ case NF4LNK:
+ status = nfsd_symlink(rqstp, &cstate->current_fh,
+@@ -616,20 +620,22 @@ nfsd4_create(struct svc_rqst *rqstp, str
+ break;
+
+ case NF4BLK:
++ status = nfserr_inval;
+ rdev = MKDEV(create->cr_specdata1, create->cr_specdata2);
+ if (MAJOR(rdev) != create->cr_specdata1 ||
+ MINOR(rdev) != create->cr_specdata2)
+- return nfserr_inval;
++ goto out_umask;
+ status = nfsd_create(rqstp, &cstate->current_fh,
+ create->cr_name, create->cr_namelen,
+ &create->cr_iattr, S_IFBLK, rdev, &resfh);
+ break;
+
+ case NF4CHR:
++ status = nfserr_inval;
+ rdev = MKDEV(create->cr_specdata1, create->cr_specdata2);
+ if (MAJOR(rdev) != create->cr_specdata1 ||
+ MINOR(rdev) != create->cr_specdata2)
+- return nfserr_inval;
++ goto out_umask;
+ status = nfsd_create(rqstp, &cstate->current_fh,
+ create->cr_name, create->cr_namelen,
+ &create->cr_iattr,S_IFCHR, rdev, &resfh);
+@@ -673,6 +679,8 @@ nfsd4_create(struct svc_rqst *rqstp, str
+ fh_dup2(&cstate->current_fh, &resfh);
+ out:
+ fh_put(&resfh);
++out_umask:
++ current->fs->umask = 0;
+ return status;
+ }
+
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -33,7 +33,6 @@
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+-#include <linux/fs_struct.h>
+ #include <linux/file.h>
+ #include <linux/slab.h>
+ #include <linux/namei.h>
+@@ -683,7 +682,7 @@ nfsd4_decode_create(struct nfsd4_compoun
+
+ status = nfsd4_decode_fattr(argp, create->cr_bmval, &create->cr_iattr,
+ &create->cr_acl, &create->cr_label,
+- ¤t->fs->umask);
++ &create->cr_umask);
+ if (status)
+ goto out;
+
+@@ -928,7 +927,6 @@ nfsd4_decode_open(struct nfsd4_compounda
+ case NFS4_OPEN_NOCREATE:
+ break;
+ case NFS4_OPEN_CREATE:
+- current->fs->umask = 0;
+ READ_BUF(4);
+ open->op_createmode = be32_to_cpup(p++);
+ switch (open->op_createmode) {
+@@ -936,7 +934,7 @@ nfsd4_decode_open(struct nfsd4_compounda
+ case NFS4_CREATE_GUARDED:
+ status = nfsd4_decode_fattr(argp, open->op_bmval,
+ &open->op_iattr, &open->op_acl, &open->op_label,
+- ¤t->fs->umask);
++ &open->op_umask);
+ if (status)
+ goto out;
+ break;
+@@ -951,7 +949,7 @@ nfsd4_decode_open(struct nfsd4_compounda
+ COPYMEM(open->op_verf.data, NFS4_VERIFIER_SIZE);
+ status = nfsd4_decode_fattr(argp, open->op_bmval,
+ &open->op_iattr, &open->op_acl, &open->op_label,
+- ¤t->fs->umask);
++ &open->op_umask);
+ if (status)
+ goto out;
+ break;
+--- a/fs/nfsd/xdr4.h
++++ b/fs/nfsd/xdr4.h
+@@ -118,6 +118,7 @@ struct nfsd4_create {
+ } u;
+ u32 cr_bmval[3]; /* request */
+ struct iattr cr_iattr; /* request */
++ int cr_umask; /* request */
+ struct nfsd4_change_info cr_cinfo; /* response */
+ struct nfs4_acl *cr_acl;
+ struct xdr_netobj cr_label;
+@@ -228,6 +229,7 @@ struct nfsd4_open {
+ u32 op_why_no_deleg; /* response - DELEG_NONE_EXT only */
+ u32 op_create; /* request */
+ u32 op_createmode; /* request */
++ int op_umask; /* request */
+ u32 op_bmval[3]; /* request */
+ struct iattr op_iattr; /* UNCHECKED4, GUARDED4, EXCLUSIVE4_1 */
+ nfs4_verifier op_verf __attribute__((aligned(32)));
--- /dev/null
+From 3ec9b3fafcaf441cc4d46b9742cd6ec0c79f8df0 Mon Sep 17 00:00:00 2001
+From: Amir Goldstein <amir73il@gmail.com>
+Date: Mon, 12 Mar 2018 10:30:41 -0400
+Subject: ovl: fix lookup with middle layer opaque dir and absolute path redirects
+
+From: Amir Goldstein <amir73il@gmail.com>
+
+commit 3ec9b3fafcaf441cc4d46b9742cd6ec0c79f8df0 upstream.
+
+As of now if we encounter an opaque dir while looking for a dentry, we set
+d->last=true. This means that there is no need to look further in any of
+the lower layers. This works fine as long as there are no redirects or
+relative redirects. But what if there is an absolute redirect on the
+children dentry of opaque directory. We still need to continue to look into
+next lower layer. This patch fixes it.
+
+Here is an example to demonstrate the issue. Say you have following setup.
+
+upper: /redirect (redirect=/a/b/c)
+lower1: /a/[b]/c ([b] is opaque) (c has absolute redirect=/a/b/d/)
+lower0: /a/b/d/foo
+
+Now "redirect" dir should merge with lower1:/a/b/c/ and lower0:/a/b/d.
+Note, despite the fact lower1:/a/[b] is opaque, we need to continue to look
+into lower0 because children c has an absolute redirect.
+
+Following is a reproducer.
+
+Watch me make foo disappear:
+
+ $ mkdir lower middle upper work work2 merged
+ $ mkdir lower/origin
+ $ touch lower/origin/foo
+ $ mount -t overlay none merged/ \
+ -olowerdir=lower,upperdir=middle,workdir=work2
+ $ mkdir merged/pure
+ $ mv merged/origin merged/pure/redirect
+ $ umount merged
+ $ mount -t overlay none merged/ \
+ -olowerdir=middle:lower,upperdir=upper,workdir=work
+ $ mv merged/pure/redirect merged/redirect
+
+Now you see foo inside a twice redirected merged dir:
+
+ $ ls merged/redirect
+ foo
+ $ umount merged
+ $ mount -t overlay none merged/ \
+ -olowerdir=middle:lower,upperdir=upper,workdir=work
+
+After mount cycle you don't see foo inside the same dir:
+
+ $ ls merged/redirect
+
+During middle layer lookup, the opaqueness of middle/pure is left in
+the lookup state and then middle/pure/redirect is wrongly treated as
+opaque.
+
+Fixes: 02b69b284cd7 ("ovl: lookup redirects")
+Cc: <stable@vger.kernel.org> #v4.10
+Signed-off-by: Amir Goldstein <amir73il@gmail.com>
+Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
+Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/overlayfs/namei.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/fs/overlayfs/namei.c
++++ b/fs/overlayfs/namei.c
+@@ -56,6 +56,15 @@ static int ovl_check_redirect(struct den
+ if (s == next)
+ goto invalid;
+ }
++ /*
++ * One of the ancestor path elements in an absolute path
++ * lookup in ovl_lookup_layer() could have been opaque and
++ * that will stop further lookup in lower layers (d->stop=true)
++ * But we have found an absolute redirect in decendant path
++ * element and that should force continue lookup in lower
++ * layers (reset d->stop).
++ */
++ d->stop = false;
+ } else {
+ if (strchr(buf, '/') != NULL)
+ goto invalid;
x86-mce-amd-edac-mce_amd-enumerate-reserved-smca-bank-type.patch
x86-mce-amd-get-address-from-already-initialized-block.patch
hwmon-ina2xx-fix-access-to-uninitialized-mutex.patch
+ath9k-protect-queue-draining-by-rcu_read_lock.patch
+sunrpc-remove-incorrect-hmac-request-initialization.patch
+f2fs-fix-heap-mode-to-reset-it-back.patch
+lib-fix-stall-in-__bitmap_parselist.patch
+blk-mq-don-t-keep-offline-cpus-mapped-to-hctx-0.patch
+ovl-fix-lookup-with-middle-layer-opaque-dir-and-absolute-path-redirects.patch
+xen-xenbus_dev_frontend-fix-xs_transaction_end-handling.patch
+hugetlbfs-fix-bug-in-pgoff-overflow-checking.patch
+nfsd-fix-incorrect-umasks.patch
--- /dev/null
+From f3aefb6a7066e24bfea7fcf1b07907576de69d63 Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Wed, 28 Mar 2018 10:57:22 -0700
+Subject: sunrpc: remove incorrect HMAC request initialization
+
+From: Eric Biggers <ebiggers@google.com>
+
+commit f3aefb6a7066e24bfea7fcf1b07907576de69d63 upstream.
+
+make_checksum_hmac_md5() is allocating an HMAC transform and doing
+crypto API calls in the following order:
+
+ crypto_ahash_init()
+ crypto_ahash_setkey()
+ crypto_ahash_digest()
+
+This is wrong because it makes no sense to init() the request before a
+key has been set, given that the initial state depends on the key. And
+digest() is short for init() + update() + final(), so in this case
+there's no need to explicitly call init() at all.
+
+Before commit 9fa68f620041 ("crypto: hash - prevent using keyed hashes
+without setting key") the extra init() had no real effect, at least for
+the software HMAC implementation. (There are also hardware drivers that
+implement HMAC-MD5, and it's not immediately obvious how gracefully they
+handle init() before setkey().) But now the crypto API detects this
+incorrect initialization and returns -ENOKEY. This is breaking NFS
+mounts in some cases.
+
+Fix it by removing the incorrect call to crypto_ahash_init().
+
+Reported-by: Michael Young <m.a.young@durham.ac.uk>
+Fixes: 9fa68f620041 ("crypto: hash - prevent using keyed hashes without setting key")
+Fixes: fffdaef2eb4a ("gss_krb5: Add support for rc4-hmac encryption")
+Cc: stable@vger.kernel.org
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Signed-off-by: J. Bruce Fields <bfields@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/sunrpc/auth_gss/gss_krb5_crypto.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
++++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
+@@ -237,9 +237,6 @@ make_checksum_hmac_md5(struct krb5_ctx *
+
+ ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
+
+- err = crypto_ahash_init(req);
+- if (err)
+- goto out;
+ err = crypto_ahash_setkey(hmac_md5, cksumkey, kctx->gk5e->keylength);
+ if (err)
+ goto out;
--- /dev/null
+From 2a22ee6c3ab1d761bc9c04f1e4117edd55b82f09 Mon Sep 17 00:00:00 2001
+From: Simon Gaiser <simon@invisiblethingslab.com>
+Date: Thu, 15 Mar 2018 03:43:20 +0100
+Subject: xen: xenbus_dev_frontend: Fix XS_TRANSACTION_END handling
+
+From: Simon Gaiser <simon@invisiblethingslab.com>
+
+commit 2a22ee6c3ab1d761bc9c04f1e4117edd55b82f09 upstream.
+
+Commit fd8aa9095a95 ("xen: optimize xenbus driver for multiple
+concurrent xenstore accesses") made a subtle change to the semantic of
+xenbus_dev_request_and_reply() and xenbus_transaction_end().
+
+Before on an error response to XS_TRANSACTION_END
+xenbus_dev_request_and_reply() would not decrement the active
+transaction counter. But xenbus_transaction_end() has always counted the
+transaction as finished regardless of the response.
+
+The new behavior is that xenbus_dev_request_and_reply() and
+xenbus_transaction_end() will always count the transaction as finished
+regardless the response code (handled in xs_request_exit()).
+
+But xenbus_dev_frontend tries to end a transaction on closing of the
+device if the XS_TRANSACTION_END failed before. Trying to close the
+transaction twice corrupts the reference count. So fix this by also
+considering a transaction closed if we have sent XS_TRANSACTION_END once
+regardless of the return code.
+
+Cc: <stable@vger.kernel.org> # 4.11
+Fixes: fd8aa9095a95 ("xen: optimize xenbus driver for multiple concurrent xenstore accesses")
+Signed-off-by: Simon Gaiser <simon@invisiblethingslab.com>
+Reviewed-by: Juergen Gross <jgross@suse.com>
+Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/xen/xenbus/xenbus_dev_frontend.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
++++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
+@@ -365,7 +365,7 @@ void xenbus_dev_queue_reply(struct xb_re
+ if (WARN_ON(rc))
+ goto out;
+ }
+- } else if (req->msg.type == XS_TRANSACTION_END) {
++ } else if (req->type == XS_TRANSACTION_END) {
+ trans = xenbus_get_transaction(u, req->msg.tx_id);
+ if (WARN_ON(!trans))
+ goto out;