--- /dev/null
+From 4ddd56b003f251091a67c15ae3fe4a5c5c5e390a Mon Sep 17 00:00:00 2001
+From: Bart Van Assche <bart.vanassche@wdc.com>
+Date: Thu, 17 Aug 2017 13:12:44 -0700
+Subject: block: Relax a check in blk_start_queue()
+
+From: Bart Van Assche <bart.vanassche@wdc.com>
+
+commit 4ddd56b003f251091a67c15ae3fe4a5c5c5e390a upstream.
+
+Calling blk_start_queue() from interrupt context with the queue
+lock held and without disabling IRQs, as the skd driver does, is
+safe. This patch prevents loading the skd driver from triggering the
+following warning:
+
+WARNING: CPU: 11 PID: 1348 at block/blk-core.c:283 blk_start_queue+0x84/0xa0
+RIP: 0010:blk_start_queue+0x84/0xa0
+Call Trace:
+ skd_unquiesce_dev+0x12a/0x1d0 [skd]
+ skd_complete_internal+0x1e7/0x5a0 [skd]
+ skd_complete_other+0xc2/0xd0 [skd]
+ skd_isr_completion_posted.isra.30+0x2a5/0x470 [skd]
+ skd_isr+0x14f/0x180 [skd]
+ irq_forced_thread_fn+0x2a/0x70
+ irq_thread+0x144/0x1a0
+ kthread+0x125/0x140
+ ret_from_fork+0x2a/0x40
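+
+As an illustration only (the skd_device layout below is made up, not the
+real driver structure), this is the calling pattern the relaxed check is
+meant to accept: blk_start_queue() invoked from interrupt context with
+the queue lock held, but without an explicit local_irq_save():
+
+  #include <linux/blkdev.h>
+  #include <linux/interrupt.h>
+
+  struct skd_device {                      /* illustrative fields only */
+          spinlock_t lock;                 /* also serves as q->queue_lock */
+          struct request_queue *queue;
+  };
+
+  static irqreturn_t skd_isr(int irq, void *dev_id)
+  {
+          struct skd_device *skdev = dev_id;
+
+          spin_lock(&skdev->lock);         /* queue lock held, IRQs untouched */
+          /* ... post completions for the requests that finished ... */
+          blk_start_queue(skdev->queue);   /* old check warned unless
+                                            * irqs_disabled(); the relaxed
+                                            * check also accepts interrupt
+                                            * context */
+          spin_unlock(&skdev->lock);
+
+          return IRQ_HANDLED;
+  }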
+
+Fixes: commit a038e2536472 ("[PATCH] blk_start_queue() must be called with irq disabled - add warning")
+Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
+Cc: Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
+Cc: Andrew Morton <akpm@osdl.org>
+Cc: Christoph Hellwig <hch@lst.de>
+Cc: Hannes Reinecke <hare@suse.de>
+Cc: Johannes Thumshirn <jthumshirn@suse.de>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ block/blk-core.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -194,7 +194,7 @@ EXPORT_SYMBOL(blk_delay_queue);
+ **/
+ void blk_start_queue(struct request_queue *q)
+ {
+- WARN_ON(!irqs_disabled());
++ WARN_ON(!in_interrupt() && !irqs_disabled());
+
+ queue_flag_clear(QUEUE_FLAG_STOPPED, q);
+ __blk_run_queue(q);
--- /dev/null
+From smueller@chronox.de Fri Sep 22 11:04:43 2017
+From: Stephan Mueller <smueller@chronox.de>
+Date: Thu, 21 Sep 2017 10:16:53 +0200
+Subject: [PATCH - RESEND] crypto: AF_ALG - remove SGL terminator indicator when chaining
+To: herbert@gondor.apana.org.au, greg@kroah.com
+Cc: linux-crypto@vger.kernel.org
+Message-ID: <5857040.2sfW0oRrdW@tauon.chronox.de>
+
+From: Stephan Mueller <smueller@chronox.de>
+
+Fixed differently upstream as commit 2d97591ef43d ("crypto: af_alg - consolidation of duplicate code")
+
+The SGL is MAX_SGL_ENTS + 1 in size. The last SG entry is used for the
+chaining and is properly updated with the sg_chain invocation. During
+the filling-in of the initial SG entries, sg_mark_end is called for each
+SG entry. This is appropriate as long as no additional SGL is chained
+with the current SGL. However, when a new SGL is chained and the last
+SG entry is updated with sg_chain, the last but one entry still contains
+the end marker from the sg_mark_end. This end marker must be removed as
+otherwise a walk of the chained SGLs will cause a NULL pointer
+dereference at the last but one SG entry, because sg_next will return
+NULL.
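+
+A minimal sketch of that scatterlist behaviour using the generic API
+(not the algif_skcipher code itself; the table sizes and buffers are
+made up for illustration):
+
+  #include <linux/scatterlist.h>
+
+  static char a[64], b[64];
+  static struct scatterlist first[4], second[4];
+
+  static void chain_example(void)
+  {
+          sg_init_table(first, 4);      /* first[3] is reserved for chaining */
+          sg_init_table(second, 4);
+
+          sg_set_buf(&first[2], a, sizeof(a));
+          sg_mark_end(&first[2]);       /* end marker set while filling in */
+          sg_set_buf(&second[0], b, sizeof(b));
+
+          /* Chain "second" through the spare last slot of "first". */
+          sg_chain(first, 4, second);
+
+          /* Without this, sg_next(&first[2]) still returns NULL because the
+           * end marker is set, so a walk stops early and callers that expect
+           * more entries dereference NULL. */
+          sg_unmark_end(&first[2]);
+  }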
+
+This patch only applies to kernels up to and including 4.13. Commit
+2d97591ef43d0587be22ad1b0d758d6df4999a0b, added in 4.14-rc1,
+introduced a completely new code base which addresses this bug in
+a different way. However, that patch is too invasive for stable kernels
+and was therefore not marked for stable.
+
+Fixes: 8ff590903d5fc ("crypto: algif_skcipher - User-space interface for skcipher operations")
+Signed-off-by: Stephan Mueller <smueller@chronox.de>
+Acked-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ crypto/algif_skcipher.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/crypto/algif_skcipher.c
++++ b/crypto/algif_skcipher.c
+@@ -92,8 +92,10 @@ static int skcipher_alloc_sgl(struct soc
+ sg_init_table(sgl->sg, MAX_SGL_ENTS + 1);
+ sgl->cur = 0;
+
+- if (sg)
++ if (sg) {
+ scatterwalk_sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);
++ sg_unmark_end(sg + (MAX_SGL_ENTS - 1));
++ }
+
+ list_add_tail(&sgl->list, &ctx->tsgl);
+ }
--- /dev/null
+From b0a5a9589decd07db755d6a8d9c0910d96ff7992 Mon Sep 17 00:00:00 2001
+From: "zhangyi (F)" <yi.zhang@huawei.com>
+Date: Thu, 24 Aug 2017 15:19:39 -0400
+Subject: ext4: fix incorrect quotaoff if the quota feature is enabled
+
+From: zhangyi (F) <yi.zhang@huawei.com>
+
+commit b0a5a9589decd07db755d6a8d9c0910d96ff7992 upstream.
+
+With the quota feature enabled, ext4 quota usage tracking
+should always stay enabled. But ext4_orphan_cleanup() turns
+quotas off directly (a path meant for the older journaled
+quota), so we cannot turn them on again via "quotaon"
+without unmounting and remounting the filesystem.
+
+Simple reproduce:
+
+ mkfs.ext4 -O project,quota /dev/vdb1
+ mount -o prjquota /dev/vdb1 /mnt
+ chattr -p 123 /mnt
+ chattr +P /mnt
+ touch /mnt/aa /mnt/bb
+ exec 100<>/mnt/aa
+ rm -f /mnt/aa
+ sync
+ echo c > /proc/sysrq-trigger
+
+ #reboot and mount
+ mount -o prjquota /dev/vdb1 /mnt
+ #query status
+ quotaon -Ppv /dev/vdb1
+ #output
+ quotaon: Cannot find mountpoint for device /dev/vdb1
+ quotaon: No correct mountpoint specified.
+
+This patch adds a check for journaled quotas to avoid the
+incorrect quotaoff when ext4 has the quota feature enabled.
+
+Signed-off-by: zhangyi (F) <yi.zhang@huawei.com>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext4/super.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -2205,7 +2205,7 @@ static void ext4_orphan_cleanup(struct s
+ #ifdef CONFIG_QUOTA
+ /* Needed for iput() to work correctly and not trash data */
+ sb->s_flags |= MS_ACTIVE;
+- /* Turn on quotas so that they are updated correctly */
++ /* Turn on journaled quotas so that they are updated correctly */
+ for (i = 0; i < EXT4_MAXQUOTAS; i++) {
+ if (EXT4_SB(sb)->s_qf_names[i]) {
+ int ret = ext4_quota_on_mount(sb, i);
+@@ -2271,9 +2271,9 @@ static void ext4_orphan_cleanup(struct s
+ ext4_msg(sb, KERN_INFO, "%d truncate%s cleaned up",
+ PLURAL(nr_truncates));
+ #ifdef CONFIG_QUOTA
+- /* Turn quotas off */
++ /* Turn off journaled quotas if they were enabled for orphan cleanup */
+ for (i = 0; i < EXT4_MAXQUOTAS; i++) {
+- if (sb_dqopt(sb)->files[i])
++ if (EXT4_SB(sb)->s_qf_names[i] && sb_dqopt(sb)->files[i])
+ dquot_quota_off(sb, i);
+ }
+ #endif
--- /dev/null
+From e8a27f836f165c26f867ece7f31eb5c811692319 Mon Sep 17 00:00:00 2001
+From: NeilBrown <neilb@suse.com>
+Date: Thu, 31 Aug 2017 10:23:25 +1000
+Subject: md/bitmap: disable bitmap_resize for file-backed bitmaps.
+
+From: NeilBrown <neilb@suse.com>
+
+commit e8a27f836f165c26f867ece7f31eb5c811692319 upstream.
+
+bitmap_resize() does not work for file-backed bitmaps.
+The buffer_heads are allocated and initialized when
+the bitmap is read from the file, but resize doesn't
+read from the file; it loads from the internal bitmap.
+When it comes time to write the new bitmap, the bh is
+non-existent and we crash.
+
+Growing an array normally means the bitmap needs to grow as well.
+Doing that inside the kernel is possible, but would need more code.
+It is probably easier to require people who use file-backed
+bitmaps to remove them and re-add after a reshape.
+
+So this patch disables the resizing of arrays which have
+file-backed bitmaps. This is better than crashing.
+
+Reported-by: Zhilong Liu <zlliu@suse.com>
+Fixes: d60b479d177a ("md/bitmap: add bitmap_resize function to allow bitmap resizing.")
+Signed-off-by: NeilBrown <neilb@suse.com>
+Signed-off-by: Shaohua Li <shli@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/bitmap.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/drivers/md/bitmap.c
++++ b/drivers/md/bitmap.c
+@@ -1802,6 +1802,11 @@ int bitmap_resize(struct bitmap *bitmap,
+ long pages;
+ struct bitmap_page *new_bp;
+
++ if (bitmap->storage.file && !init) {
++ pr_info("md: cannot resize file-based bitmap\n");
++ return -EINVAL;
++ }
++
+ if (chunksize == 0) {
+ /* If there is enough space, leave the chunk size unchanged,
+ * else increase by factor of two until there is enough space.
--- /dev/null
+From f9effe925039cf54489b5c04e0d40073bb3a123d Mon Sep 17 00:00:00 2001
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Thu, 24 Aug 2017 20:49:57 +1000
+Subject: powerpc: Fix DAR reporting when alignment handler faults
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+commit f9effe925039cf54489b5c04e0d40073bb3a123d upstream.
+
+Anton noticed that if we fault part way through emulating an unaligned
+instruction, we don't update the DAR to reflect that.
+
+The DAR value is eventually reported back to userspace as the address
+in the SEGV signal, and if userspace is using that value to demand
+fault then it can be confused by us not setting the value correctly.
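+
+For background, this is the kind of userspace demand-faulting the
+paragraph refers to: a SIGSEGV handler that trusts si_addr, which the
+kernel fills from the DAR on powerpc. The sketch below is illustrative
+only; REGION_BASE, REGION_SIZE and PAGE_SZ are made-up parameters and
+the error handling is minimal:
+
+  #include <signal.h>
+  #include <stdint.h>
+  #include <stdlib.h>
+  #include <sys/mman.h>
+
+  #define REGION_BASE 0x40000000UL
+  #define REGION_SIZE (1UL << 20)
+  #define PAGE_SZ     4096UL
+
+  static void segv_handler(int sig, siginfo_t *info, void *ucontext)
+  {
+          uintptr_t fault = (uintptr_t)info->si_addr;  /* comes from the DAR */
+
+          if (fault < REGION_BASE || fault >= REGION_BASE + REGION_SIZE)
+                  abort();                             /* a genuine crash */
+
+          /* Map in the faulting page. If the kernel reported a stale DAR,
+           * the wrong page gets mapped and the access keeps faulting. */
+          if (mmap((void *)(fault & ~(PAGE_SZ - 1)), PAGE_SZ,
+                   PROT_READ | PROT_WRITE,
+                   MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0) == MAP_FAILED)
+                  abort();
+  }
+
+  static void install_handler(void)
+  {
+          struct sigaction sa = { 0 };
+
+          sa.sa_sigaction = segv_handler;
+          sa.sa_flags = SA_SIGINFO;
+          sigaction(SIGSEGV, &sa, NULL);
+  }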
+
+This patch is ugly as hell, but is intended to be the minimal fix and
+backports easily.
+
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Reviewed-by: Paul Mackerras <paulus@ozlabs.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/align.c | 119 +++++++++++++++++++++++++++-----------------
+ 1 file changed, 74 insertions(+), 45 deletions(-)
+
+--- a/arch/powerpc/kernel/align.c
++++ b/arch/powerpc/kernel/align.c
+@@ -236,6 +236,28 @@ static int emulate_dcbz(struct pt_regs *
+
+ #define SWIZ_PTR(p) ((unsigned char __user *)((p) ^ swiz))
+
++#define __get_user_or_set_dar(_regs, _dest, _addr) \
++ ({ \
++ int rc = 0; \
++ typeof(_addr) __addr = (_addr); \
++ if (__get_user_inatomic(_dest, __addr)) { \
++ _regs->dar = (unsigned long)__addr; \
++ rc = -EFAULT; \
++ } \
++ rc; \
++ })
++
++#define __put_user_or_set_dar(_regs, _src, _addr) \
++ ({ \
++ int rc = 0; \
++ typeof(_addr) __addr = (_addr); \
++ if (__put_user_inatomic(_src, __addr)) { \
++ _regs->dar = (unsigned long)__addr; \
++ rc = -EFAULT; \
++ } \
++ rc; \
++ })
++
+ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
+ unsigned int reg, unsigned int nb,
+ unsigned int flags, unsigned int instr,
+@@ -264,9 +286,10 @@ static int emulate_multiple(struct pt_re
+ } else {
+ unsigned long pc = regs->nip ^ (swiz & 4);
+
+- if (__get_user_inatomic(instr,
+- (unsigned int __user *)pc))
++ if (__get_user_or_set_dar(regs, instr,
++ (unsigned int __user *)pc))
+ return -EFAULT;
++
+ if (swiz == 0 && (flags & SW))
+ instr = cpu_to_le32(instr);
+ nb = (instr >> 11) & 0x1f;
+@@ -310,31 +333,31 @@ static int emulate_multiple(struct pt_re
+ ((nb0 + 3) / 4) * sizeof(unsigned long));
+
+ for (i = 0; i < nb; ++i, ++p)
+- if (__get_user_inatomic(REG_BYTE(rptr, i ^ bswiz),
+- SWIZ_PTR(p)))
++ if (__get_user_or_set_dar(regs, REG_BYTE(rptr, i ^ bswiz),
++ SWIZ_PTR(p)))
+ return -EFAULT;
+ if (nb0 > 0) {
+ rptr = &regs->gpr[0];
+ addr += nb;
+ for (i = 0; i < nb0; ++i, ++p)
+- if (__get_user_inatomic(REG_BYTE(rptr,
+- i ^ bswiz),
+- SWIZ_PTR(p)))
++ if (__get_user_or_set_dar(regs,
++ REG_BYTE(rptr, i ^ bswiz),
++ SWIZ_PTR(p)))
+ return -EFAULT;
+ }
+
+ } else {
+ for (i = 0; i < nb; ++i, ++p)
+- if (__put_user_inatomic(REG_BYTE(rptr, i ^ bswiz),
+- SWIZ_PTR(p)))
++ if (__put_user_or_set_dar(regs, REG_BYTE(rptr, i ^ bswiz),
++ SWIZ_PTR(p)))
+ return -EFAULT;
+ if (nb0 > 0) {
+ rptr = &regs->gpr[0];
+ addr += nb;
+ for (i = 0; i < nb0; ++i, ++p)
+- if (__put_user_inatomic(REG_BYTE(rptr,
+- i ^ bswiz),
+- SWIZ_PTR(p)))
++ if (__put_user_or_set_dar(regs,
++ REG_BYTE(rptr, i ^ bswiz),
++ SWIZ_PTR(p)))
+ return -EFAULT;
+ }
+ }
+@@ -346,29 +369,32 @@ static int emulate_multiple(struct pt_re
+ * Only POWER6 has these instructions, and it does true little-endian,
+ * so we don't need the address swizzling.
+ */
+-static int emulate_fp_pair(unsigned char __user *addr, unsigned int reg,
+- unsigned int flags)
++static int emulate_fp_pair(struct pt_regs *regs, unsigned char __user *addr,
++ unsigned int reg, unsigned int flags)
+ {
+ char *ptr0 = (char *) &current->thread.TS_FPR(reg);
+ char *ptr1 = (char *) &current->thread.TS_FPR(reg+1);
+- int i, ret, sw = 0;
++ int i, sw = 0;
+
+ if (reg & 1)
+ return 0; /* invalid form: FRS/FRT must be even */
+ if (flags & SW)
+ sw = 7;
+- ret = 0;
++
+ for (i = 0; i < 8; ++i) {
+ if (!(flags & ST)) {
+- ret |= __get_user(ptr0[i^sw], addr + i);
+- ret |= __get_user(ptr1[i^sw], addr + i + 8);
++ if (__get_user_or_set_dar(regs, ptr0[i^sw], addr + i))
++ return -EFAULT;
++ if (__get_user_or_set_dar(regs, ptr1[i^sw], addr + i + 8))
++ return -EFAULT;
+ } else {
+- ret |= __put_user(ptr0[i^sw], addr + i);
+- ret |= __put_user(ptr1[i^sw], addr + i + 8);
++ if (__put_user_or_set_dar(regs, ptr0[i^sw], addr + i))
++ return -EFAULT;
++ if (__put_user_or_set_dar(regs, ptr1[i^sw], addr + i + 8))
++ return -EFAULT;
+ }
+ }
+- if (ret)
+- return -EFAULT;
++
+ return 1; /* exception handled and fixed up */
+ }
+
+@@ -378,24 +404,27 @@ static int emulate_lq_stq(struct pt_regs
+ {
+ char *ptr0 = (char *)&regs->gpr[reg];
+ char *ptr1 = (char *)&regs->gpr[reg+1];
+- int i, ret, sw = 0;
++ int i, sw = 0;
+
+ if (reg & 1)
+ return 0; /* invalid form: GPR must be even */
+ if (flags & SW)
+ sw = 7;
+- ret = 0;
++
+ for (i = 0; i < 8; ++i) {
+ if (!(flags & ST)) {
+- ret |= __get_user(ptr0[i^sw], addr + i);
+- ret |= __get_user(ptr1[i^sw], addr + i + 8);
++ if (__get_user_or_set_dar(regs, ptr0[i^sw], addr + i))
++ return -EFAULT;
++ if (__get_user_or_set_dar(regs, ptr1[i^sw], addr + i + 8))
++ return -EFAULT;
+ } else {
+- ret |= __put_user(ptr0[i^sw], addr + i);
+- ret |= __put_user(ptr1[i^sw], addr + i + 8);
++ if (__put_user_or_set_dar(regs, ptr0[i^sw], addr + i))
++ return -EFAULT;
++ if (__put_user_or_set_dar(regs, ptr1[i^sw], addr + i + 8))
++ return -EFAULT;
+ }
+ }
+- if (ret)
+- return -EFAULT;
++
+ return 1; /* exception handled and fixed up */
+ }
+ #endif /* CONFIG_PPC64 */
+@@ -688,9 +717,14 @@ static int emulate_vsx(unsigned char __u
+ for (j = 0; j < length; j += elsize) {
+ for (i = 0; i < elsize; ++i) {
+ if (flags & ST)
+- ret |= __put_user(ptr[i^sw], addr + i);
++ ret = __put_user_or_set_dar(regs, ptr[i^sw],
++ addr + i);
+ else
+- ret |= __get_user(ptr[i^sw], addr + i);
++ ret = __get_user_or_set_dar(regs, ptr[i^sw],
++ addr + i);
++
++ if (ret)
++ return ret;
+ }
+ ptr += elsize;
+ #ifdef __LITTLE_ENDIAN__
+@@ -740,7 +774,7 @@ int fix_alignment(struct pt_regs *regs)
+ unsigned int dsisr;
+ unsigned char __user *addr;
+ unsigned long p, swiz;
+- int ret, i;
++ int i;
+ union data {
+ u64 ll;
+ double dd;
+@@ -923,7 +957,7 @@ int fix_alignment(struct pt_regs *regs)
+ if (flags & F) {
+ /* Special case for 16-byte FP loads and stores */
+ PPC_WARN_ALIGNMENT(fp_pair, regs);
+- return emulate_fp_pair(addr, reg, flags);
++ return emulate_fp_pair(regs, addr, reg, flags);
+ } else {
+ #ifdef CONFIG_PPC64
+ /* Special case for 16-byte loads and stores */
+@@ -953,15 +987,12 @@ int fix_alignment(struct pt_regs *regs)
+ }
+
+ data.ll = 0;
+- ret = 0;
+ p = (unsigned long)addr;
+
+ for (i = 0; i < nb; i++)
+- ret |= __get_user_inatomic(data.v[start + i],
+- SWIZ_PTR(p++));
+-
+- if (unlikely(ret))
+- return -EFAULT;
++ if (__get_user_or_set_dar(regs, data.v[start + i],
++ SWIZ_PTR(p++)))
++ return -EFAULT;
+
+ } else if (flags & F) {
+ data.ll = current->thread.TS_FPR(reg);
+@@ -1031,15 +1062,13 @@ int fix_alignment(struct pt_regs *regs)
+ break;
+ }
+
+- ret = 0;
+ p = (unsigned long)addr;
+
+ for (i = 0; i < nb; i++)
+- ret |= __put_user_inatomic(data.v[start + i],
+- SWIZ_PTR(p++));
++ if (__put_user_or_set_dar(regs, data.v[start + i],
++ SWIZ_PTR(p++)))
++ return -EFAULT;
+
+- if (unlikely(ret))
+- return -EFAULT;
+ } else if (flags & F)
+ current->thread.TS_FPR(reg) = data.ll;
+ else
revert-usb-musb-fix-tx-fifo-flush-handling-again.patch
ip6_gre-fix-endianness-errors-in-ip6gre_err.patch
input-i8042-add-gigabyte-p57-to-the-keyboard-reset-table.patch
+crypto-af_alg-remove-sgl-terminator-indicator-when-chaining.patch
+ext4-fix-incorrect-quotaoff-if-the-quota-feature-is-enabled.patch
+powerpc-fix-dar-reporting-when-alignment-handler-faults.patch
+block-relax-a-check-in-blk_start_queue.patch
+md-bitmap-disable-bitmap_resize-for-file-backed-bitmaps.patch