--- /dev/null
+From c8156fc77d0796ba2618936dbb3084e769e916c1 Mon Sep 17 00:00:00 2001
+From: Ming Lei <ming.lei@redhat.com>
+Date: Wed, 11 Sep 2019 19:31:33 +0800
+Subject: dm raid: fix updating of max_discard_sectors limit
+
+From: Ming Lei <ming.lei@redhat.com>
+
+commit c8156fc77d0796ba2618936dbb3084e769e916c1 upstream.
+
+The unit of 'chunk_size' is bytes, not sectors, so fix this by setting
+the queue_limits' max_discard_sectors to rs->md.chunk_sectors. Also,
+rename chunk_size to chunk_size_bytes.
+
+Without this fix, a max_discard_sectors that is far too large is applied
+to the dm-raid request queue, and the raid code then has to split the
+bio again.
+
+This re-split done by raid causes the following nested clone_endio() calls:
+
+1) one big bio 'A' is submitted to the dm queue and serves as the
+original bio
+
+2) a new bio 'B' is cloned from the original bio 'A', and .map() is run
+on 'B'; B's original bio points to 'A'
+
+3) the raid code sees that 'B' is too big, splits 'B' and re-submits the
+remaining part of 'B' to the dm-raid queue via generic_make_request().
+
+4) now dm handles 'B' as a new original bio, allocates a new clone bio
+'C' and runs .map() on 'C'. Meanwhile C's original bio points to 'B'.
+
+5) suppose 'C' is now completed directly by raid; then clone_endio() is
+called recursively as follows:
+
+ clone_endio(C)
+ ->clone_endio(B) #B is original bio of 'C'
+ ->bio_endio(A)
+
+'A' can be big enough to cause hundreds of nested clone_endio() calls,
+which can easily corrupt the stack.
+
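+For reference, a minimal sketch (illustrative only, not part of this
+patch) of the unit convention assumed above: queue_limits fields named
+*_sectors take 512-byte sectors, while discard_granularity takes bytes,
+so only the granularity needs the to_bytes() conversion:
+
+  /* assumes <linux/blkdev.h> and <linux/device-mapper.h> */
+  static void example_discard_limits(struct queue_limits *limits,
+                                     unsigned int chunk_sectors)
+  {
+          unsigned int chunk_size_bytes = to_bytes(chunk_sectors);
+
+          limits->discard_granularity = chunk_size_bytes; /* bytes */
+          limits->max_discard_sectors = chunk_sectors;    /* 512-byte sectors */
+  }
+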
+Fixes: 61697a6abd24a ("dm: eliminate 'split_discard_bios' flag from DM target interface")
+Cc: stable@vger.kernel.org
+Signed-off-by: Ming Lei <ming.lei@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-raid.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/drivers/md/dm-raid.c
++++ b/drivers/md/dm-raid.c
+@@ -3738,18 +3738,18 @@ static int raid_iterate_devices(struct d
+ static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
+ {
+ struct raid_set *rs = ti->private;
+- unsigned int chunk_size = to_bytes(rs->md.chunk_sectors);
++ unsigned int chunk_size_bytes = to_bytes(rs->md.chunk_sectors);
+
+- blk_limits_io_min(limits, chunk_size);
+- blk_limits_io_opt(limits, chunk_size * mddev_data_stripes(rs));
++ blk_limits_io_min(limits, chunk_size_bytes);
++ blk_limits_io_opt(limits, chunk_size_bytes * mddev_data_stripes(rs));
+
+ /*
+ * RAID1 and RAID10 personalities require bio splitting,
+ * RAID0/4/5/6 don't and process large discard bios properly.
+ */
+ if (rs_is_raid1(rs) || rs_is_raid10(rs)) {
+- limits->discard_granularity = chunk_size;
+- limits->max_discard_sectors = chunk_size;
++ limits->discard_granularity = chunk_size_bytes;
++ limits->max_discard_sectors = rs->md.chunk_sectors;
+ }
+ }
+
--- /dev/null
+From 0c8e9c2d668278652af028c3cc068c65f66342f4 Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Mon, 26 Aug 2019 02:41:17 -0400
+Subject: dm zoned: fix invalid memory access
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit 0c8e9c2d668278652af028c3cc068c65f66342f4 upstream.
+
+Commit 75d66ffb48efb30f2dd42f041ba8b39c5b2bd115 ("dm zoned: properly
+handle backing device failure") triggers a coverity warning:
+
+The warning flags a use-after-free: the clone bio may already have been
+processed and freed by the time generic_make_request() returns, so the
+subsequent check of clone->bi_status can access invalid memory. Drop
+the check; an I/O error on the clone is handled by the clone's
+completion path instead.
+
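+A minimal sketch of the safe pattern (the names below are illustrative,
+not the exact dm-zoned code): inspect the error in the clone's
+->bi_end_io handler, which still owns the bio at completion time,
+rather than touching the clone after it has been submitted:
+
+  /* assumes <linux/bio.h>; example_bioctx is a hypothetical per-bio context */
+  struct example_bioctx {
+          blk_status_t status;
+  };
+
+  static void example_clone_endio(struct bio *clone)
+  {
+          struct example_bioctx *bioctx = clone->bi_private;
+
+          if (clone->bi_status != BLK_STS_OK)     /* safe: 'clone' is still live here */
+                  bioctx->status = clone->bi_status;
+          bio_put(clone);
+  }
+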
+---
+ drivers/md/dm-zoned-target.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/drivers/md/dm-zoned-target.c
++++ b/drivers/md/dm-zoned-target.c
+@@ -134,8 +134,6 @@ static int dmz_submit_bio(struct dmz_tar
+
+ refcount_inc(&bioctx->ref);
+ generic_make_request(clone);
+- if (clone->bi_status == BLK_STS_IOERR)
+- return -EIO;
+
+ if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone))
+ zone->wp_block += nr_blocks;
net-socionext-netsec-always-grab-descriptor-lock.patch
net-sched-cbs-avoid-division-by-zero-when-calculating-the-port-rate.patch
net-sched-taprio-avoid-division-by-zero-on-invalid-link-speed.patch
+smack-don-t-ignore-other-bprm-unsafe-flags-if-lsm_unsafe_ptrace-is-set.patch
+smack-use-gfp_nofs-while-holding-inode_smack-smk_lock.patch
+dm-raid-fix-updating-of-max_discard_sectors-limit.patch
+dm-zoned-fix-invalid-memory-access.patch
--- /dev/null
+From 3675f052b43ba51b99b85b073c7070e083f3e6fb Mon Sep 17 00:00:00 2001
+From: Jann Horn <jannh@google.com>
+Date: Thu, 4 Jul 2019 20:44:44 +0200
+Subject: Smack: Don't ignore other bprm->unsafe flags if LSM_UNSAFE_PTRACE is set
+
+From: Jann Horn <jannh@google.com>
+
+commit 3675f052b43ba51b99b85b073c7070e083f3e6fb upstream.
+
+There is a logic bug in the current smack_bprm_set_creds():
+If LSM_UNSAFE_PTRACE is set, but the ptrace state is deemed to be
+acceptable (e.g. because the ptracer detached in the meantime), the other
+->unsafe flags aren't checked. As far as I can tell, this means that
+something like the following could work (but I haven't tested it):
+
+ - task A: create task B with fork()
+ - task B: set NO_NEW_PRIVS
+ - task B: install a seccomp filter that makes open() return 0 under some
+ conditions
+ - task B: replace fd 0 with a malicious library
+ - task A: attach to task B with PTRACE_ATTACH
+ - task B: execve() a file with an SMACK64EXEC extended attribute
+ - task A: while task B is still in the middle of execve(), exit (which
+ destroys the ptrace relationship)
+
+Make sure that if any flags other than LSM_UNSAFE_PTRACE are set in
+bprm->unsafe, we reject the execve().
+
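+A condensed, self-contained sketch of the control-flow change (the flag
+value and function names below are illustrative stand-ins, not the
+kernel's):
+
+  #define UNSAFE_PTRACE 0x1       /* stand-in for LSM_UNSAFE_PTRACE */
+
+  /* before: the 'else' hides every other unsafe bit once PTRACE is set */
+  static int creds_check_buggy(unsigned int unsafe, int ptrace_state_ok)
+  {
+          if (unsafe & UNSAFE_PTRACE) {
+                  if (!ptrace_state_ok)
+                          return -1;      /* -EPERM */
+          } else if (unsafe)
+                  return -1;
+          return 0;       /* other unsafe bits slip through if ptrace_state_ok */
+  }
+
+  /* after: unsafe bits other than PTRACE are always rejected */
+  static int creds_check_fixed(unsigned int unsafe, int ptrace_state_ok)
+  {
+          if (unsafe & UNSAFE_PTRACE) {
+                  if (!ptrace_state_ok)
+                          return -1;
+          }
+          if (unsafe & ~UNSAFE_PTRACE)
+                  return -1;
+          return 0;
+  }
+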
+Cc: stable@vger.kernel.org
+Fixes: 5663884caab1 ("Smack: unify all ptrace accesses in the smack")
+Signed-off-by: Jann Horn <jannh@google.com>
+Signed-off-by: Casey Schaufler <casey@schaufler-ca.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ security/smack/smack_lsm.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/security/smack/smack_lsm.c
++++ b/security/smack/smack_lsm.c
+@@ -937,7 +937,8 @@ static int smack_bprm_set_creds(struct l
+
+ if (rc != 0)
+ return rc;
+- } else if (bprm->unsafe)
++ }
++ if (bprm->unsafe & ~LSM_UNSAFE_PTRACE)
+ return -EPERM;
+
+ bsp->smk_task = isp->smk_task;
--- /dev/null
+From e5bfad3d7acc5702f32aafeb388362994f4d7bd0 Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Wed, 21 Aug 2019 22:54:41 -0700
+Subject: smack: use GFP_NOFS while holding inode_smack::smk_lock
+
+From: Eric Biggers <ebiggers@google.com>
+
+commit e5bfad3d7acc5702f32aafeb388362994f4d7bd0 upstream.
+
+inode_smack::smk_lock is taken during smack_d_instantiate(), which is
+called during a filesystem transaction when creating a file on ext4.
+Therefore to avoid a deadlock, all code that takes this lock must use
+GFP_NOFS, to prevent memory reclaim from waiting for the filesystem
+transaction to complete.
+
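+A minimal sketch of the rule being applied (the helper below is
+illustrative, not Smack code): any allocation made while smk_lock is
+held, and therefore reachable from inside a filesystem transaction via
+smack_d_instantiate(), must not enter fs reclaim:
+
+  /* assumes <linux/slab.h>, <linux/string.h> and <linux/mutex.h> */
+  static char *copy_label_locked(struct mutex *smk_lock,
+                                 const char *src, size_t len)
+  {
+          char *label;
+
+          mutex_lock(smk_lock);
+          /*
+           * GFP_KERNEL could recurse into fs reclaim, which may wait on
+           * the very transaction that called into Smack -> deadlock.
+           * GFP_NOFS forbids fs reclaim for this allocation.
+           */
+          label = kmemdup_nul(src, len, GFP_NOFS);
+          mutex_unlock(smk_lock);
+          return label;
+  }
+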
+Reported-by: syzbot+0eefc1e06a77d327a056@syzkaller.appspotmail.com
+Cc: stable@vger.kernel.org
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Signed-off-by: Casey Schaufler <casey@schaufler-ca.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ security/smack/smack_access.c | 6 +++---
+ security/smack/smack_lsm.c | 2 +-
+ 2 files changed, 4 insertions(+), 4 deletions(-)
+
+--- a/security/smack/smack_access.c
++++ b/security/smack/smack_access.c
+@@ -465,7 +465,7 @@ char *smk_parse_smack(const char *string
+ if (i == 0 || i >= SMK_LONGLABEL)
+ return ERR_PTR(-EINVAL);
+
+- smack = kzalloc(i + 1, GFP_KERNEL);
++ smack = kzalloc(i + 1, GFP_NOFS);
+ if (smack == NULL)
+ return ERR_PTR(-ENOMEM);
+
+@@ -500,7 +500,7 @@ int smk_netlbl_mls(int level, char *cats
+ if ((m & *cp) == 0)
+ continue;
+ rc = netlbl_catmap_setbit(&sap->attr.mls.cat,
+- cat, GFP_KERNEL);
++ cat, GFP_NOFS);
+ if (rc < 0) {
+ netlbl_catmap_free(sap->attr.mls.cat);
+ return rc;
+@@ -536,7 +536,7 @@ struct smack_known *smk_import_entry(con
+ if (skp != NULL)
+ goto freeout;
+
+- skp = kzalloc(sizeof(*skp), GFP_KERNEL);
++ skp = kzalloc(sizeof(*skp), GFP_NOFS);
+ if (skp == NULL) {
+ skp = ERR_PTR(-ENOMEM);
+ goto freeout;
+--- a/security/smack/smack_lsm.c
++++ b/security/smack/smack_lsm.c
+@@ -288,7 +288,7 @@ static struct smack_known *smk_fetch(con
+ if (!(ip->i_opflags & IOP_XATTR))
+ return ERR_PTR(-EOPNOTSUPP);
+
+- buffer = kzalloc(SMK_LONGLABEL, GFP_KERNEL);
++ buffer = kzalloc(SMK_LONGLABEL, GFP_NOFS);
+ if (buffer == NULL)
+ return ERR_PTR(-ENOMEM);
+