--- /dev/null
+From 02f51d45937f7bc7f4dee21e9f85b2d5eac37104 Mon Sep 17 00:00:00 2001
+From: Tomas Bortoli <tomasbortoli@gmail.com>
+Date: Fri, 13 Jul 2018 16:58:59 -0700
+Subject: autofs: fix slab out of bounds read in getname_kernel()
+
+From: Tomas Bortoli <tomasbortoli@gmail.com>
+
+commit 02f51d45937f7bc7f4dee21e9f85b2d5eac37104 upstream.
+
+The autofs subsystem does not check that the "path" parameter, passed
+in via the "param" struct, is present in all cases where it is
+required.
+
+In particular it isn't checked for the AUTOFS_DEV_IOCTL_OPENMOUNT_CMD
+ioctl command.
+
+To solve it, modify the validate_dev_ioctl() function to check that a
+path has been provided for the ioctl commands that require one.
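+
+As an illustration (a hypothetical userspace sketch, not the syzkaller
+reproducer), the OPENMOUNT ioctl could previously be issued with a
+param struct that carries no path at all:
+
+	int devfd = open("/dev/autofs", O_RDONLY);
+	struct autofs_dev_ioctl param;
+
+	init_autofs_dev_ioctl(&param);	/* size == AUTOFS_DEV_IOCTL_SIZE */
+	param.openmount.devid = 1;	/* no path appended after the struct */
+	ioctl(devfd, AUTOFS_DEV_IOCTL_OPENMOUNT, &param);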
+
+Link: http://lkml.kernel.org/r/153060031527.26631.18306637892746301555.stgit@pluto.themaw.net
+Signed-off-by: Tomas Bortoli <tomasbortoli@gmail.com>
+Signed-off-by: Ian Kent <raven@themaw.net>
+Reported-by: syzbot+60c837b428dc84e83a93@syzkaller.appspotmail.com
+Cc: Dmitry Vyukov <dvyukov@google.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/autofs4/dev-ioctl.c | 22 +++++++++++++---------
+ 1 file changed, 13 insertions(+), 9 deletions(-)
+
+--- a/fs/autofs4/dev-ioctl.c
++++ b/fs/autofs4/dev-ioctl.c
+@@ -148,6 +148,15 @@ static int validate_dev_ioctl(int cmd, s
+ cmd);
+ goto out;
+ }
++ } else {
++ unsigned int inr = _IOC_NR(cmd);
++
++ if (inr == AUTOFS_DEV_IOCTL_OPENMOUNT_CMD ||
++ inr == AUTOFS_DEV_IOCTL_REQUESTER_CMD ||
++ inr == AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD) {
++ err = -EINVAL;
++ goto out;
++ }
+ }
+
+ err = 0;
+@@ -284,7 +293,8 @@ static int autofs_dev_ioctl_openmount(st
+ dev_t devid;
+ int err, fd;
+
+- /* param->path has already been checked */
++ /* param->path has been checked in validate_dev_ioctl() */
++
+ if (!param->openmount.devid)
+ return -EINVAL;
+
+@@ -446,10 +456,7 @@ static int autofs_dev_ioctl_requester(st
+ dev_t devid;
+ int err = -ENOENT;
+
+- if (param->size <= AUTOFS_DEV_IOCTL_SIZE) {
+- err = -EINVAL;
+- goto out;
+- }
++ /* param->path has been checked in validate_dev_ioctl() */
+
+ devid = sbi->sb->s_dev;
+
+@@ -534,10 +541,7 @@ static int autofs_dev_ioctl_ismountpoint
+ unsigned int devid, magic;
+ int err = -ENOENT;
+
+- if (param->size <= AUTOFS_DEV_IOCTL_SIZE) {
+- err = -EINVAL;
+- goto out;
+- }
++ /* param->path has been checked in validate_dev_ioctl() */
+
+ name = param->path;
+ type = param->ismountpoint.in.type;
--- /dev/null
+From 3ee7e8697d5860b173132606d80a9cd35e7113ee Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Mon, 18 Jun 2018 15:46:58 +0200
+Subject: bdi: Fix another oops in wb_workfn()
+
+From: Jan Kara <jack@suse.cz>
+
+commit 3ee7e8697d5860b173132606d80a9cd35e7113ee upstream.
+
+syzbot is reporting a NULL pointer dereference at wb_workfn() [1] due to
+wb->bdi->dev being NULL, and Dmitry confirmed that wb->state was
+WB_shutting_down after wb->bdi->dev became NULL. This indicates that
+bdi_unregister() failed to call wb_shutdown() on one of the wb objects.
+
+The problem is in cgwb_bdi_unregister(), which does cgwb_kill() and thus
+drops the bdi's reference to wb structures before going through the list
+of wbs again and calling wb_shutdown() on each of them. This way the
+loop iterating through all wbs can easily miss a wb if that wb has
+already passed through cgwb_remove_from_bdi_list() (called from
+wb_shutdown() from cgwb_release_workfn()), and as a result the bdi can
+be fully shut down although wb_workfn() for this wb structure is still
+running. In fact there are also other ways cgwb_bdi_unregister() can
+race with cgwb_release_workfn(), leading e.g. to use-after-free issues:
+
+CPU1 CPU2
+ cgwb_bdi_unregister()
+ cgwb_kill(*slot);
+
+cgwb_release()
+ queue_work(cgwb_release_wq, &wb->release_work);
+cgwb_release_workfn()
+ wb = list_first_entry(&bdi->wb_list, ...)
+ spin_unlock_irq(&cgwb_lock);
+ wb_shutdown(wb);
+ ...
+ kfree_rcu(wb, rcu);
+ wb_shutdown(wb); -> oops use-after-free
+
+We solve these issues by synchronizing writeback structure shutdown from
+cgwb_bdi_unregister() with cgwb_release_workfn() using a new mutex. That
+way we also no longer need synchronization using WB_shutting_down, as
+the mutex provides it for the CONFIG_CGROUP_WRITEBACK case; without
+CONFIG_CGROUP_WRITEBACK, wb_shutdown() can be called only once, from
+bdi_unregister().
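+
+Condensed, the resulting ordering in cgwb_bdi_unregister() looks like
+this (a sketch of the change below, list walking and relocking inside
+the shutdown loop elided):
+
+	spin_lock_irq(&cgwb_lock);
+	/* cgwb_kill() every wb in the radix tree */
+	spin_unlock_irq(&cgwb_lock);
+
+	mutex_lock(&bdi->cgwb_release_mutex);	/* vs cgwb_release_workfn() */
+	spin_lock_irq(&cgwb_lock);
+	/* wb_shutdown() everything still on bdi->wb_list */
+	spin_unlock_irq(&cgwb_lock);
+	mutex_unlock(&bdi->cgwb_release_mutex);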
+
+Reported-by: syzbot <syzbot+4a7438e774b21ddd8eca@syzkaller.appspotmail.com>
+Acked-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/backing-dev-defs.h | 2 +-
+ mm/backing-dev.c | 20 +++++++-------------
+ 2 files changed, 8 insertions(+), 14 deletions(-)
+
+--- a/include/linux/backing-dev-defs.h
++++ b/include/linux/backing-dev-defs.h
+@@ -22,7 +22,6 @@ struct dentry;
+ */
+ enum wb_state {
+ WB_registered, /* bdi_register() was done */
+- WB_shutting_down, /* wb_shutdown() in progress */
+ WB_writeback_running, /* Writeback is in progress */
+ WB_has_dirty_io, /* Dirty inodes on ->b_{dirty|io|more_io} */
+ WB_start_all, /* nr_pages == 0 (all) work pending */
+@@ -189,6 +188,7 @@ struct backing_dev_info {
+ #ifdef CONFIG_CGROUP_WRITEBACK
+ struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
+ struct rb_root cgwb_congested_tree; /* their congested states */
++ struct mutex cgwb_release_mutex; /* protect shutdown of wb structs */
+ #else
+ struct bdi_writeback_congested *wb_congested;
+ #endif
+--- a/mm/backing-dev.c
++++ b/mm/backing-dev.c
+@@ -359,15 +359,8 @@ static void wb_shutdown(struct bdi_write
+ spin_lock_bh(&wb->work_lock);
+ if (!test_and_clear_bit(WB_registered, &wb->state)) {
+ spin_unlock_bh(&wb->work_lock);
+- /*
+- * Wait for wb shutdown to finish if someone else is just
+- * running wb_shutdown(). Otherwise we could proceed to wb /
+- * bdi destruction before wb_shutdown() is finished.
+- */
+- wait_on_bit(&wb->state, WB_shutting_down, TASK_UNINTERRUPTIBLE);
+ return;
+ }
+- set_bit(WB_shutting_down, &wb->state);
+ spin_unlock_bh(&wb->work_lock);
+
+ cgwb_remove_from_bdi_list(wb);
+@@ -379,12 +372,6 @@ static void wb_shutdown(struct bdi_write
+ mod_delayed_work(bdi_wq, &wb->dwork, 0);
+ flush_delayed_work(&wb->dwork);
+ WARN_ON(!list_empty(&wb->work_list));
+- /*
+- * Make sure bit gets cleared after shutdown is finished. Matches with
+- * the barrier provided by test_and_clear_bit() above.
+- */
+- smp_wmb();
+- clear_and_wake_up_bit(WB_shutting_down, &wb->state);
+ }
+
+ static void wb_exit(struct bdi_writeback *wb)
+@@ -508,10 +495,12 @@ static void cgwb_release_workfn(struct w
+ struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
+ release_work);
+
++ mutex_lock(&wb->bdi->cgwb_release_mutex);
+ wb_shutdown(wb);
+
+ css_put(wb->memcg_css);
+ css_put(wb->blkcg_css);
++ mutex_unlock(&wb->bdi->cgwb_release_mutex);
+
+ fprop_local_destroy_percpu(&wb->memcg_completions);
+ percpu_ref_exit(&wb->refcnt);
+@@ -697,6 +686,7 @@ static int cgwb_bdi_init(struct backing_
+
+ INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC);
+ bdi->cgwb_congested_tree = RB_ROOT;
++ mutex_init(&bdi->cgwb_release_mutex);
+
+ ret = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
+ if (!ret) {
+@@ -717,7 +707,10 @@ static void cgwb_bdi_unregister(struct b
+ spin_lock_irq(&cgwb_lock);
+ radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
+ cgwb_kill(*slot);
++ spin_unlock_irq(&cgwb_lock);
+
++ mutex_lock(&bdi->cgwb_release_mutex);
++ spin_lock_irq(&cgwb_lock);
+ while (!list_empty(&bdi->wb_list)) {
+ wb = list_first_entry(&bdi->wb_list, struct bdi_writeback,
+ bdi_node);
+@@ -726,6 +719,7 @@ static void cgwb_bdi_unregister(struct b
+ spin_lock_irq(&cgwb_lock);
+ }
+ spin_unlock_irq(&cgwb_lock);
++ mutex_unlock(&bdi->cgwb_release_mutex);
+ }
+
+ /**
--- /dev/null
+From cd4a4ae4683dc2e09380118e205e057896dcda2b Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Sat, 2 Jun 2018 14:04:07 -0600
+Subject: block: don't use blocking queue entered for recursive bio submits
+
+From: Jens Axboe <axboe@kernel.dk>
+
+commit cd4a4ae4683dc2e09380118e205e057896dcda2b upstream.
+
+If we end up splitting a bio and the queue goes away between
+the initial submission and the later split submission, then we
+can block forever in blk_queue_enter(), waiting for the reference
+to drop to zero -- which will never happen, since we already hold
+a reference ourselves.
+
+Mark a split bio as already having entered the queue, so we can
+just use the live non-blocking queue enter variant.
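+
+Condensed, the two halves of the change below:
+
+	/* blk_queue_split(): mark the remainder before recursing */
+	bio_set_flag(*bio, BIO_QUEUE_ENTERED);
+	generic_make_request(*bio);
+
+	/* generic_make_request(): take the reference without blocking */
+	if (bio_flagged(bio, BIO_QUEUE_ENTERED))
+		blk_queue_enter_live(q);
+	else if (blk_queue_enter(q, flags) < 0)
+		/* fail or retry as before */;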
+
+Thanks to Tetsuo Handa for the analysis.
+
+Reported-by: syzbot+c4f9cebf9d651f6e54de@syzkaller.appspotmail.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ block/blk-core.c | 4 +++-
+ block/blk-merge.c | 10 ++++++++++
+ include/linux/blk_types.h | 2 ++
+ 3 files changed, 15 insertions(+), 1 deletion(-)
+
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -2392,7 +2392,9 @@ blk_qc_t generic_make_request(struct bio
+
+ if (bio->bi_opf & REQ_NOWAIT)
+ flags = BLK_MQ_REQ_NOWAIT;
+- if (blk_queue_enter(q, flags) < 0) {
++ if (bio_flagged(bio, BIO_QUEUE_ENTERED))
++ blk_queue_enter_live(q);
++ else if (blk_queue_enter(q, flags) < 0) {
+ if (!blk_queue_dying(q) && (bio->bi_opf & REQ_NOWAIT))
+ bio_wouldblock_error(bio);
+ else
+--- a/block/blk-merge.c
++++ b/block/blk-merge.c
+@@ -210,6 +210,16 @@ void blk_queue_split(struct request_queu
+ /* there isn't chance to merge the splitted bio */
+ split->bi_opf |= REQ_NOMERGE;
+
++ /*
++ * Since we're recursing into make_request here, ensure
++ * that we mark this bio as already having entered the queue.
++ * If not, and the queue is going away, we can get stuck
++ * forever on waiting for the queue reference to drop. But
++ * that will never happen, as we're already holding a
++ * reference to it.
++ */
++ bio_set_flag(*bio, BIO_QUEUE_ENTERED);
++
+ bio_chain(split, *bio);
+ trace_block_split(q, split, (*bio)->bi_iter.bi_sector);
+ generic_make_request(*bio);
+--- a/include/linux/blk_types.h
++++ b/include/linux/blk_types.h
+@@ -186,6 +186,8 @@ struct bio {
+ * throttling rules. Don't do it again. */
+ #define BIO_TRACE_COMPLETION 10 /* bio_endio() should trace the final completion
+ * of this bio. */
++#define BIO_QUEUE_ENTERED 11 /* can use blk_queue_enter_live() */
++
+ /* See BVEC_POOL_OFFSET below before adding new flags */
+
+ /*
--- /dev/null
+From c7a897843224a92209f306c984975b704969b89d Mon Sep 17 00:00:00 2001
+From: Daniel Borkmann <daniel@iogearbox.net>
+Date: Thu, 12 Jul 2018 21:44:28 +0200
+Subject: bpf: don't leave partial mangled prog in jit_subprogs error path
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+commit c7a897843224a92209f306c984975b704969b89d upstream.
+
+syzkaller managed to trigger the following bug through fault injection:
+
+ [...]
+ [ 141.043668] verifier bug. No program starts at insn 3
+ [ 141.044648] WARNING: CPU: 3 PID: 4072 at kernel/bpf/verifier.c:1613
+ get_callee_stack_depth kernel/bpf/verifier.c:1612 [inline]
+ [ 141.044648] WARNING: CPU: 3 PID: 4072 at kernel/bpf/verifier.c:1613
+ fixup_call_args kernel/bpf/verifier.c:5587 [inline]
+ [ 141.044648] WARNING: CPU: 3 PID: 4072 at kernel/bpf/verifier.c:1613
+ bpf_check+0x525e/0x5e60 kernel/bpf/verifier.c:5952
+ [ 141.047355] CPU: 3 PID: 4072 Comm: a.out Not tainted 4.18.0-rc4+ #51
+ [ 141.048446] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996),BIOS 1.10.2-1 04/01/2014
+ [ 141.049877] Call Trace:
+ [ 141.050324] __dump_stack lib/dump_stack.c:77 [inline]
+ [ 141.050324] dump_stack+0x1c9/0x2b4 lib/dump_stack.c:113
+ [ 141.050950] ? dump_stack_print_info.cold.2+0x52/0x52 lib/dump_stack.c:60
+ [ 141.051837] panic+0x238/0x4e7 kernel/panic.c:184
+ [ 141.052386] ? add_taint.cold.5+0x16/0x16 kernel/panic.c:385
+ [ 141.053101] ? __warn.cold.8+0x148/0x1ba kernel/panic.c:537
+ [ 141.053814] ? __warn.cold.8+0x117/0x1ba kernel/panic.c:530
+ [ 141.054506] ? get_callee_stack_depth kernel/bpf/verifier.c:1612 [inline]
+ [ 141.054506] ? fixup_call_args kernel/bpf/verifier.c:5587 [inline]
+ [ 141.054506] ? bpf_check+0x525e/0x5e60 kernel/bpf/verifier.c:5952
+ [ 141.055163] __warn.cold.8+0x163/0x1ba kernel/panic.c:538
+ [ 141.055820] ? get_callee_stack_depth kernel/bpf/verifier.c:1612 [inline]
+ [ 141.055820] ? fixup_call_args kernel/bpf/verifier.c:5587 [inline]
+ [ 141.055820] ? bpf_check+0x525e/0x5e60 kernel/bpf/verifier.c:5952
+ [...]
+
+What happens in jit_subprogs() is that the kcalloc() for the subprog
+func buffer fails with NULL, and we then bail out. The latter is a
+plain return -ENOMEM, and this is definitely not okay since earlier in
+the loop we walk all subprogs and temporarily rewrite insn->off to
+remember the subprog id as well as insn->imm to temporarily point the
+call to __bpf_call_base + 1 for the initial JIT pass. Thus, bailing
+out in such a state and handing this over to the interpreter is
+troublesome since later/subsequent e.g. find_subprog() lookups are
+based on a wrong insn->imm.
+
+Therefore, once we hit this point, we need to jump to the out_free
+path, where we undo all changes from the earlier loop, so that the
+interpreter can work on unmodified insn->{off,imm}.
+
+Another point is that, should find_subprog() fail in jit_subprogs() due
+to a verifier bug, we also should not simply defer the program to the
+interpreter, since here too we made partial modifications. Instead we
+should just bail out entirely and return an error to the user who is
+trying to load the program.
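+
+For reference, the temporary rewrite done in the first loop, and what
+the new out_undo_insn label has to revert, looks roughly like this
+(condensed sketch):
+
+	/* first pass: stash subprog id, point call at __bpf_call_base + 1 */
+	insn->off = subprog;
+	insn->imm = 1;
+
+	/* out_undo_insn: restore so the interpreter sees original insns */
+	insn->off = 0;
+	insn->imm = env->insn_aux_data[i].call_imm;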
+
+Fixes: 1c2a088a6626 ("bpf: x64: add JIT support for multi-function programs")
+Reported-by: syzbot+7d427828b2ea6e592804@syzkaller.appspotmail.com
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/bpf/verifier.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -5349,6 +5349,10 @@ static int jit_subprogs(struct bpf_verif
+ if (insn->code != (BPF_JMP | BPF_CALL) ||
+ insn->src_reg != BPF_PSEUDO_CALL)
+ continue;
++ /* Upon error here we cannot fall back to interpreter but
++ * need a hard reject of the program. Thus -EFAULT is
++ * propagated in any case.
++ */
+ subprog = find_subprog(env, i + insn->imm + 1);
+ if (subprog < 0) {
+ WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
+@@ -5369,7 +5373,7 @@ static int jit_subprogs(struct bpf_verif
+
+ func = kzalloc(sizeof(prog) * (env->subprog_cnt + 1), GFP_KERNEL);
+ if (!func)
+- return -ENOMEM;
++ goto out_undo_insn;
+
+ for (i = 0; i <= env->subprog_cnt; i++) {
+ subprog_start = subprog_end;
+@@ -5424,7 +5428,7 @@ static int jit_subprogs(struct bpf_verif
+ tmp = bpf_int_jit_compile(func[i]);
+ if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) {
+ verbose(env, "JIT doesn't support bpf-to-bpf calls\n");
+- err = -EFAULT;
++ err = -ENOTSUPP;
+ goto out_free;
+ }
+ cond_resched();
+@@ -5466,6 +5470,7 @@ out_free:
+ if (func[i])
+ bpf_jit_free(func[i]);
+ kfree(func);
++out_undo_insn:
+ /* cleanup main prog to be interpreted */
+ prog->jit_requested = 0;
+ for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
+@@ -5492,6 +5497,8 @@ static int fixup_call_args(struct bpf_ve
+ err = jit_subprogs(env);
+ if (err == 0)
+ return 0;
++ if (err == -EFAULT)
++ return err;
+ }
+ #ifndef CONFIG_BPF_JIT_ALWAYS_ON
+ for (i = 0; i < prog->len; i++, insn++) {
--- /dev/null
+From 9facc336876f7ecf9edba4c67b90426fde4ec898 Mon Sep 17 00:00:00 2001
+From: Daniel Borkmann <daniel@iogearbox.net>
+Date: Fri, 15 Jun 2018 02:30:48 +0200
+Subject: bpf: reject any prog that failed read-only lock
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+commit 9facc336876f7ecf9edba4c67b90426fde4ec898 upstream.
+
+We currently lock any JITed image as read-only via bpf_jit_binary_lock_ro()
+as well as the BPF image as read-only through bpf_prog_lock_ro(). In
+the case any of these fails, we throw a WARN_ON_ONCE() in order to
+yell loudly to the log. Perhaps, to some extent, this may be comparable
+to an allocation where __GFP_NOWARN is explicitly not set.
+
+Added via 65869a47f348 ("bpf: improve read-only handling"), this behavior
+is slightly different compared to any of the other in-kernel set_memory_ro()
+users who do not check the return code of set_memory_ro() and friends /at
+all/ (e.g. in the case of module_enable_ro() / module_disable_ro()). Given
+that in BPF this is a mandatory hardening step, we want to know whether
+there are any issues that would leave BPF data writable. So it happened
+that syzkaller enabled fault injection and triggered a memory allocation
+failure deep inside x86's change_page_attr_set_clr(), which was invoked
+from set_memory_ro().
+
+Now, there are two options: i) leaving everything as is, and ii) reworking
+the image locking code in order to have a final checkpoint out of the
+central bpf_prog_select_runtime() which probes whether any of the calls
+during prog setup weren't successful, and then bailing out with an error.
+Option ii) is a better approach since this additional paranoia avoids
+altogether leaving any potential W+X pages from BPF side in the system.
+Therefore, let's be strict about it and reject programs on such an
+unlikely occasion. While testing I also noticed that one
+bpf_prog_lock_ro() call was missing on the outer dummy prog in the case
+of calls: in the destructor we call bpf_prog_free_deferred() on the
+main prog, where we try to bpf_prog_unlock_free() the program, and
+since we now go via bpf_prog_select_runtime(), we lock it there as
+well.
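+
+Condensed, the pattern used below: each lock attempt records whether
+set_memory_ro() succeeded, and a final checkpoint verifies all of them
+(sketch):
+
+	hdr->locked = 1;
+	if (set_memory_ro((unsigned long)hdr, hdr->pages))
+		hdr->locked = 0;
+
+	/* later, at the end of bpf_prog_select_runtime(): */
+	*err = bpf_prog_check_pages_ro_locked(fp);	/* -ENOLCK if not RO */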
+
+Reported-by: syzbot+3b889862e65a98317058@syzkaller.appspotmail.com
+Reported-by: syzbot+9e762b52dd17e616a7a5@syzkaller.appspotmail.com
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Martin KaFai Lau <kafai@fb.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/filter.h | 60 +++++++++++++++++++++++++++++++------------------
+ kernel/bpf/core.c | 53 ++++++++++++++++++++++++++++++++++++++-----
+ kernel/bpf/syscall.c | 4 ---
+ 3 files changed, 86 insertions(+), 31 deletions(-)
+
+--- a/include/linux/filter.h
++++ b/include/linux/filter.h
+@@ -453,7 +453,8 @@ struct sock_fprog_kern {
+ };
+
+ struct bpf_binary_header {
+- unsigned int pages;
++ u16 pages;
++ u16 locked:1;
+ u8 image[];
+ };
+
+@@ -644,15 +645,18 @@ bpf_ctx_narrow_access_ok(u32 off, u32 si
+
+ #define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
+
+-#ifdef CONFIG_ARCH_HAS_SET_MEMORY
+ static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
+ {
++#ifdef CONFIG_ARCH_HAS_SET_MEMORY
+ fp->locked = 1;
+- WARN_ON_ONCE(set_memory_ro((unsigned long)fp, fp->pages));
++ if (set_memory_ro((unsigned long)fp, fp->pages))
++ fp->locked = 0;
++#endif
+ }
+
+ static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
+ {
++#ifdef CONFIG_ARCH_HAS_SET_MEMORY
+ if (fp->locked) {
+ WARN_ON_ONCE(set_memory_rw((unsigned long)fp, fp->pages));
+ /* In case set_memory_rw() fails, we want to be the first
+@@ -660,34 +664,30 @@ static inline void bpf_prog_unlock_ro(st
+ */
+ fp->locked = 0;
+ }
++#endif
+ }
+
+ static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
+ {
+- WARN_ON_ONCE(set_memory_ro((unsigned long)hdr, hdr->pages));
+-}
+-
+-static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
+-{
+- WARN_ON_ONCE(set_memory_rw((unsigned long)hdr, hdr->pages));
+-}
+-#else
+-static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
+-{
+-}
+-
+-static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
+-{
+-}
+-
+-static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
+-{
++#ifdef CONFIG_ARCH_HAS_SET_MEMORY
++ hdr->locked = 1;
++ if (set_memory_ro((unsigned long)hdr, hdr->pages))
++ hdr->locked = 0;
++#endif
+ }
+
+ static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
+ {
++#ifdef CONFIG_ARCH_HAS_SET_MEMORY
++ if (hdr->locked) {
++ WARN_ON_ONCE(set_memory_rw((unsigned long)hdr, hdr->pages));
++ /* In case set_memory_rw() fails, we want to be the first
++ * to crash here instead of some random place later on.
++ */
++ hdr->locked = 0;
++ }
++#endif
+ }
+-#endif /* CONFIG_ARCH_HAS_SET_MEMORY */
+
+ static inline struct bpf_binary_header *
+ bpf_jit_binary_hdr(const struct bpf_prog *fp)
+@@ -698,6 +698,22 @@ bpf_jit_binary_hdr(const struct bpf_prog
+ return (void *)addr;
+ }
+
++#ifdef CONFIG_ARCH_HAS_SET_MEMORY
++static inline int bpf_prog_check_pages_ro_single(const struct bpf_prog *fp)
++{
++ if (!fp->locked)
++ return -ENOLCK;
++ if (fp->jited) {
++ const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);
++
++ if (!hdr->locked)
++ return -ENOLCK;
++ }
++
++ return 0;
++}
++#endif
++
+ int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap);
+ static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
+ {
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -583,6 +583,8 @@ bpf_jit_binary_alloc(unsigned int progle
+ bpf_fill_ill_insns(hdr, size);
+
+ hdr->pages = size / PAGE_SIZE;
++ hdr->locked = 0;
++
+ hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
+ PAGE_SIZE - sizeof(*hdr));
+ start = (get_random_int() % hole) & ~(alignment - 1);
+@@ -1513,6 +1515,33 @@ static int bpf_check_tail_call(const str
+ return 0;
+ }
+
++static int bpf_prog_check_pages_ro_locked(const struct bpf_prog *fp)
++{
++#ifdef CONFIG_ARCH_HAS_SET_MEMORY
++ int i, err;
++
++ for (i = 0; i < fp->aux->func_cnt; i++) {
++ err = bpf_prog_check_pages_ro_single(fp->aux->func[i]);
++ if (err)
++ return err;
++ }
++
++ return bpf_prog_check_pages_ro_single(fp);
++#endif
++ return 0;
++}
++
++static void bpf_prog_select_func(struct bpf_prog *fp)
++{
++#ifndef CONFIG_BPF_JIT_ALWAYS_ON
++ u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
++
++ fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
++#else
++ fp->bpf_func = __bpf_prog_ret0_warn;
++#endif
++}
++
+ /**
+ * bpf_prog_select_runtime - select exec runtime for BPF program
+ * @fp: bpf_prog populated with internal BPF program
+@@ -1523,13 +1552,13 @@ static int bpf_check_tail_call(const str
+ */
+ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
+ {
+-#ifndef CONFIG_BPF_JIT_ALWAYS_ON
+- u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
++ /* In case of BPF to BPF calls, verifier did all the prep
++ * work with regards to JITing, etc.
++ */
++ if (fp->bpf_func)
++ goto finalize;
+
+- fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
+-#else
+- fp->bpf_func = __bpf_prog_ret0_warn;
+-#endif
++ bpf_prog_select_func(fp);
+
+ /* eBPF JITs can rewrite the program in case constant
+ * blinding is active. However, in case of error during
+@@ -1550,6 +1579,8 @@ struct bpf_prog *bpf_prog_select_runtime
+ if (*err)
+ return fp;
+ }
++
++finalize:
+ bpf_prog_lock_ro(fp);
+
+ /* The tail call compatibility check can only be done at
+@@ -1558,7 +1589,17 @@ struct bpf_prog *bpf_prog_select_runtime
+ * all eBPF JITs might immediately support all features.
+ */
+ *err = bpf_check_tail_call(fp);
++ if (*err)
++ return fp;
+
++ /* Checkpoint: at this point onwards any cBPF -> eBPF or
++ * native eBPF program is read-only. If we failed to change
++ * the page attributes (e.g. allocation failure from
++ * splitting large pages), then reject the whole program
++ * in order to guarantee not ending up with any W+X pages
++ * from BPF side in kernel.
++ */
++ *err = bpf_prog_check_pages_ro_locked(fp);
+ return fp;
+ }
+ EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
+--- a/kernel/bpf/syscall.c
++++ b/kernel/bpf/syscall.c
+@@ -1328,9 +1328,7 @@ static int bpf_prog_load(union bpf_attr
+ if (err < 0)
+ goto free_used_maps;
+
+- /* eBPF program is ready to be JITed */
+- if (!prog->bpf_func)
+- prog = bpf_prog_select_runtime(prog, &err);
++ prog = bpf_prog_select_runtime(prog, &err);
+ if (err < 0)
+ goto free_used_maps;
+
--- /dev/null
+From 7ebc14d507b4b55105da8d1a1eda323381529cc7 Mon Sep 17 00:00:00 2001
+From: John Fastabend <john.fastabend@gmail.com>
+Date: Thu, 5 Jul 2018 08:50:10 -0700
+Subject: bpf: sockmap, consume_skb in close path
+
+From: John Fastabend <john.fastabend@gmail.com>
+
+commit 7ebc14d507b4b55105da8d1a1eda323381529cc7 upstream.
+
+Currently, when a sock is closed and the bpf_tcp_close() callback is
+used, we remove the memory but do not free the skb. Call consume_skb()
+if the skb is attached to the buffer.
+
+Reported-by: syzbot+d464d2c20c717ef5a6a8@syzkaller.appspotmail.com
+Fixes: 1aa12bdf1bfb ("bpf: sockmap, add sock close() hook to remove socks")
+Signed-off-by: John Fastabend <john.fastabend@gmail.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/bpf/sockmap.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/kernel/bpf/sockmap.c
++++ b/kernel/bpf/sockmap.c
+@@ -471,7 +471,8 @@ static int free_sg(struct sock *sk, int
+ while (sg[i].length) {
+ free += sg[i].length;
+ sk_mem_uncharge(sk, sg[i].length);
+- put_page(sg_page(&sg[i]));
++ if (!md->skb)
++ put_page(sg_page(&sg[i]));
+ sg[i].length = 0;
+ sg[i].page_link = 0;
+ sg[i].offset = 0;
+@@ -480,6 +481,8 @@ static int free_sg(struct sock *sk, int
+ if (i == MAX_SKB_FRAGS)
+ i = 0;
+ }
++ if (md->skb)
++ consume_skb(md->skb);
+
+ return free;
+ }
--- /dev/null
+From 9901c5d77e969d8215a8e8d087ef02e6feddc84c Mon Sep 17 00:00:00 2001
+From: John Fastabend <john.fastabend@gmail.com>
+Date: Sat, 30 Jun 2018 06:17:36 -0700
+Subject: bpf: sockmap, fix crash when ipv6 sock is added
+
+From: John Fastabend <john.fastabend@gmail.com>
+
+commit 9901c5d77e969d8215a8e8d087ef02e6feddc84c upstream.
+
+This fixes a crash where we assign tcp_prot to IPv6 sockets instead
+of tcpv6_prot.
+
+Previously we overwrote the sk->sk_prot field with tcp_prot even in the
+AF_INET6 case. This patch ensures the correct tcp_prot and tcpv6_prot
+are used.
+
+Tested with 'netserver -6' and 'netperf -H [IPv6]' as well as
+'netperf -H [IPv4]'. The ESTABLISHED check resolves the previously
+crashing case here.
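+
+The fix keeps a 2x2 matrix of proto structs, indexed by address family
+and by whether a TX program is attached (see build_protos() below):
+
+	bpf_tcp_prots[SOCKMAP_IPV4][SOCKMAP_BASE]	/* tcp_prot,   no TX */
+	bpf_tcp_prots[SOCKMAP_IPV4][SOCKMAP_TX]		/* tcp_prot,   TX    */
+	bpf_tcp_prots[SOCKMAP_IPV6][SOCKMAP_BASE]	/* tcpv6_prot, no TX */
+	bpf_tcp_prots[SOCKMAP_IPV6][SOCKMAP_TX]		/* tcpv6_prot, TX    */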
+
+Fixes: 174a79ff9515 ("bpf: sockmap with sk redirect support")
+Reported-by: syzbot+5c063698bdbfac19f363@syzkaller.appspotmail.com
+Acked-by: Martin KaFai Lau <kafai@fb.com>
+Signed-off-by: John Fastabend <john.fastabend@gmail.com>
+Signed-off-by: Wei Wang <weiwan@google.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/bpf/sockmap.c | 58 ++++++++++++++++++++++++++++++++++++++++++---------
+ 1 file changed, 48 insertions(+), 10 deletions(-)
+
+--- a/kernel/bpf/sockmap.c
++++ b/kernel/bpf/sockmap.c
+@@ -112,6 +112,7 @@ static int bpf_tcp_recvmsg(struct sock *
+ static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
+ static int bpf_tcp_sendpage(struct sock *sk, struct page *page,
+ int offset, size_t size, int flags);
++static void bpf_tcp_close(struct sock *sk, long timeout);
+
+ static inline struct smap_psock *smap_psock_sk(const struct sock *sk)
+ {
+@@ -133,7 +134,42 @@ out:
+ return !empty;
+ }
+
+-static struct proto tcp_bpf_proto;
++enum {
++ SOCKMAP_IPV4,
++ SOCKMAP_IPV6,
++ SOCKMAP_NUM_PROTS,
++};
++
++enum {
++ SOCKMAP_BASE,
++ SOCKMAP_TX,
++ SOCKMAP_NUM_CONFIGS,
++};
++
++static struct proto *saved_tcpv6_prot __read_mostly;
++static DEFINE_SPINLOCK(tcpv6_prot_lock);
++static struct proto bpf_tcp_prots[SOCKMAP_NUM_PROTS][SOCKMAP_NUM_CONFIGS];
++static void build_protos(struct proto prot[SOCKMAP_NUM_CONFIGS],
++ struct proto *base)
++{
++ prot[SOCKMAP_BASE] = *base;
++ prot[SOCKMAP_BASE].close = bpf_tcp_close;
++ prot[SOCKMAP_BASE].recvmsg = bpf_tcp_recvmsg;
++ prot[SOCKMAP_BASE].stream_memory_read = bpf_tcp_stream_read;
++
++ prot[SOCKMAP_TX] = prot[SOCKMAP_BASE];
++ prot[SOCKMAP_TX].sendmsg = bpf_tcp_sendmsg;
++ prot[SOCKMAP_TX].sendpage = bpf_tcp_sendpage;
++}
++
++static void update_sk_prot(struct sock *sk, struct smap_psock *psock)
++{
++ int family = sk->sk_family == AF_INET6 ? SOCKMAP_IPV6 : SOCKMAP_IPV4;
++ int conf = psock->bpf_tx_msg ? SOCKMAP_TX : SOCKMAP_BASE;
++
++ sk->sk_prot = &bpf_tcp_prots[family][conf];
++}
++
+ static int bpf_tcp_init(struct sock *sk)
+ {
+ struct smap_psock *psock;
+@@ -153,14 +189,17 @@ static int bpf_tcp_init(struct sock *sk)
+ psock->save_close = sk->sk_prot->close;
+ psock->sk_proto = sk->sk_prot;
+
+- if (psock->bpf_tx_msg) {
+- tcp_bpf_proto.sendmsg = bpf_tcp_sendmsg;
+- tcp_bpf_proto.sendpage = bpf_tcp_sendpage;
+- tcp_bpf_proto.recvmsg = bpf_tcp_recvmsg;
+- tcp_bpf_proto.stream_memory_read = bpf_tcp_stream_read;
++ /* Build IPv6 sockmap whenever the address of tcpv6_prot changes */
++ if (sk->sk_family == AF_INET6 &&
++ unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv6_prot))) {
++ spin_lock_bh(&tcpv6_prot_lock);
++ if (likely(sk->sk_prot != saved_tcpv6_prot)) {
++ build_protos(bpf_tcp_prots[SOCKMAP_IPV6], sk->sk_prot);
++ smp_store_release(&saved_tcpv6_prot, sk->sk_prot);
++ }
++ spin_unlock_bh(&tcpv6_prot_lock);
+ }
+-
+- sk->sk_prot = &tcp_bpf_proto;
++ update_sk_prot(sk, psock);
+ rcu_read_unlock();
+ return 0;
+ }
+@@ -1070,8 +1109,7 @@ static void bpf_tcp_msg_add(struct smap_
+
+ static int bpf_tcp_ulp_register(void)
+ {
+- tcp_bpf_proto = tcp_prot;
+- tcp_bpf_proto.close = bpf_tcp_close;
++ build_protos(bpf_tcp_prots[SOCKMAP_IPV4], &tcp_prot);
+ /* Once BPF TX ULP is registered it is never unregistered. It
+ * will be in the ULP list for the lifetime of the system. Doing
+ * duplicate registers is not a problem.
--- /dev/null
+From 3aa1409a7b160f9444945c0df1cb079df82be84e Mon Sep 17 00:00:00 2001
+From: Cong Wang <xiyou.wangcong@gmail.com>
+Date: Mon, 23 Apr 2018 13:53:41 -0700
+Subject: ipvs: initialize tbl->entries after allocation
+
+From: Cong Wang <xiyou.wangcong@gmail.com>
+
+commit 3aa1409a7b160f9444945c0df1cb079df82be84e upstream.
+
+tbl->entries is not initialized after kmalloc(), and therefore causes
+an uninit-value warning in ip_vs_lblcr_check_expire(),
+as reported by syzbot.
+
+Reported-by: <syzbot+3dfdea57819073a04f21@syzkaller.appspotmail.com>
+Cc: Simon Horman <horms@verge.net.au>
+Cc: Julian Anastasov <ja@ssi.bg>
+Cc: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
+Acked-by: Julian Anastasov <ja@ssi.bg>
+Acked-by: Simon Horman <horms@verge.net.au>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/netfilter/ipvs/ip_vs_lblcr.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/net/netfilter/ipvs/ip_vs_lblcr.c
++++ b/net/netfilter/ipvs/ip_vs_lblcr.c
+@@ -534,6 +534,7 @@ static int ip_vs_lblcr_init_svc(struct i
+ tbl->counter = 1;
+ tbl->dead = false;
+ tbl->svc = svc;
++ atomic_set(&tbl->entries, 0);
+
+ /*
+ * Hook periodic timer for garbage collection
--- /dev/null
+From 8b2ebb6cf064247d60cccbf1750610ac9bb2e672 Mon Sep 17 00:00:00 2001
+From: Cong Wang <xiyou.wangcong@gmail.com>
+Date: Mon, 23 Apr 2018 14:04:45 -0700
+Subject: ipvs: initialize tbl->entries in ip_vs_lblc_init_svc()
+
+From: Cong Wang <xiyou.wangcong@gmail.com>
+
+commit 8b2ebb6cf064247d60cccbf1750610ac9bb2e672 upstream.
+
+Similarly, tbl->entries is not initialized after kmalloc(), and
+therefore causes an uninit-value warning in ip_vs_lblc_check_expire(),
+as reported by syzbot.
+
+Reported-by: <syzbot+3e9695f147fb529aa9bc@syzkaller.appspotmail.com>
+Cc: Simon Horman <horms@verge.net.au>
+Cc: Julian Anastasov <ja@ssi.bg>
+Cc: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
+Acked-by: Julian Anastasov <ja@ssi.bg>
+Acked-by: Simon Horman <horms@verge.net.au>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/netfilter/ipvs/ip_vs_lblc.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/net/netfilter/ipvs/ip_vs_lblc.c
++++ b/net/netfilter/ipvs/ip_vs_lblc.c
+@@ -371,6 +371,7 @@ static int ip_vs_lblc_init_svc(struct ip
+ tbl->counter = 1;
+ tbl->dead = false;
+ tbl->svc = svc;
++ atomic_set(&tbl->entries, 0);
+
+ /*
+ * Hook periodic timer for garbage collection
--- /dev/null
+From c604cb767049b78b3075497b80ebb8fd530ea2cc Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Wed, 11 Jul 2018 10:46:29 -0700
+Subject: KEYS: DNS: fix parsing multiple options
+
+From: Eric Biggers <ebiggers@google.com>
+
+commit c604cb767049b78b3075497b80ebb8fd530ea2cc upstream.
+
+My recent fix for dns_resolver_preparse() printing very long strings was
+incomplete, as shown by syzbot which still managed to hit the
+WARN_ONCE() in set_precision() by adding a crafted "dns_resolver" key:
+
+ precision 50001 too large
+ WARNING: CPU: 7 PID: 864 at lib/vsprintf.c:2164 vsnprintf+0x48a/0x5a0
+
+The bug this time isn't just a printing bug, but also a logical error
+when multiple options ("#"-separated strings) are given in the key
+payload. Specifically, when separating an option string into name and
+value, if there is no value then the name is incorrectly considered to
+end at the end of the key payload, rather than the end of the current
+option. This bypasses validation of the option length, and also means
+that specifying multiple options is broken -- which presumably has gone
+unnoticed as there is currently only one valid option anyway.
+
+A similar problem also applied to option values, as the kstrtoul() when
+parsing the "dnserror" option will read past the end of the current
+option and into the next option.
+
+Fix these bugs by correctly computing the length of the option name and
+by copying the option value, null-terminated, into a temporary buffer.
+
+Reproducer for the WARN_ONCE() that syzbot hit:
+
+ perl -e 'print "#A#", "\0" x 50000' | keyctl padd dns_resolver desc @s
+
+Reproducer for "dnserror" option being parsed incorrectly (expected
+behavior is to fail when seeing the unknown option "foo", actual
+behavior was to read the dnserror value as "1#foo" and fail there):
+
+ perl -e 'print "#dnserror=1#foo\0"' | keyctl padd dns_resolver desc @s
+
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Fixes: 4a2d789267e0 ("DNS: If the DNS server returns an error, allow that to be cached [ver #2]")
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/dns_resolver/dns_key.c | 30 +++++++++++++++++-------------
+ 1 file changed, 17 insertions(+), 13 deletions(-)
+
+--- a/net/dns_resolver/dns_key.c
++++ b/net/dns_resolver/dns_key.c
+@@ -86,35 +86,39 @@ dns_resolver_preparse(struct key_prepars
+ opt++;
+ kdebug("options: '%s'", opt);
+ do {
++ int opt_len, opt_nlen;
+ const char *eq;
+- int opt_len, opt_nlen, opt_vlen, tmp;
++ char optval[128];
+
+ next_opt = memchr(opt, '#', end - opt) ?: end;
+ opt_len = next_opt - opt;
+- if (opt_len <= 0 || opt_len > 128) {
++ if (opt_len <= 0 || opt_len > sizeof(optval)) {
+ pr_warn_ratelimited("Invalid option length (%d) for dns_resolver key\n",
+ opt_len);
+ return -EINVAL;
+ }
+
+- eq = memchr(opt, '=', opt_len) ?: end;
+- opt_nlen = eq - opt;
+- eq++;
+- opt_vlen = next_opt - eq; /* will be -1 if no value */
+-
+- tmp = opt_vlen >= 0 ? opt_vlen : 0;
+- kdebug("option '%*.*s' val '%*.*s'",
+- opt_nlen, opt_nlen, opt, tmp, tmp, eq);
++ eq = memchr(opt, '=', opt_len);
++ if (eq) {
++ opt_nlen = eq - opt;
++ eq++;
++ memcpy(optval, eq, next_opt - eq);
++ optval[next_opt - eq] = '\0';
++ } else {
++ opt_nlen = opt_len;
++ optval[0] = '\0';
++ }
++
++ kdebug("option '%*.*s' val '%s'",
++ opt_nlen, opt_nlen, opt, optval);
+
+ /* see if it's an error number representing a DNS error
+ * that's to be recorded as the result in this key */
+ if (opt_nlen == sizeof(DNS_ERRORNO_OPTION) - 1 &&
+ memcmp(opt, DNS_ERRORNO_OPTION, opt_nlen) == 0) {
+ kdebug("dns error number option");
+- if (opt_vlen <= 0)
+- goto bad_option_value;
+
+- ret = kstrtoul(eq, 10, &derrno);
++ ret = kstrtoul(optval, 10, &derrno);
+ if (ret < 0)
+ goto bad_option_value;
+
--- /dev/null
+From 3bc53be9db21040b5d2de4d455f023c8c494aa68 Mon Sep 17 00:00:00 2001
+From: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+Date: Wed, 18 Jul 2018 18:57:27 +0900
+Subject: net/nfc: Avoid stalls when nfc_alloc_send_skb() returned NULL.
+
+From: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+
+commit 3bc53be9db21040b5d2de4d455f023c8c494aa68 upstream.
+
+syzbot is reporting stalls at nfc_llcp_send_ui_frame() [1]. This is
+because nfc_llcp_send_ui_frame() retries the loop without any delay
+when the nonblocking nfc_alloc_send_skb() returns NULL.
+
+Since there is no need to use MSG_DONTWAIT if we retry until
+sock_alloc_send_pskb() succeeds, let's use a blocking call.
+Also, in case an unexpected error occurs, let's break out of the loop
+if the blocking nfc_alloc_send_skb() fails.
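+
+Condensed, the loop body below becomes:
+
+	pdu = nfc_alloc_send_skb(sock->dev, &sock->sk, 0 /* blocking */,
+				 frag_len + LLCP_HEADER_SIZE, &err);
+	if (pdu == NULL)
+		break;	/* a blocking allocation failing is a real error */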
+
+[1] https://syzkaller.appspot.com/bug?id=4a131cc571c3733e0eff6bc673f4e36ae48f19c6
+
+Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+Reported-by: syzbot <syzbot+d29d18215e477cfbfbdd@syzkaller.appspotmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/nfc/llcp_commands.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/net/nfc/llcp_commands.c
++++ b/net/nfc/llcp_commands.c
+@@ -752,11 +752,14 @@ int nfc_llcp_send_ui_frame(struct nfc_ll
+ pr_debug("Fragment %zd bytes remaining %zd",
+ frag_len, remaining_len);
+
+- pdu = nfc_alloc_send_skb(sock->dev, &sock->sk, MSG_DONTWAIT,
++ pdu = nfc_alloc_send_skb(sock->dev, &sock->sk, 0,
+ frag_len + LLCP_HEADER_SIZE, &err);
+ if (pdu == NULL) {
+- pr_err("Could not allocate PDU\n");
+- continue;
++ pr_err("Could not allocate PDU (error=%d)\n", err);
++ len -= remaining_len;
++ if (len == 0)
++ len = err;
++ break;
+ }
+
+ pdu = llcp_add_header(pdu, dsap, ssap, LLCP_PDU_UI);
--- /dev/null
+From 11ff7288beb2b7da889a014aff0a7b80bf8efcf3 Mon Sep 17 00:00:00 2001
+From: Florian Westphal <fw@strlen.de>
+Date: Wed, 6 Jun 2018 12:14:56 +0200
+Subject: netfilter: ebtables: reject non-bridge targets
+
+From: Florian Westphal <fw@strlen.de>
+
+commit 11ff7288beb2b7da889a014aff0a7b80bf8efcf3 upstream.
+
+the ebtables evaluation loop expects targets to return
+positive values (jumps), or negative values (absolute verdicts).
+
+This is completely different from what xtables does.
+In xtables, targets are expected to return the standard netfilter
+verdicts, i.e. NF_DROP, NF_ACCEPT, etc.
+
+ebtables will consider these as jumps.
+
+Therefore, reject any target that was found via the unspec fallback.
+
+v2: also reject watchers. ebtables ignores their return value, so a
+target that assumes skb ownership (and returns NF_STOLEN) causes a
+use-after-free.
+
+The only watchers in the 'ebtables' front-end are log and nflog;
+both have AF_BRIDGE specific wrappers on kernel side.
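+
+For context, the two verdict schemes that must not be mixed (constants
+as defined in the uapi headers, shown here for illustration):
+
+	/* ebtables verdicts: negative values; positive means "jump" */
+	#define EBT_ACCEPT   -1
+	#define EBT_DROP     -2
+	#define EBT_CONTINUE -3
+	#define EBT_RETURN   -4
+
+	/* xtables/netfilter verdicts: NF_DROP == 0, NF_ACCEPT == 1, ...
+	 * which ebtables would misread as jump offsets
+	 */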
+
+Reported-by: syzbot+2b43f681169a2a0d306a@syzkaller.appspotmail.com
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/bridge/netfilter/ebtables.c | 13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+--- a/net/bridge/netfilter/ebtables.c
++++ b/net/bridge/netfilter/ebtables.c
+@@ -396,6 +396,12 @@ ebt_check_watcher(struct ebt_entry_watch
+ watcher = xt_request_find_target(NFPROTO_BRIDGE, w->u.name, 0);
+ if (IS_ERR(watcher))
+ return PTR_ERR(watcher);
++
++ if (watcher->family != NFPROTO_BRIDGE) {
++ module_put(watcher->me);
++ return -ENOENT;
++ }
++
+ w->u.watcher = watcher;
+
+ par->target = watcher;
+@@ -717,6 +723,13 @@ ebt_check_entry(struct ebt_entry *e, str
+ goto cleanup_watchers;
+ }
+
++ /* Reject UNSPEC, xtables verdicts/return values are incompatible */
++ if (target->family != NFPROTO_BRIDGE) {
++ module_put(target->me);
++ ret = -ENOENT;
++ goto cleanup_watchers;
++ }
++
+ t->u.target = target;
+ if (t->u.target == &ebt_standard_target) {
+ if (gap < sizeof(struct ebt_standard_target)) {
--- /dev/null
+From 84379c9afe011020e797e3f50a662b08a6355dcf Mon Sep 17 00:00:00 2001
+From: Florian Westphal <fw@strlen.de>
+Date: Mon, 9 Jul 2018 13:43:38 +0200
+Subject: netfilter: ipv6: nf_defrag: drop skb dst before queueing
+
+From: Florian Westphal <fw@strlen.de>
+
+commit 84379c9afe011020e797e3f50a662b08a6355dcf upstream.
+
+Eric Dumazet reports:
+ Here is a reproducer of an annoying bug detected by syzkaller on our production kernel
+ [..]
+ ./b78305423 enable_conntrack
+ Then :
+ sleep 60
+ dmesg | tail -10
+ [ 171.599093] unregister_netdevice: waiting for lo to become free. Usage count = 2
+ [ 181.631024] unregister_netdevice: waiting for lo to become free. Usage count = 2
+ [ 191.687076] unregister_netdevice: waiting for lo to become free. Usage count = 2
+ [ 201.703037] unregister_netdevice: waiting for lo to become free. Usage count = 2
+ [ 211.711072] unregister_netdevice: waiting for lo to become free. Usage count = 2
+ [ 221.959070] unregister_netdevice: waiting for lo to become free. Usage count = 2
+
+The reproducer sends an ipv6 fragment that hits nfct defrag via the
+LOCAL_OUT hook. The skb gets queued until frag timer expiry -- 1 minute.
+
+Normally nf_conntrack_reasm gets called during prerouting, so the skb
+has no dst yet, which might explain why this wasn't spotted earlier.
+
+Reported-by: Eric Dumazet <eric.dumazet@gmail.com>
+Reported-by: John Sperbeck <jsperbeck@google.com>
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Tested-by: Eric Dumazet <edumazet@google.com>
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/ipv6/netfilter/nf_conntrack_reasm.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
++++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
+@@ -585,6 +585,8 @@ int nf_ct_frag6_gather(struct net *net,
+ fq->q.meat == fq->q.len &&
+ nf_ct_frag6_reasm(fq, skb, dev))
+ ret = 0;
++ else
++ skb_dst_drop(skb);
+
+ out_unlock:
+ spin_unlock_bh(&fq->q.lock);
--- /dev/null
+From bab2c80e5a6c855657482eac9e97f5f3eedb509a Mon Sep 17 00:00:00 2001
+From: Willem de Bruijn <willemb@google.com>
+Date: Wed, 11 Jul 2018 12:00:44 -0400
+Subject: nsh: set mac len based on inner packet
+
+From: Willem de Bruijn <willemb@google.com>
+
+commit bab2c80e5a6c855657482eac9e97f5f3eedb509a upstream.
+
+When pulling the NSH header in nsh_gso_segment, set the mac length
+based on the encapsulated packet type.
+
+skb_reset_mac_len computes an offset to the network header, which
+here still points to the outer packet:
+
+ > skb_reset_network_header(skb);
+ > [...]
+ > __skb_pull(skb, nsh_len);
+ > skb_reset_mac_header(skb); // now mac hdr starts nsh_len == 8B after net hdr
+ > skb_reset_mac_len(skb); // mac len = net hdr - mac hdr == (u16) -8 == 65528
+ > [..]
+ > skb_mac_gso_segment(skb, ..)
+
+Link: http://lkml.kernel.org/r/CAF=yD-KeAcTSOn4AxirAxL8m7QAS8GBBe1w09eziYwvPbbUeYA@mail.gmail.com
+Reported-by: syzbot+7b9ed9872dab8c32305d@syzkaller.appspotmail.com
+Fixes: c411ed854584 ("nsh: add GSO support")
+Signed-off-by: Willem de Bruijn <willemb@google.com>
+Acked-by: Jiri Benc <jbenc@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/nsh/nsh.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/nsh/nsh.c
++++ b/net/nsh/nsh.c
+@@ -104,7 +104,7 @@ static struct sk_buff *nsh_gso_segment(s
+ __skb_pull(skb, nsh_len);
+
+ skb_reset_mac_header(skb);
+- skb_reset_mac_len(skb);
++ skb->mac_len = proto == htons(ETH_P_TEB) ? ETH_HLEN : 0;
+ skb->protocol = proto;
+
+ features &= NETIF_F_SG;
--- /dev/null
+From f1693c63ab133d16994cc50f773982b5905af264 Mon Sep 17 00:00:00 2001
+From: Santosh Shilimkar <santosh.shilimkar@oracle.com>
+Date: Thu, 14 Jun 2018 11:52:34 -0700
+Subject: rds: avoid unenecessary cong_update in loop transport
+
+From: Santosh Shilimkar <santosh.shilimkar@oracle.com>
+
+commit f1693c63ab133d16994cc50f773982b5905af264 upstream.
+
+For the loop transport, which is a self loopback, the remote port
+congestion update isn't relevant. In fact, the xmit path already
+ignores it; the receive path needs to do the same.
+
+Reported-by: syzbot+4c20b3866171ce8441d2@syzkaller.appspotmail.com
+Reviewed-by: Sowmini Varadhan <sowmini.varadhan@oracle.com>
+Signed-off-by: Santosh Shilimkar <santosh.shilimkar@oracle.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/rds/loop.c | 1 +
+ net/rds/rds.h | 5 +++++
+ net/rds/recv.c | 5 +++++
+ 3 files changed, 11 insertions(+)
+
+--- a/net/rds/loop.c
++++ b/net/rds/loop.c
+@@ -193,4 +193,5 @@ struct rds_transport rds_loop_transport
+ .inc_copy_to_user = rds_message_inc_copy_to_user,
+ .inc_free = rds_loop_inc_free,
+ .t_name = "loopback",
++ .t_type = RDS_TRANS_LOOP,
+ };
+--- a/net/rds/rds.h
++++ b/net/rds/rds.h
+@@ -479,6 +479,11 @@ struct rds_notifier {
+ int n_status;
+ };
+
++/* Available as part of RDS core, so doesn't need to participate
++ * in get_preferred transport etc
++ */
++#define RDS_TRANS_LOOP 3
++
+ /**
+ * struct rds_transport - transport specific behavioural hooks
+ *
+--- a/net/rds/recv.c
++++ b/net/rds/recv.c
+@@ -103,6 +103,11 @@ static void rds_recv_rcvbuf_delta(struct
+ rds_stats_add(s_recv_bytes_added_to_socket, delta);
+ else
+ rds_stats_add(s_recv_bytes_removed_from_socket, -delta);
++
++ /* loop transport doesn't send/recv congestion updates */
++ if (rs->rs_transport->t_type == RDS_TRANS_LOOP)
++ return;
++
+ now_congested = rs->rs_rcv_bytes > rds_sk_rcvbuf(rs);
+
+ rdsdebug("rs %p (%pI4:%u) recv bytes %d buf %d "
--- /dev/null
+From fe10e398e860955bac4d28ec031b701d358465e4 Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Fri, 13 Jul 2018 16:59:27 -0700
+Subject: reiserfs: fix buffer overflow with long warning messages
+
+From: Eric Biggers <ebiggers@google.com>
+
+commit fe10e398e860955bac4d28ec031b701d358465e4 upstream.
+
+ReiserFS prepares log messages into a 1024-byte buffer with no bounds
+checks. Long messages, such as the "unknown mount option" warning when
+userspace passes a crafted mount options string, overflow this buffer.
+This causes KASAN to report a global-out-of-bounds write.
+
+Fix it by truncating messages to the buffer size.
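+
+The key property relied on below is that scnprintf() returns the number
+of characters actually written (unlike snprintf(), which returns the
+would-be length), so the advancing-pointer idiom stays in bounds:
+
+	char *p = buf;
+	char * const end = buf + size;
+
+	p += scnprintf(p, end - p, "...");	/* p can never pass end */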
+
+Link: http://lkml.kernel.org/r/20180707203621.30922-1-ebiggers3@gmail.com
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Reported-by: syzbot+b890b3335a4d8c608963@syzkaller.appspotmail.com
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/reiserfs/prints.c | 141 +++++++++++++++++++++++++++++----------------------
+ 1 file changed, 81 insertions(+), 60 deletions(-)
+
+--- a/fs/reiserfs/prints.c
++++ b/fs/reiserfs/prints.c
+@@ -76,83 +76,99 @@ static char *le_type(struct reiserfs_key
+ }
+
+ /* %k */
+-static void sprintf_le_key(char *buf, struct reiserfs_key *key)
++static int scnprintf_le_key(char *buf, size_t size, struct reiserfs_key *key)
+ {
+ if (key)
+- sprintf(buf, "[%d %d %s %s]", le32_to_cpu(key->k_dir_id),
+- le32_to_cpu(key->k_objectid), le_offset(key),
+- le_type(key));
++ return scnprintf(buf, size, "[%d %d %s %s]",
++ le32_to_cpu(key->k_dir_id),
++ le32_to_cpu(key->k_objectid), le_offset(key),
++ le_type(key));
+ else
+- sprintf(buf, "[NULL]");
++ return scnprintf(buf, size, "[NULL]");
+ }
+
+ /* %K */
+-static void sprintf_cpu_key(char *buf, struct cpu_key *key)
++static int scnprintf_cpu_key(char *buf, size_t size, struct cpu_key *key)
+ {
+ if (key)
+- sprintf(buf, "[%d %d %s %s]", key->on_disk_key.k_dir_id,
+- key->on_disk_key.k_objectid, reiserfs_cpu_offset(key),
+- cpu_type(key));
++ return scnprintf(buf, size, "[%d %d %s %s]",
++ key->on_disk_key.k_dir_id,
++ key->on_disk_key.k_objectid,
++ reiserfs_cpu_offset(key), cpu_type(key));
+ else
+- sprintf(buf, "[NULL]");
++ return scnprintf(buf, size, "[NULL]");
+ }
+
+-static void sprintf_de_head(char *buf, struct reiserfs_de_head *deh)
++static int scnprintf_de_head(char *buf, size_t size,
++ struct reiserfs_de_head *deh)
+ {
+ if (deh)
+- sprintf(buf,
+- "[offset=%d dir_id=%d objectid=%d location=%d state=%04x]",
+- deh_offset(deh), deh_dir_id(deh), deh_objectid(deh),
+- deh_location(deh), deh_state(deh));
++ return scnprintf(buf, size,
++ "[offset=%d dir_id=%d objectid=%d location=%d state=%04x]",
++ deh_offset(deh), deh_dir_id(deh),
++ deh_objectid(deh), deh_location(deh),
++ deh_state(deh));
+ else
+- sprintf(buf, "[NULL]");
++ return scnprintf(buf, size, "[NULL]");
+
+ }
+
+-static void sprintf_item_head(char *buf, struct item_head *ih)
++static int scnprintf_item_head(char *buf, size_t size, struct item_head *ih)
+ {
+ if (ih) {
+- strcpy(buf,
+- (ih_version(ih) == KEY_FORMAT_3_6) ? "*3.6* " : "*3.5*");
+- sprintf_le_key(buf + strlen(buf), &(ih->ih_key));
+- sprintf(buf + strlen(buf), ", item_len %d, item_location %d, "
+- "free_space(entry_count) %d",
+- ih_item_len(ih), ih_location(ih), ih_free_space(ih));
++ char *p = buf;
++ char * const end = buf + size;
++
++ p += scnprintf(p, end - p, "%s",
++ (ih_version(ih) == KEY_FORMAT_3_6) ?
++ "*3.6* " : "*3.5*");
++
++ p += scnprintf_le_key(p, end - p, &ih->ih_key);
++
++ p += scnprintf(p, end - p,
++ ", item_len %d, item_location %d, free_space(entry_count) %d",
++ ih_item_len(ih), ih_location(ih),
++ ih_free_space(ih));
++ return p - buf;
+ } else
+- sprintf(buf, "[NULL]");
++ return scnprintf(buf, size, "[NULL]");
+ }
+
+-static void sprintf_direntry(char *buf, struct reiserfs_dir_entry *de)
++static int scnprintf_direntry(char *buf, size_t size,
++ struct reiserfs_dir_entry *de)
+ {
+ char name[20];
+
+ memcpy(name, de->de_name, de->de_namelen > 19 ? 19 : de->de_namelen);
+ name[de->de_namelen > 19 ? 19 : de->de_namelen] = 0;
+- sprintf(buf, "\"%s\"==>[%d %d]", name, de->de_dir_id, de->de_objectid);
++ return scnprintf(buf, size, "\"%s\"==>[%d %d]",
++ name, de->de_dir_id, de->de_objectid);
+ }
+
+-static void sprintf_block_head(char *buf, struct buffer_head *bh)
++static int scnprintf_block_head(char *buf, size_t size, struct buffer_head *bh)
+ {
+- sprintf(buf, "level=%d, nr_items=%d, free_space=%d rdkey ",
+- B_LEVEL(bh), B_NR_ITEMS(bh), B_FREE_SPACE(bh));
++ return scnprintf(buf, size,
++ "level=%d, nr_items=%d, free_space=%d rdkey ",
++ B_LEVEL(bh), B_NR_ITEMS(bh), B_FREE_SPACE(bh));
+ }
+
+-static void sprintf_buffer_head(char *buf, struct buffer_head *bh)
++static int scnprintf_buffer_head(char *buf, size_t size, struct buffer_head *bh)
+ {
+- sprintf(buf,
+- "dev %pg, size %zd, blocknr %llu, count %d, state 0x%lx, page %p, (%s, %s, %s)",
+- bh->b_bdev, bh->b_size,
+- (unsigned long long)bh->b_blocknr, atomic_read(&(bh->b_count)),
+- bh->b_state, bh->b_page,
+- buffer_uptodate(bh) ? "UPTODATE" : "!UPTODATE",
+- buffer_dirty(bh) ? "DIRTY" : "CLEAN",
+- buffer_locked(bh) ? "LOCKED" : "UNLOCKED");
++ return scnprintf(buf, size,
++ "dev %pg, size %zd, blocknr %llu, count %d, state 0x%lx, page %p, (%s, %s, %s)",
++ bh->b_bdev, bh->b_size,
++ (unsigned long long)bh->b_blocknr,
++ atomic_read(&(bh->b_count)),
++ bh->b_state, bh->b_page,
++ buffer_uptodate(bh) ? "UPTODATE" : "!UPTODATE",
++ buffer_dirty(bh) ? "DIRTY" : "CLEAN",
++ buffer_locked(bh) ? "LOCKED" : "UNLOCKED");
+ }
+
+-static void sprintf_disk_child(char *buf, struct disk_child *dc)
++static int scnprintf_disk_child(char *buf, size_t size, struct disk_child *dc)
+ {
+- sprintf(buf, "[dc_number=%d, dc_size=%u]", dc_block_number(dc),
+- dc_size(dc));
++ return scnprintf(buf, size, "[dc_number=%d, dc_size=%u]",
++ dc_block_number(dc), dc_size(dc));
+ }
+
+ static char *is_there_reiserfs_struct(char *fmt, int *what)
+@@ -189,55 +205,60 @@ static void prepare_error_buf(const char
+ char *fmt1 = fmt_buf;
+ char *k;
+ char *p = error_buf;
++ char * const end = &error_buf[sizeof(error_buf)];
+ int what;
+
+ spin_lock(&error_lock);
+
+- strcpy(fmt1, fmt);
++ if (WARN_ON(strscpy(fmt_buf, fmt, sizeof(fmt_buf)) < 0)) {
++ strscpy(error_buf, "format string too long", end - error_buf);
++ goto out_unlock;
++ }
+
+ while ((k = is_there_reiserfs_struct(fmt1, &what)) != NULL) {
+ *k = 0;
+
+- p += vsprintf(p, fmt1, args);
++ p += vscnprintf(p, end - p, fmt1, args);
+
+ switch (what) {
+ case 'k':
+- sprintf_le_key(p, va_arg(args, struct reiserfs_key *));
++ p += scnprintf_le_key(p, end - p,
++ va_arg(args, struct reiserfs_key *));
+ break;
+ case 'K':
+- sprintf_cpu_key(p, va_arg(args, struct cpu_key *));
++ p += scnprintf_cpu_key(p, end - p,
++ va_arg(args, struct cpu_key *));
+ break;
+ case 'h':
+- sprintf_item_head(p, va_arg(args, struct item_head *));
++ p += scnprintf_item_head(p, end - p,
++ va_arg(args, struct item_head *));
+ break;
+ case 't':
+- sprintf_direntry(p,
+- va_arg(args,
+- struct reiserfs_dir_entry *));
++ p += scnprintf_direntry(p, end - p,
++ va_arg(args, struct reiserfs_dir_entry *));
+ break;
+ case 'y':
+- sprintf_disk_child(p,
+- va_arg(args, struct disk_child *));
++ p += scnprintf_disk_child(p, end - p,
++ va_arg(args, struct disk_child *));
+ break;
+ case 'z':
+- sprintf_block_head(p,
+- va_arg(args, struct buffer_head *));
++ p += scnprintf_block_head(p, end - p,
++ va_arg(args, struct buffer_head *));
+ break;
+ case 'b':
+- sprintf_buffer_head(p,
+- va_arg(args, struct buffer_head *));
++ p += scnprintf_buffer_head(p, end - p,
++ va_arg(args, struct buffer_head *));
+ break;
+ case 'a':
+- sprintf_de_head(p,
+- va_arg(args,
+- struct reiserfs_de_head *));
++ p += scnprintf_de_head(p, end - p,
++ va_arg(args, struct reiserfs_de_head *));
+ break;
+ }
+
+- p += strlen(p);
+ fmt1 = k + 2;
+ }
+- vsprintf(p, fmt1, args);
++ p += vscnprintf(p, end - p, fmt1, args);
++out_unlock:
+ spin_unlock(&error_lock);
+
+ }
--- /dev/null
+From a65925475571953da12a9bc2082aec29d4e2c0e7 Mon Sep 17 00:00:00 2001
+From: Xin Long <lucien.xin@gmail.com>
+Date: Tue, 3 Jul 2018 16:30:47 +0800
+Subject: sctp: fix the issue that pathmtu may be set lower than MINSEGMENT
+
+From: Xin Long <lucien.xin@gmail.com>
+
+commit a65925475571953da12a9bc2082aec29d4e2c0e7 upstream.
+
+After commit b6c5734db070 ("sctp: fix the handling of ICMP Frag Needed
+for too small MTUs"), sctp_transport_update_pmtu would refetch the
+pathmtu from the dst and set it as the transport's pathmtu without any
+check.
+
+The new pathmtu may be lower than MINSEGMENT if the dst is obsolete and
+updated by .get_dst() in sctp_transport_update_pmtu. In this case, it
+could have a smaller MTU as well, and thus we should validate it
+against MINSEGMENT instead.
+
+Syzbot reported a warning in sctp_mtu_payload caused by this.
+
+This patch refetches the pathmtu by calling sctp_dst_mtu where it does
+the check against MINSEGMENT.
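+
+For reference, sctp_dst_mtu() clamps the dst MTU roughly like this
+(a sketch of the helper in include/net/sctp/sctp.h):
+
+	static inline __u32 sctp_dst_mtu(const struct dst_entry *dst)
+	{
+		return SCTP_TRUNC4(max_t(__u32, dst_mtu(dst),
+					 SCTP_DEFAULT_MINSEGMENT));
+	}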
+
+v1->v2:
+ - refetch the pathmtu by calling sctp_dst_mtu instead as Marcelo's
+ suggestion.
+
+Fixes: b6c5734db070 ("sctp: fix the handling of ICMP Frag Needed for too small MTUs")
+Reported-by: syzbot+f0d9d7cba052f9344b03@syzkaller.appspotmail.com
+Suggested-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
+Signed-off-by: Xin Long <lucien.xin@gmail.com>
+Acked-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
+Acked-by: Neil Horman <nhorman@tuxdriver.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/sctp/transport.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/sctp/transport.c
++++ b/net/sctp/transport.c
+@@ -273,7 +273,7 @@ bool sctp_transport_update_pmtu(struct s
+
+ if (dst) {
+ /* Re-fetch, as under layers may have a higher minimum size */
+- pmtu = SCTP_TRUNC4(dst_mtu(dst));
++ pmtu = sctp_dst_mtu(dst);
+ change = t->pathmtu != pmtu;
+ }
+ t->pathmtu = pmtu;
net-lan78xx-fix-race-in-tx-pending-skb-size-calculation.patch
crypto-af_alg-initialize-sg_num_bytes-in-error-code-path.patch
pci-hv-disable-enable-irqs-rather-than-bh-in-hv_compose_msi_msg.patch
+netfilter-ebtables-reject-non-bridge-targets.patch
+reiserfs-fix-buffer-overflow-with-long-warning-messages.patch
+keys-dns-fix-parsing-multiple-options.patch
+tls-stricter-error-checking-in-zerocopy-sendmsg-path.patch
+autofs-fix-slab-out-of-bounds-read-in-getname_kernel.patch
+nsh-set-mac-len-based-on-inner-packet.patch
+netfilter-ipv6-nf_defrag-drop-skb-dst-before-queueing.patch
+sctp-fix-the-issue-that-pathmtu-may-be-set-lower-than-minsegment.patch
+bdi-fix-another-oops-in-wb_workfn.patch
+bpf-reject-any-prog-that-failed-read-only-lock.patch
+rds-avoid-unenecessary-cong_update-in-loop-transport.patch
+block-don-t-use-blocking-queue-entered-for-recursive-bio-submits.patch
+bpf-sockmap-fix-crash-when-ipv6-sock-is-added.patch
+bpf-sockmap-consume_skb-in-close-path.patch
+bpf-don-t-leave-partial-mangled-prog-in-jit_subprogs-error-path.patch
+net-nfc-avoid-stalls-when-nfc_alloc_send_skb-returned-null.patch
+ipvs-initialize-tbl-entries-after-allocation.patch
+ipvs-initialize-tbl-entries-in-ip_vs_lblc_init_svc.patch
--- /dev/null
+From 32da12216e467dea70a09cd7094c30779ce0f9db Mon Sep 17 00:00:00 2001
+From: Dave Watson <davejwatson@fb.com>
+Date: Thu, 12 Jul 2018 08:03:43 -0700
+Subject: tls: Stricter error checking in zerocopy sendmsg path
+
+From: Dave Watson <davejwatson@fb.com>
+
+commit 32da12216e467dea70a09cd7094c30779ce0f9db upstream.
+
+In the zerocopy sendmsg() path, there are error checks to revert
+the zerocopy if we get any error code. syzkaller has discovered
+that tls_push_record can return -ECONNRESET, which is fatal, and
+happens after the point at which it is safe to revert the iter,
+as we've already passed the memory to do_tcp_sendpages.
+
+Previously this code could return -ENOMEM and we would want to
+revert the iter, but AFAIK this no longer returns ENOMEM after
+a447da7d004 ("tls: fix waitall behavior in tls_sw_recvmsg"),
+so we fail for all error codes.
+
+Reported-by: syzbot+c226690f7b3126c5ee04@syzkaller.appspotmail.com
+Reported-by: syzbot+709f2810a6a05f11d4d3@syzkaller.appspotmail.com
+Signed-off-by: Dave Watson <davejwatson@fb.com>
+Fixes: 3c4d7559159b ("tls: kernel TLS support")
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/tls/tls_sw.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -440,7 +440,7 @@ alloc_encrypted:
+ ret = tls_push_record(sk, msg->msg_flags, record_type);
+ if (!ret)
+ continue;
+- if (ret == -EAGAIN)
++ if (ret < 0)
+ goto send_end;
+
+ copied -= try_to_copy;