--- /dev/null
+From 6815a0b444572527256f0d0efd8efe3ddede6018 Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Wed, 4 Oct 2017 15:03:40 +0200
+Subject: ALSA: bcd2000: Add a sanity check for invalid EPs
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit 6815a0b444572527256f0d0efd8efe3ddede6018 upstream.
+
+As syzkaller spotted, currently bcd2000 driver submits a URB with the
+fixed EP without checking whether it's actually available, which may
+result in a kernel warning like:
+ usb 1-1: BOGUS urb xfer, pipe 1 != type 3
+ ------------[ cut here ]------------
+ WARNING: CPU: 0 PID: 1846 at drivers/usb/core/urb.c:449
+ usb_submit_urb+0xf8a/0x11d0
+ Modules linked in:
+ CPU: 0 PID: 1846 Comm: kworker/0:2 Not tainted
+ 4.14.0-rc2-42613-g1488251d1a98 #238
+ Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011
+ Workqueue: usb_hub_wq hub_event
+ Call Trace:
+ bcd2000_init_device sound/usb/bcd2000/bcd2000.c:289
+ bcd2000_init_midi sound/usb/bcd2000/bcd2000.c:345
+ bcd2000_probe+0xe64/0x19e0 sound/usb/bcd2000/bcd2000.c:406
+ usb_probe_interface+0x35d/0x8e0 drivers/usb/core/driver.c:361
+ ....
+
+This patch adds a sanity check of validity of EPs at the device
+initialization phase for avoiding the call with an invalid EP.
+
+Reported-by: Andrey Konovalov <andreyknvl@google.com>
+Tested-by: Andrey Konovalov <andreyknvl@google.com>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/usb/bcd2000/bcd2000.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/sound/usb/bcd2000/bcd2000.c
++++ b/sound/usb/bcd2000/bcd2000.c
+@@ -342,6 +342,13 @@ static int bcd2000_init_midi(struct bcd2
+ bcd2k->midi_out_buf, BUFSIZE,
+ bcd2000_output_complete, bcd2k, 1);
+
++ /* sanity checks of EPs before actually submitting */
++ if (usb_urb_ep_type_check(bcd2k->midi_in_urb) ||
++ usb_urb_ep_type_check(bcd2k->midi_out_urb)) {
++ dev_err(&bcd2k->dev->dev, "invalid MIDI EP\n");
++ return -EINVAL;
++ }
++
+ bcd2000_init_device(bcd2k);
+
+ return 0;
--- /dev/null
+From 58fc7f73a85d45a47057dad2af53502fdf6cf778 Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Wed, 4 Oct 2017 15:07:21 +0200
+Subject: ALSA: caiaq: Add a sanity check for invalid EPs
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit 58fc7f73a85d45a47057dad2af53502fdf6cf778 upstream.
+
+As syzkaller spotted, currently caiaq driver submits a URB with the
+fixed EP without checking whether it's actually available, which may
+result in a kernel warning like:
+ usb 1-1: BOGUS urb xfer, pipe 3 != type 1
+ ------------[ cut here ]------------
+ WARNING: CPU: 1 PID: 1150 at drivers/usb/core/urb.c:449
+ usb_submit_urb+0xf8a/0x11d0
+ Modules linked in:
+ CPU: 1 PID: 1150 Comm: kworker/1:1 Not tainted
+ 4.14.0-rc2-42660-g24b7bd59eec0 #277
+ Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011
+ Workqueue: usb_hub_wq hub_event
+ Call Trace:
+ init_card sound/usb/caiaq/device.c:467
+ snd_probe+0x81c/0x1150 sound/usb/caiaq/device.c:525
+ usb_probe_interface+0x35d/0x8e0 drivers/usb/core/driver.c:361
+ ....
+
+This patch adds a sanity check of validity of EPs at the device
+initialization phase for avoiding the call with an invalid EP.
+
+Reported-by: Andrey Konovalov <andreyknvl@google.com>
+Tested-by: Andrey Konovalov <andreyknvl@google.com>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/usb/caiaq/device.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/sound/usb/caiaq/device.c
++++ b/sound/usb/caiaq/device.c
+@@ -461,6 +461,13 @@ static int init_card(struct snd_usb_caia
+ cdev->midi_out_buf, EP1_BUFSIZE,
+ snd_usb_caiaq_midi_output_done, cdev);
+
++ /* sanity checks of EPs before actually submitting */
++ if (usb_urb_ep_type_check(&cdev->ep1_in_urb) ||
++ usb_urb_ep_type_check(&cdev->midi_out_urb)) {
++ dev_err(dev, "invalid EPs\n");
++ return -EINVAL;
++ }
++
+ init_waitqueue_head(&cdev->ep1_wait_queue);
+ init_waitqueue_head(&cdev->prepare_wait_queue);
+
--- /dev/null
+From 2a4340c57717162c6bf07a0860d05711d4de994b Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Wed, 4 Oct 2017 15:09:24 +0200
+Subject: ALSA: line6: Add a sanity check for invalid EPs
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit 2a4340c57717162c6bf07a0860d05711d4de994b upstream.
+
+As syzkaller spotted, currently line6 drivers submit a URB with the
+fixed EP without checking whether it's actually available, which may
+result in a kernel warning like:
+ usb 1-1: BOGUS urb xfer, pipe 3 != type 1
+ ------------[ cut here ]------------
+ WARNING: CPU: 0 PID: 24 at drivers/usb/core/urb.c:449
+ usb_submit_urb+0xf8a/0x11d0
+ Modules linked in:
+ CPU: 0 PID: 24 Comm: kworker/0:1 Not tainted 4.14.0-rc2-42613-g1488251d1a98 #238
+ Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011
+ Workqueue: usb_hub_wq hub_event
+ Call Trace:
+ line6_start_listen+0x55f/0x9e0 sound/usb/line6/driver.c:82
+ line6_init_cap_control sound/usb/line6/driver.c:690
+ line6_probe+0x7c9/0x1310 sound/usb/line6/driver.c:764
+ podhd_probe+0x64/0x70 sound/usb/line6/podhd.c:474
+ usb_probe_interface+0x35d/0x8e0 drivers/usb/core/driver.c:361
+ ....
+
+This patch adds a sanity check of validity of EPs at the device
+initialization phase for avoiding the call with an invalid EP.
+
+Reported-by: Andrey Konovalov <andreyknvl@google.com>
+Tested-by: Andrey Konovalov <andreyknvl@google.com>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/usb/line6/driver.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/sound/usb/line6/driver.c
++++ b/sound/usb/line6/driver.c
+@@ -78,6 +78,13 @@ static int line6_start_listen(struct usb
+ line6->buffer_listen, LINE6_BUFSIZE_LISTEN,
+ line6_data_received, line6);
+ }
++
++ /* sanity checks of EP before actually submitting */
++ if (usb_urb_ep_type_check(line6->urb_listen)) {
++ dev_err(line6->ifcdev, "invalid control EP\n");
++ return -EINVAL;
++ }
++
+ line6->urb_listen->actual_length = 0;
+ err = usb_submit_urb(line6->urb_listen, GFP_ATOMIC);
+ return err;
--- /dev/null
+From 69e0927b3774563c19b5fb32e91d75edc147fb62 Mon Sep 17 00:00:00 2001
+From: Douglas Gilbert <dgilbert@interlog.com>
+Date: Sun, 14 Jan 2018 17:00:48 -0500
+Subject: blk_rq_map_user_iov: fix error override
+
+From: Douglas Gilbert <dgilbert@interlog.com>
+
+commit 69e0927b3774563c19b5fb32e91d75edc147fb62 upstream.
+
+During stress tests by syzkaller on the sg driver the block layer
+infrequently returns EINVAL. Closer inspection shows the block
+layer was trying to return ENOMEM (which is much more
+understandable) but for some reason overroad that useful error.
+
+Patch below does not show this (unchanged) line:
+ ret =__blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy);
+That 'ret' was being overridden when that function failed.
+
+Signed-off-by: Douglas Gilbert <dgilbert@interlog.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ block/blk-map.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/block/blk-map.c
++++ b/block/blk-map.c
+@@ -126,7 +126,7 @@ int blk_rq_map_user_iov(struct request_q
+ unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
+ struct bio *bio = NULL;
+ struct iov_iter i;
+- int ret;
++ int ret = -EINVAL;
+
+ if (!iter_is_iovec(iter))
+ goto fail;
+@@ -155,7 +155,7 @@ unmap_rq:
+ __blk_rq_unmap_user(bio);
+ fail:
+ rq->bio = NULL;
+- return -EINVAL;
++ return ret;
+ }
+ EXPORT_SYMBOL(blk_rq_map_user_iov);
+
--- /dev/null
+From a6da0024ffc19e0d47712bb5ca4fd083f76b07df Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Sun, 5 Nov 2017 09:16:09 -0700
+Subject: blktrace: fix unlocked registration of tracepoints
+
+From: Jens Axboe <axboe@kernel.dk>
+
+commit a6da0024ffc19e0d47712bb5ca4fd083f76b07df upstream.
+
+We need to ensure that tracepoints are registered and unregistered
+with the users of them. The existing atomic count isn't enough for
+that. Add a lock around the tracepoints, so we serialize access
+to them.
+
+This fixes cases where we have multiple users setting up and
+tearing down tracepoints, like this:
+
+CPU: 0 PID: 2995 Comm: syzkaller857118 Not tainted
+4.14.0-rc5-next-20171018+ #36
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS
+Google 01/01/2011
+Call Trace:
+ __dump_stack lib/dump_stack.c:16 [inline]
+ dump_stack+0x194/0x257 lib/dump_stack.c:52
+ panic+0x1e4/0x41c kernel/panic.c:183
+ __warn+0x1c4/0x1e0 kernel/panic.c:546
+ report_bug+0x211/0x2d0 lib/bug.c:183
+ fixup_bug+0x40/0x90 arch/x86/kernel/traps.c:177
+ do_trap_no_signal arch/x86/kernel/traps.c:211 [inline]
+ do_trap+0x260/0x390 arch/x86/kernel/traps.c:260
+ do_error_trap+0x120/0x390 arch/x86/kernel/traps.c:297
+ do_invalid_op+0x1b/0x20 arch/x86/kernel/traps.c:310
+ invalid_op+0x18/0x20 arch/x86/entry/entry_64.S:905
+RIP: 0010:tracepoint_add_func kernel/tracepoint.c:210 [inline]
+RIP: 0010:tracepoint_probe_register_prio+0x397/0x9a0 kernel/tracepoint.c:283
+RSP: 0018:ffff8801d1d1f6c0 EFLAGS: 00010293
+RAX: ffff8801d22e8540 RBX: 00000000ffffffef RCX: ffffffff81710f07
+RDX: 0000000000000000 RSI: ffffffff85b679c0 RDI: ffff8801d5f19818
+RBP: ffff8801d1d1f7c8 R08: ffffffff81710c10 R09: 0000000000000004
+R10: ffff8801d1d1f6b0 R11: 0000000000000003 R12: ffffffff817597f0
+R13: 0000000000000000 R14: 00000000ffffffff R15: ffff8801d1d1f7a0
+ tracepoint_probe_register+0x2a/0x40 kernel/tracepoint.c:304
+ register_trace_block_rq_insert include/trace/events/block.h:191 [inline]
+ blk_register_tracepoints+0x1e/0x2f0 kernel/trace/blktrace.c:1043
+ do_blk_trace_setup+0xa10/0xcf0 kernel/trace/blktrace.c:542
+ blk_trace_setup+0xbd/0x180 kernel/trace/blktrace.c:564
+ sg_ioctl+0xc71/0x2d90 drivers/scsi/sg.c:1089
+ vfs_ioctl fs/ioctl.c:45 [inline]
+ do_vfs_ioctl+0x1b1/0x1520 fs/ioctl.c:685
+ SYSC_ioctl fs/ioctl.c:700 [inline]
+ SyS_ioctl+0x8f/0xc0 fs/ioctl.c:691
+ entry_SYSCALL_64_fastpath+0x1f/0xbe
+RIP: 0033:0x444339
+RSP: 002b:00007ffe05bb5b18 EFLAGS: 00000206 ORIG_RAX: 0000000000000010
+RAX: ffffffffffffffda RBX: 00000000006d66c0 RCX: 0000000000444339
+RDX: 000000002084cf90 RSI: 00000000c0481273 RDI: 0000000000000009
+RBP: 0000000000000082 R08: 0000000000000000 R09: 0000000000000000
+R10: 0000000000000000 R11: 0000000000000206 R12: ffffffffffffffff
+R13: 00000000c0481273 R14: 0000000000000000 R15: 0000000000000000
+
+since we can now run these in parallel. Ensure that the exported helpers
+for doing this are grabbing the queue trace mutex.
+
+Reported-by: Steven Rostedt <rostedt@goodmis.org>
+Tested-by: Dmitry Vyukov <dvyukov@google.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/blktrace.c | 32 ++++++++++++++++++++++----------
+ 1 file changed, 22 insertions(+), 10 deletions(-)
+
+--- a/kernel/trace/blktrace.c
++++ b/kernel/trace/blktrace.c
+@@ -66,7 +66,8 @@ static struct tracer_flags blk_tracer_fl
+ };
+
+ /* Global reference count of probes */
+-static atomic_t blk_probes_ref = ATOMIC_INIT(0);
++static DEFINE_MUTEX(blk_probe_mutex);
++static int blk_probes_ref;
+
+ static void blk_register_tracepoints(void);
+ static void blk_unregister_tracepoints(void);
+@@ -329,11 +330,26 @@ static void blk_trace_free(struct blk_tr
+ kfree(bt);
+ }
+
++static void get_probe_ref(void)
++{
++ mutex_lock(&blk_probe_mutex);
++ if (++blk_probes_ref == 1)
++ blk_register_tracepoints();
++ mutex_unlock(&blk_probe_mutex);
++}
++
++static void put_probe_ref(void)
++{
++ mutex_lock(&blk_probe_mutex);
++ if (!--blk_probes_ref)
++ blk_unregister_tracepoints();
++ mutex_unlock(&blk_probe_mutex);
++}
++
+ static void blk_trace_cleanup(struct blk_trace *bt)
+ {
+ blk_trace_free(bt);
+- if (atomic_dec_and_test(&blk_probes_ref))
+- blk_unregister_tracepoints();
++ put_probe_ref();
+ }
+
+ int blk_trace_remove(struct request_queue *q)
+@@ -538,8 +554,7 @@ static int do_blk_trace_setup(struct req
+ if (cmpxchg(&q->blk_trace, NULL, bt))
+ goto err;
+
+- if (atomic_inc_return(&blk_probes_ref) == 1)
+- blk_register_tracepoints();
++ get_probe_ref();
+
+ ret = 0;
+ err:
+@@ -1558,9 +1573,7 @@ static int blk_trace_remove_queue(struct
+ if (bt == NULL)
+ return -EINVAL;
+
+- if (atomic_dec_and_test(&blk_probes_ref))
+- blk_unregister_tracepoints();
+-
++ put_probe_ref();
+ blk_trace_free(bt);
+ return 0;
+ }
+@@ -1591,8 +1604,7 @@ static int blk_trace_setup_queue(struct
+ if (cmpxchg(&q->blk_trace, NULL, bt))
+ goto free_bt;
+
+- if (atomic_inc_return(&blk_probes_ref) == 1)
+- blk_register_tracepoints();
++ get_probe_ref();
+ return 0;
+
+ free_bt:
--- /dev/null
+From 6f16101e6a8b4324c36e58a29d9e0dbb287cdedb Mon Sep 17 00:00:00 2001
+From: Daniel Borkmann <daniel@iogearbox.net>
+Date: Thu, 18 Jan 2018 01:15:21 +0100
+Subject: bpf: mark dst unknown on inconsistent {s, u}bounds adjustments
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+commit 6f16101e6a8b4324c36e58a29d9e0dbb287cdedb upstream.
+
+syzkaller generated a BPF proglet and triggered a warning with
+the following:
+
+ 0: (b7) r0 = 0
+ 1: (d5) if r0 s<= 0x0 goto pc+0
+ R0=inv0 R1=ctx(id=0,off=0,imm=0) R10=fp0
+ 2: (1f) r0 -= r1
+ R0=inv0 R1=ctx(id=0,off=0,imm=0) R10=fp0
+ verifier internal error: known but bad sbounds
+
+What happens is that in the first insn, r0's min/max value
+are both 0 due to the immediate assignment, later in the jsle
+test the bounds are updated for the min value in the false
+path, meaning, they yield smin_val = 1, smax_val = 0, and when
+ctx pointer is subtracted from r0, verifier bails out with the
+internal error and throwing a WARN since smin_val != smax_val
+for the known constant.
+
+For min_val > max_val scenario it means that reg_set_min_max()
+and reg_set_min_max_inv() (which both refine existing bounds)
+demonstrated that such branch cannot be taken at runtime.
+
+In above scenario for the case where it will be taken, the
+existing [0, 0] bounds are kept intact. Meaning, the rejection
+is not due to a verifier internal error, and therefore the
+WARN() is not necessary either.
+
+We could just reject such cases in adjust_{ptr,scalar}_min_max_vals()
+when either known scalars have smin_val != smax_val or
+umin_val != umax_val or any scalar reg with bounds
+smin_val > smax_val or umin_val > umax_val. However, there
+may be a small risk of breakage of buggy programs, so handle
+this more gracefully and in adjust_{ptr,scalar}_min_max_vals()
+just taint the dst reg as unknown scalar when we see ops with
+such kind of src reg.
+
+Reported-by: syzbot+6d362cadd45dc0a12ba4@syzkaller.appspotmail.com
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/bpf/verifier.c | 25 +++--
+ tools/testing/selftests/bpf/test_verifier.c | 123 +++++++++++++++++++++++++++-
+ 2 files changed, 138 insertions(+), 10 deletions(-)
+
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -1865,15 +1865,13 @@ static int adjust_ptr_min_max_vals(struc
+
+ dst_reg = &regs[dst];
+
+- if (WARN_ON_ONCE(known && (smin_val != smax_val))) {
+- print_verifier_state(&env->cur_state);
+- verbose("verifier internal error: known but bad sbounds\n");
+- return -EINVAL;
+- }
+- if (WARN_ON_ONCE(known && (umin_val != umax_val))) {
+- print_verifier_state(&env->cur_state);
+- verbose("verifier internal error: known but bad ubounds\n");
+- return -EINVAL;
++ if ((known && (smin_val != smax_val || umin_val != umax_val)) ||
++ smin_val > smax_val || umin_val > umax_val) {
++ /* Taint dst register if offset had invalid bounds derived from
++ * e.g. dead branches.
++ */
++ __mark_reg_unknown(dst_reg);
++ return 0;
+ }
+
+ if (BPF_CLASS(insn->code) != BPF_ALU64) {
+@@ -2075,6 +2073,15 @@ static int adjust_scalar_min_max_vals(st
+ src_known = tnum_is_const(src_reg.var_off);
+ dst_known = tnum_is_const(dst_reg->var_off);
+
++ if ((src_known && (smin_val != smax_val || umin_val != umax_val)) ||
++ smin_val > smax_val || umin_val > umax_val) {
++ /* Taint dst register if offset had invalid bounds derived from
++ * e.g. dead branches.
++ */
++ __mark_reg_unknown(dst_reg);
++ return 0;
++ }
++
+ if (!src_known &&
+ opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
+ __mark_reg_unknown(dst_reg);
+--- a/tools/testing/selftests/bpf/test_verifier.c
++++ b/tools/testing/selftests/bpf/test_verifier.c
+@@ -6534,7 +6534,7 @@ static struct bpf_test tests[] = {
+ BPF_JMP_IMM(BPF_JA, 0, 0, -7),
+ },
+ .fixup_map1 = { 4 },
+- .errstr = "unbounded min value",
++ .errstr = "R0 invalid mem access 'inv'",
+ .result = REJECT,
+ },
+ {
+@@ -7715,6 +7715,127 @@ static struct bpf_test tests[] = {
+ .prog_type = BPF_PROG_TYPE_XDP,
+ },
+ {
++ "check deducing bounds from const, 1",
++ .insns = {
++ BPF_MOV64_IMM(BPF_REG_0, 1),
++ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 0),
++ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
++ BPF_EXIT_INSN(),
++ },
++ .result = REJECT,
++ .errstr = "R0 tried to subtract pointer from scalar",
++ },
++ {
++ "check deducing bounds from const, 2",
++ .insns = {
++ BPF_MOV64_IMM(BPF_REG_0, 1),
++ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
++ BPF_EXIT_INSN(),
++ BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 1, 1),
++ BPF_EXIT_INSN(),
++ BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
++ BPF_EXIT_INSN(),
++ },
++ .result = ACCEPT,
++ },
++ {
++ "check deducing bounds from const, 3",
++ .insns = {
++ BPF_MOV64_IMM(BPF_REG_0, 0),
++ BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
++ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
++ BPF_EXIT_INSN(),
++ },
++ .result = REJECT,
++ .errstr = "R0 tried to subtract pointer from scalar",
++ },
++ {
++ "check deducing bounds from const, 4",
++ .insns = {
++ BPF_MOV64_IMM(BPF_REG_0, 0),
++ BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 1),
++ BPF_EXIT_INSN(),
++ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
++ BPF_EXIT_INSN(),
++ BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
++ BPF_EXIT_INSN(),
++ },
++ .result = ACCEPT,
++ },
++ {
++ "check deducing bounds from const, 5",
++ .insns = {
++ BPF_MOV64_IMM(BPF_REG_0, 0),
++ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
++ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
++ BPF_EXIT_INSN(),
++ },
++ .result = REJECT,
++ .errstr = "R0 tried to subtract pointer from scalar",
++ },
++ {
++ "check deducing bounds from const, 6",
++ .insns = {
++ BPF_MOV64_IMM(BPF_REG_0, 0),
++ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
++ BPF_EXIT_INSN(),
++ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
++ BPF_EXIT_INSN(),
++ },
++ .result = REJECT,
++ .errstr = "R0 tried to subtract pointer from scalar",
++ },
++ {
++ "check deducing bounds from const, 7",
++ .insns = {
++ BPF_MOV64_IMM(BPF_REG_0, ~0),
++ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
++ BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
++ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
++ offsetof(struct __sk_buff, mark)),
++ BPF_EXIT_INSN(),
++ },
++ .result = REJECT,
++ .errstr = "dereference of modified ctx ptr",
++ },
++ {
++ "check deducing bounds from const, 8",
++ .insns = {
++ BPF_MOV64_IMM(BPF_REG_0, ~0),
++ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
++ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
++ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
++ offsetof(struct __sk_buff, mark)),
++ BPF_EXIT_INSN(),
++ },
++ .result = REJECT,
++ .errstr = "dereference of modified ctx ptr",
++ },
++ {
++ "check deducing bounds from const, 9",
++ .insns = {
++ BPF_MOV64_IMM(BPF_REG_0, 0),
++ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
++ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
++ BPF_EXIT_INSN(),
++ },
++ .result = REJECT,
++ .errstr = "R0 tried to subtract pointer from scalar",
++ },
++ {
++ "check deducing bounds from const, 10",
++ .insns = {
++ BPF_MOV64_IMM(BPF_REG_0, 0),
++ BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
++ /* Marks reg as unknown. */
++ BPF_ALU64_IMM(BPF_NEG, BPF_REG_0, 0),
++ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
++ BPF_EXIT_INSN(),
++ },
++ .result = REJECT,
++ .errstr = "math between ctx pointer and register with unbounded min value is not allowed",
++ },
++ {
+ "XDP pkt read, pkt_end <= pkt_data', bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
--- /dev/null
+From 59b179b48ce2a6076448a44531242ac2b3f6cef2 Mon Sep 17 00:00:00 2001
+From: Johannes Berg <johannes.berg@intel.com>
+Date: Mon, 15 Jan 2018 09:58:27 +0100
+Subject: cfg80211: check dev_set_name() return value
+
+From: Johannes Berg <johannes.berg@intel.com>
+
+commit 59b179b48ce2a6076448a44531242ac2b3f6cef2 upstream.
+
+syzbot reported a warning from rfkill_alloc(), and after a while
+I think that the reason is that it was doing fault injection and
+the dev_set_name() failed, leaving the name NULL, and we didn't
+check the return value and got to rfkill_alloc() with a NULL name.
+Since we really don't want a NULL name, we ought to check the
+return value.
+
+Fixes: fb28ad35906a ("net: struct device - replace bus_id with dev_name(), dev_set_name()")
+Reported-by: syzbot+1ddfb3357e1d7bb5b5d3@syzkaller.appspotmail.com
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/wireless/core.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+--- a/net/wireless/core.c
++++ b/net/wireless/core.c
+@@ -439,6 +439,8 @@ struct wiphy *wiphy_new_nm(const struct
+ if (rv)
+ goto use_default_name;
+ } else {
++ int rv;
++
+ use_default_name:
+ /* NOTE: This is *probably* safe w/out holding rtnl because of
+ * the restrictions on phy names. Probably this call could
+@@ -446,7 +448,11 @@ use_default_name:
+ * phyX. But, might should add some locking and check return
+ * value, and use a different name if this one exists?
+ */
+- dev_set_name(&rdev->wiphy.dev, PHY_NAME "%d", rdev->wiphy_idx);
++ rv = dev_set_name(&rdev->wiphy.dev, PHY_NAME "%d", rdev->wiphy_idx);
++ if (rv < 0) {
++ kfree(rdev);
++ return NULL;
++ }
+ }
+
+ INIT_LIST_HEAD(&rdev->wiphy.wdev_list);
--- /dev/null
+From d8c7fe9f2a486a6e5f0d5229ca43807af5ab22c6 Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Mon, 18 Dec 2017 16:40:26 -0800
+Subject: crypto: x86/twofish-3way - Fix %rbp usage
+
+From: Eric Biggers <ebiggers@google.com>
+
+commit d8c7fe9f2a486a6e5f0d5229ca43807af5ab22c6 upstream.
+
+Using %rbp as a temporary register breaks frame pointer convention and
+breaks stack traces when unwinding from an interrupt in the crypto code.
+
+In twofish-3way, we can't simply replace %rbp with another register
+because there are none available. Instead, we use the stack to hold the
+values that %rbp, %r11, and %r12 were holding previously. Each of these
+values represents the half of the output from the previous Feistel round
+that is being passed on unchanged to the following round. They are only
+used once per round, when they are exchanged with %rax, %rbx, and %rcx.
+
+As a result, we free up 3 registers (one per block) and can reassign
+them so that %rbp is not used, and additionally %r14 and %r15 are not
+used so they do not need to be saved/restored.
+
+There may be a small overhead caused by replacing 'xchg REG, REG' with
+the needed sequence 'mov MEM, REG; mov REG, MEM; mov REG, REG' once per
+round. But, counterintuitively, when I tested "ctr-twofish-3way" on a
+Haswell processor, the new version was actually about 2% faster.
+(Perhaps 'xchg' is not as well optimized as plain moves.)
+
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/crypto/twofish-x86_64-asm_64-3way.S | 118 ++++++++++++++-------------
+ 1 file changed, 63 insertions(+), 55 deletions(-)
+
+--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
++++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
+@@ -55,29 +55,31 @@
+ #define RAB1bl %bl
+ #define RAB2bl %cl
+
++#define CD0 0x0(%rsp)
++#define CD1 0x8(%rsp)
++#define CD2 0x10(%rsp)
++
++# used only before/after all rounds
+ #define RCD0 %r8
+ #define RCD1 %r9
+ #define RCD2 %r10
+
+-#define RCD0d %r8d
+-#define RCD1d %r9d
+-#define RCD2d %r10d
+-
+-#define RX0 %rbp
+-#define RX1 %r11
+-#define RX2 %r12
+-
+-#define RX0d %ebp
+-#define RX1d %r11d
+-#define RX2d %r12d
+-
+-#define RY0 %r13
+-#define RY1 %r14
+-#define RY2 %r15
+-
+-#define RY0d %r13d
+-#define RY1d %r14d
+-#define RY2d %r15d
++# used only during rounds
++#define RX0 %r8
++#define RX1 %r9
++#define RX2 %r10
++
++#define RX0d %r8d
++#define RX1d %r9d
++#define RX2d %r10d
++
++#define RY0 %r11
++#define RY1 %r12
++#define RY2 %r13
++
++#define RY0d %r11d
++#define RY1d %r12d
++#define RY2d %r13d
+
+ #define RT0 %rdx
+ #define RT1 %rsi
+@@ -85,6 +87,8 @@
+ #define RT0d %edx
+ #define RT1d %esi
+
++#define RT1bl %sil
++
+ #define do16bit_ror(rot, op1, op2, T0, T1, tmp1, tmp2, ab, dst) \
+ movzbl ab ## bl, tmp2 ## d; \
+ movzbl ab ## bh, tmp1 ## d; \
+@@ -92,6 +96,11 @@
+ op1##l T0(CTX, tmp2, 4), dst ## d; \
+ op2##l T1(CTX, tmp1, 4), dst ## d;
+
++#define swap_ab_with_cd(ab, cd, tmp) \
++ movq cd, tmp; \
++ movq ab, cd; \
++ movq tmp, ab;
++
+ /*
+ * Combined G1 & G2 function. Reordered with help of rotates to have moves
+ * at begining.
+@@ -110,15 +119,15 @@
+ /* G1,2 && G2,2 */ \
+ do16bit_ror(32, xor, xor, Tx2, Tx3, RT0, RT1, ab ## 0, x ## 0); \
+ do16bit_ror(16, xor, xor, Ty3, Ty0, RT0, RT1, ab ## 0, y ## 0); \
+- xchgq cd ## 0, ab ## 0; \
++ swap_ab_with_cd(ab ## 0, cd ## 0, RT0); \
+ \
+ do16bit_ror(32, xor, xor, Tx2, Tx3, RT0, RT1, ab ## 1, x ## 1); \
+ do16bit_ror(16, xor, xor, Ty3, Ty0, RT0, RT1, ab ## 1, y ## 1); \
+- xchgq cd ## 1, ab ## 1; \
++ swap_ab_with_cd(ab ## 1, cd ## 1, RT0); \
+ \
+ do16bit_ror(32, xor, xor, Tx2, Tx3, RT0, RT1, ab ## 2, x ## 2); \
+ do16bit_ror(16, xor, xor, Ty3, Ty0, RT0, RT1, ab ## 2, y ## 2); \
+- xchgq cd ## 2, ab ## 2;
++ swap_ab_with_cd(ab ## 2, cd ## 2, RT0);
+
+ #define enc_round_end(ab, x, y, n) \
+ addl y ## d, x ## d; \
+@@ -168,6 +177,16 @@
+ decrypt_round3(ba, dc, (n*2)+1); \
+ decrypt_round3(ba, dc, (n*2));
+
++#define push_cd() \
++ pushq RCD2; \
++ pushq RCD1; \
++ pushq RCD0;
++
++#define pop_cd() \
++ popq RCD0; \
++ popq RCD1; \
++ popq RCD2;
++
+ #define inpack3(in, n, xy, m) \
+ movq 4*(n)(in), xy ## 0; \
+ xorq w+4*m(CTX), xy ## 0; \
+@@ -223,11 +242,8 @@ ENTRY(__twofish_enc_blk_3way)
+ * %rdx: src, RIO
+ * %rcx: bool, if true: xor output
+ */
+- pushq %r15;
+- pushq %r14;
+ pushq %r13;
+ pushq %r12;
+- pushq %rbp;
+ pushq %rbx;
+
+ pushq %rcx; /* bool xor */
+@@ -235,40 +251,36 @@ ENTRY(__twofish_enc_blk_3way)
+
+ inpack_enc3();
+
+- encrypt_cycle3(RAB, RCD, 0);
+- encrypt_cycle3(RAB, RCD, 1);
+- encrypt_cycle3(RAB, RCD, 2);
+- encrypt_cycle3(RAB, RCD, 3);
+- encrypt_cycle3(RAB, RCD, 4);
+- encrypt_cycle3(RAB, RCD, 5);
+- encrypt_cycle3(RAB, RCD, 6);
+- encrypt_cycle3(RAB, RCD, 7);
++ push_cd();
++ encrypt_cycle3(RAB, CD, 0);
++ encrypt_cycle3(RAB, CD, 1);
++ encrypt_cycle3(RAB, CD, 2);
++ encrypt_cycle3(RAB, CD, 3);
++ encrypt_cycle3(RAB, CD, 4);
++ encrypt_cycle3(RAB, CD, 5);
++ encrypt_cycle3(RAB, CD, 6);
++ encrypt_cycle3(RAB, CD, 7);
++ pop_cd();
+
+ popq RIO; /* dst */
+- popq %rbp; /* bool xor */
++ popq RT1; /* bool xor */
+
+- testb %bpl, %bpl;
++ testb RT1bl, RT1bl;
+ jnz .L__enc_xor3;
+
+ outunpack_enc3(mov);
+
+ popq %rbx;
+- popq %rbp;
+ popq %r12;
+ popq %r13;
+- popq %r14;
+- popq %r15;
+ ret;
+
+ .L__enc_xor3:
+ outunpack_enc3(xor);
+
+ popq %rbx;
+- popq %rbp;
+ popq %r12;
+ popq %r13;
+- popq %r14;
+- popq %r15;
+ ret;
+ ENDPROC(__twofish_enc_blk_3way)
+
+@@ -278,35 +290,31 @@ ENTRY(twofish_dec_blk_3way)
+ * %rsi: dst
+ * %rdx: src, RIO
+ */
+- pushq %r15;
+- pushq %r14;
+ pushq %r13;
+ pushq %r12;
+- pushq %rbp;
+ pushq %rbx;
+
+ pushq %rsi; /* dst */
+
+ inpack_dec3();
+
+- decrypt_cycle3(RAB, RCD, 7);
+- decrypt_cycle3(RAB, RCD, 6);
+- decrypt_cycle3(RAB, RCD, 5);
+- decrypt_cycle3(RAB, RCD, 4);
+- decrypt_cycle3(RAB, RCD, 3);
+- decrypt_cycle3(RAB, RCD, 2);
+- decrypt_cycle3(RAB, RCD, 1);
+- decrypt_cycle3(RAB, RCD, 0);
++ push_cd();
++ decrypt_cycle3(RAB, CD, 7);
++ decrypt_cycle3(RAB, CD, 6);
++ decrypt_cycle3(RAB, CD, 5);
++ decrypt_cycle3(RAB, CD, 4);
++ decrypt_cycle3(RAB, CD, 3);
++ decrypt_cycle3(RAB, CD, 2);
++ decrypt_cycle3(RAB, CD, 1);
++ decrypt_cycle3(RAB, CD, 0);
++ pop_cd();
+
+ popq RIO; /* dst */
+
+ outunpack_dec3();
+
+ popq %rbx;
+- popq %rbp;
+ popq %r12;
+ popq %r13;
+- popq %r14;
+- popq %r15;
+ ret;
+ ENDPROC(twofish_dec_blk_3way)
--- /dev/null
+From b3a0066005821acdc0cdb092cb72587182ab583f Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Tue, 31 Oct 2017 09:53:28 +0100
+Subject: dnotify: Handle errors from fsnotify_add_mark_locked() in fcntl_dirnotify()
+
+From: Jan Kara <jack@suse.cz>
+
+commit b3a0066005821acdc0cdb092cb72587182ab583f upstream.
+
+fsnotify_add_mark_locked() can fail but we do not check its return
+value. This didn't matter before commit 9dd813c15b2c "fsnotify: Move
+mark list head from object into dedicated structure" as none of possible
+failures could happen for dnotify but after that commit -ENOMEM can be
+returned. Handle this error properly in fcntl_dirnotify() as
+otherwise we just hit BUG_ON(dn_mark->dn) in dnotify_free_mark().
+
+Reviewed-by: Amir Goldstein <amir73il@gmail.com>
+Reported-by: syzkaller
+Fixes: 9dd813c15b2c101168808d4f5941a29985758973
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/notify/dnotify/dnotify.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/fs/notify/dnotify/dnotify.c
++++ b/fs/notify/dnotify/dnotify.c
+@@ -319,7 +319,11 @@ int fcntl_dirnotify(int fd, struct file
+ dn_mark = container_of(fsn_mark, struct dnotify_mark, fsn_mark);
+ spin_lock(&fsn_mark->lock);
+ } else {
+- fsnotify_add_mark_locked(new_fsn_mark, inode, NULL, 0);
++ error = fsnotify_add_mark_locked(new_fsn_mark, inode, NULL, 0);
++ if (error) {
++ mutex_unlock(&dnotify_group->mark_mutex);
++ goto out_err;
++ }
+ spin_lock(&new_fsn_mark->lock);
+ fsn_mark = new_fsn_mark;
+ dn_mark = new_dn_mark;
+@@ -345,6 +349,7 @@ int fcntl_dirnotify(int fd, struct file
+ */
+ if (dn_mark == new_dn_mark)
+ destroy = 1;
++ error = 0;
+ goto out;
+ }
+
--- /dev/null
+From d18d1a5ac811d12f7ebc1129230312b5f2c50cb8 Mon Sep 17 00:00:00 2001
+From: Chris Wilson <chris@chris-wilson.co.uk>
+Date: Tue, 31 Oct 2017 11:55:35 +0000
+Subject: drm: Require __GFP_NOFAIL for the legacy drm_modeset_lock_all
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Chris Wilson <chris@chris-wilson.co.uk>
+
+commit d18d1a5ac811d12f7ebc1129230312b5f2c50cb8 upstream.
+
+To acquire all modeset locks requires a ww_ctx to be allocated. As this
+is the legacy path and the allocation small, to reduce the changes
+required (and complex untested error handling) to the legacy drivers, we
+simply assume that the allocation succeeds. At present, it relies on the
+too-small-to-fail rule, but syzbot found that by injecting a failure
+here we would hit the WARN. Document that this allocation must succeed
+with __GFP_NOFAIL.
+
+Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
+Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20171031115535.15166-1-chris@chris-wilson.co.uk
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/drm_modeset_lock.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/drm_modeset_lock.c
++++ b/drivers/gpu/drm/drm_modeset_lock.c
+@@ -88,7 +88,7 @@ void drm_modeset_lock_all(struct drm_dev
+ struct drm_modeset_acquire_ctx *ctx;
+ int ret;
+
+- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
++ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL | __GFP_NOFAIL);
+ if (WARN_ON(!ctx))
+ return;
+
--- /dev/null
+From 374d1b5a81f7f9cc5e7f095ac3d5aff3f6600376 Mon Sep 17 00:00:00 2001
+From: Steffen Klassert <steffen.klassert@secunet.com>
+Date: Fri, 5 Jan 2018 08:35:47 +0100
+Subject: esp: Fix GRO when the headers not fully in the linear part of the skb.
+
+From: Steffen Klassert <steffen.klassert@secunet.com>
+
+commit 374d1b5a81f7f9cc5e7f095ac3d5aff3f6600376 upstream.
+
+The GRO layer does not necessarily pull the complete headers
+into the linear part of the skb, a part may remain on the
+first page fragment. This can lead to a crash if we try to
+pull the headers, so make sure we have them on the linear
+part before pulling.
+
+Fixes: 7785bba299a8 ("esp: Add a software GRO codepath")
+Reported-by: syzbot+82bbd65569c49c6c0c4d@syzkaller.appspotmail.com
+Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/ipv4/esp4_offload.c | 3 ++-
+ net/ipv6/esp6_offload.c | 3 ++-
+ 2 files changed, 4 insertions(+), 2 deletions(-)
+
+--- a/net/ipv4/esp4_offload.c
++++ b/net/ipv4/esp4_offload.c
+@@ -38,7 +38,8 @@ static struct sk_buff **esp4_gro_receive
+ __be32 spi;
+ int err;
+
+- skb_pull(skb, offset);
++ if (!pskb_pull(skb, offset))
++ return NULL;
+
+ if ((err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq)) != 0)
+ goto out;
+--- a/net/ipv6/esp6_offload.c
++++ b/net/ipv6/esp6_offload.c
+@@ -60,7 +60,8 @@ static struct sk_buff **esp6_gro_receive
+ int nhoff;
+ int err;
+
+- skb_pull(skb, offset);
++ if (!pskb_pull(skb, offset))
++ return NULL;
+
+ if ((err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq)) != 0)
+ goto out;
--- /dev/null
+From e5571240236c5652f3e079b1d5866716a7ad819c Mon Sep 17 00:00:00 2001
+From: Tom Herbert <tom@quantonium.net>
+Date: Wed, 24 Jan 2018 12:35:41 -0800
+Subject: kcm: Check if sk_user_data already set in kcm_attach
+
+From: Tom Herbert <tom@quantonium.net>
+
+commit e5571240236c5652f3e079b1d5866716a7ad819c upstream.
+
+This is needed to prevent sk_user_data being overwritten.
+The check is done under the callback lock. This should prevent
+a socket from being attached twice to a KCM mux. It also prevents
+a socket from being attached for other use cases of sk_user_data
+as long as the other cases set sk_user_data under the lock.
+Followup work is needed to unify all the use cases of sk_user_data
+to use the same locking.
+
+Reported-by: syzbot+114b15f2be420a8886c3@syzkaller.appspotmail.com
+Fixes: ab7ac4eb9832 ("kcm: Kernel Connection Multiplexor module")
+Signed-off-by: Tom Herbert <tom@quantonium.net>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/kcm/kcmsock.c | 16 ++++++++++++++--
+ 1 file changed, 14 insertions(+), 2 deletions(-)
+
+--- a/net/kcm/kcmsock.c
++++ b/net/kcm/kcmsock.c
+@@ -1405,9 +1405,18 @@ static int kcm_attach(struct socket *soc
+ return err;
+ }
+
+- sock_hold(csk);
+-
+ write_lock_bh(&csk->sk_callback_lock);
++
++ /* Check if sk_user_data is aready by KCM or someone else.
++ * Must be done under lock to prevent race conditions.
++ */
++ if (csk->sk_user_data) {
++ write_unlock_bh(&csk->sk_callback_lock);
++ strp_done(&psock->strp);
++ kmem_cache_free(kcm_psockp, psock);
++ return -EALREADY;
++ }
++
+ psock->save_data_ready = csk->sk_data_ready;
+ psock->save_write_space = csk->sk_write_space;
+ psock->save_state_change = csk->sk_state_change;
+@@ -1415,8 +1424,11 @@ static int kcm_attach(struct socket *soc
+ csk->sk_data_ready = psock_data_ready;
+ csk->sk_write_space = psock_write_space;
+ csk->sk_state_change = psock_state_change;
++
+ write_unlock_bh(&csk->sk_callback_lock);
+
++ sock_hold(csk);
++
+ /* Finished initialization, now add the psock to the MUX. */
+ spin_lock_bh(&mux->lock);
+ head = &mux->psocks;
--- /dev/null
+From 581e7226a5d43f629eb6399a121f85f6a15f81be Mon Sep 17 00:00:00 2001
+From: Tom Herbert <tom@quantonium.net>
+Date: Wed, 24 Jan 2018 12:35:40 -0800
+Subject: kcm: Only allow TCP sockets to be attached to a KCM mux
+
+From: Tom Herbert <tom@quantonium.net>
+
+commit 581e7226a5d43f629eb6399a121f85f6a15f81be upstream.
+
+TCP sockets for IPv4 and IPv6 that are not listeners or in closed
+state are allowed to be attached to a KCM mux.
+
+Fixes: ab7ac4eb9832 ("kcm: Kernel Connection Multiplexor module")
+Reported-by: syzbot+8865eaff7f9acd593945@syzkaller.appspotmail.com
+Signed-off-by: Tom Herbert <tom@quantonium.net>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/kcm/kcmsock.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+--- a/net/kcm/kcmsock.c
++++ b/net/kcm/kcmsock.c
+@@ -1387,8 +1387,13 @@ static int kcm_attach(struct socket *soc
+ if (!csk)
+ return -EINVAL;
+
+- /* We must prevent loops or risk deadlock ! */
+- if (csk->sk_family == PF_KCM)
++ /* Only allow TCP sockets to be attached for now */
++ if ((csk->sk_family != AF_INET && csk->sk_family != AF_INET6) ||
++ csk->sk_protocol != IPPROTO_TCP)
++ return -EOPNOTSUPP;
++
++ /* Don't allow listeners or closed sockets */
++ if (csk->sk_state == TCP_LISTEN || csk->sk_state == TCP_CLOSE)
+ return -EOPNOTSUPP;
+
+ psock = kmem_cache_zalloc(kcm_psockp, GFP_KERNEL);
--- /dev/null
+From a77660d231f8b3d84fd23ed482e0964f7aa546d6 Mon Sep 17 00:00:00 2001
+From: Dmitry Vyukov <dvyukov@google.com>
+Date: Tue, 6 Feb 2018 15:40:28 -0800
+Subject: kcov: detect double association with a single task
+
+From: Dmitry Vyukov <dvyukov@google.com>
+
+commit a77660d231f8b3d84fd23ed482e0964f7aa546d6 upstream.
+
+Currently KCOV_ENABLE does not check if the current task is already
+associated with another kcov descriptor. As the result it is possible
+to associate a single task with more than one kcov descriptor, which
+later leads to a memory leak of the old descriptor. This relation is
+really meant to be one-to-one (task has only one back link).
+
+Extend validation to detect such misuse.
+
+Link: http://lkml.kernel.org/r/20180122082520.15716-1-dvyukov@google.com
+Fixes: 5c9a8750a640 ("kernel: add kcov code coverage")
+Signed-off-by: Dmitry Vyukov <dvyukov@google.com>
+Reported-by: Shankara Pailoor <sp3485@columbia.edu>
+Cc: Dmitry Vyukov <dvyukov@google.com>
+Cc: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/kcov.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/kernel/kcov.c
++++ b/kernel/kcov.c
+@@ -225,9 +225,9 @@ static int kcov_ioctl_locked(struct kcov
+ if (unused != 0 || kcov->mode == KCOV_MODE_DISABLED ||
+ kcov->area == NULL)
+ return -EINVAL;
+- if (kcov->t != NULL)
+- return -EBUSY;
+ t = current;
++ if (kcov->t != NULL || t->kcov != NULL)
++ return -EBUSY;
+ /* Cache in task struct for performance. */
+ t->kcov_size = kcov->size;
+ t->kcov_area = kcov->area;
--- /dev/null
+From f29810335965ac1f7bcb501ee2af5f039f792416 Mon Sep 17 00:00:00 2001
+From: Lan Tianyu <tianyu.lan@intel.com>
+Date: Thu, 14 Dec 2017 03:01:52 -0500
+Subject: KVM/x86: Check input paging mode when cs.l is set
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Lan Tianyu <tianyu.lan@intel.com>
+
+commit f29810335965ac1f7bcb501ee2af5f039f792416 upstream.
+
+Reported by syzkaller:
+ WARNING: CPU: 0 PID: 27962 at arch/x86/kvm/emulate.c:5631 x86_emulate_insn+0x557/0x15f0 [kvm]
+ Modules linked in: kvm_intel kvm [last unloaded: kvm]
+ CPU: 0 PID: 27962 Comm: syz-executor Tainted: G B W 4.15.0-rc2-next-20171208+ #32
+ Hardware name: Intel Corporation S1200SP/S1200SP, BIOS S1200SP.86B.01.03.0006.040720161253 04/07/2016
+ RIP: 0010:x86_emulate_insn+0x557/0x15f0 [kvm]
+ RSP: 0018:ffff8807234476d0 EFLAGS: 00010282
+ RAX: 0000000000000000 RBX: ffff88072d0237a0 RCX: ffffffffa0065c4d
+ RDX: 1ffff100e5a046f9 RSI: 0000000000000003 RDI: ffff88072d0237c8
+ RBP: ffff880723447728 R08: ffff88072d020000 R09: ffffffffa008d240
+ R10: 0000000000000002 R11: ffffed00e7d87db3 R12: ffff88072d0237c8
+ R13: ffff88072d023870 R14: ffff88072d0238c2 R15: ffffffffa008d080
+ FS: 00007f8a68666700(0000) GS:ffff880802200000(0000) knlGS:0000000000000000
+ CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ CR2: 000000002009506c CR3: 000000071fec4005 CR4: 00000000003626f0
+ Call Trace:
+ x86_emulate_instruction+0x3bc/0xb70 [kvm]
+ ? reexecute_instruction.part.162+0x130/0x130 [kvm]
+ vmx_handle_exit+0x46d/0x14f0 [kvm_intel]
+ ? trace_event_raw_event_kvm_entry+0xe7/0x150 [kvm]
+ ? handle_vmfunc+0x2f0/0x2f0 [kvm_intel]
+ ? wait_lapic_expire+0x25/0x270 [kvm]
+ vcpu_enter_guest+0x720/0x1ef0 [kvm]
+ ...
+
+When CS.L is set, vcpu should run in the 64 bit paging mode.
+Current kvm set_sregs function doesn't have such check when
+userspace inputs sreg values. This will lead to unexpected behavior.
+This patch is to add checks for CS.L, EFER.LME, EFER.LMA and
+CR4.PAE when get SREG inputs from userspace in order to avoid
+unexpected behavior.
+
+Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
+Reported-by: Dmitry Vyukov <dvyukov@google.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: Radim Krčmář <rkrcmar@redhat.com>
+Cc: Dmitry Vyukov <dvyukov@google.com>
+Cc: Jim Mattson <jmattson@google.com>
+Signed-off-by: Tianyu Lan <tianyu.lan@intel.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/x86.c | 26 ++++++++++++++++++++++++++
+ 1 file changed, 26 insertions(+)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -7474,6 +7474,29 @@ int kvm_task_switch(struct kvm_vcpu *vcp
+ }
+ EXPORT_SYMBOL_GPL(kvm_task_switch);
+
++int kvm_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
++{
++ if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG_BIT)) {
++ /*
++ * When EFER.LME and CR0.PG are set, the processor is in
++ * 64-bit mode (though maybe in a 32-bit code segment).
++ * CR4.PAE and EFER.LMA must be set.
++ */
++ if (!(sregs->cr4 & X86_CR4_PAE_BIT)
++ || !(sregs->efer & EFER_LMA))
++ return -EINVAL;
++ } else {
++ /*
++ * Not in 64-bit mode: EFER.LMA is clear and the code
++ * segment cannot be 64-bit.
++ */
++ if (sregs->efer & EFER_LMA || sregs->cs.l)
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
+ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
+ struct kvm_sregs *sregs)
+ {
+@@ -7486,6 +7509,9 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct
+ (sregs->cr4 & X86_CR4_OSXSAVE))
+ return -EINVAL;
+
++ if (kvm_valid_sregs(vcpu, sregs))
++ return -EINVAL;
++
+ apic_base_msr.data = sregs->apic_base;
+ apic_base_msr.host_initiated = true;
+ if (kvm_set_apic_base(vcpu, &apic_base_msr))
--- /dev/null
+From efdab992813fb2ed825745625b83c05032e9cda2 Mon Sep 17 00:00:00 2001
+From: Wanpeng Li <wanpeng.li@hotmail.com>
+Date: Wed, 13 Dec 2017 10:46:40 +0100
+Subject: KVM: x86: fix escape of guest dr6 to the host
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Wanpeng Li <wanpeng.li@hotmail.com>
+
+commit efdab992813fb2ed825745625b83c05032e9cda2 upstream.
+
+syzkaller reported:
+
+ WARNING: CPU: 0 PID: 12927 at arch/x86/kernel/traps.c:780 do_debug+0x222/0x250
+ CPU: 0 PID: 12927 Comm: syz-executor Tainted: G OE 4.15.0-rc2+ #16
+ RIP: 0010:do_debug+0x222/0x250
+ Call Trace:
+ <#DB>
+ debug+0x3e/0x70
+ RIP: 0010:copy_user_enhanced_fast_string+0x10/0x20
+ </#DB>
+ _copy_from_user+0x5b/0x90
+ SyS_timer_create+0x33/0x80
+ entry_SYSCALL_64_fastpath+0x23/0x9a
+
+The testcase sets a watchpoint (with perf_event_open) on a buffer that is
+passed to timer_create() as the struct sigevent argument. In timer_create(),
+copy_from_user()'s rep movsb triggers the BP. The testcase also sets
+the debug registers for the guest.
+
+However, KVM only restores host debug registers when the host has active
+watchpoints, which triggers a race condition when running the testcase with
+multiple threads. The guest's DR6.BS bit can escape to the host before
+another thread invokes timer_create(), and do_debug() complains.
+
+The fix is to respect do_debug()'s dr6 invariant when leaving KVM.
+
+Reported-by: Dmitry Vyukov <dvyukov@google.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: Radim Krčmář <rkrcmar@redhat.com>
+Cc: David Hildenbrand <david@redhat.com>
+Cc: Dmitry Vyukov <dvyukov@google.com>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Signed-off-by: Wanpeng Li <wanpeng.li@hotmail.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/x86.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -2926,6 +2926,12 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *
+ kvm_x86_ops->vcpu_put(vcpu);
+ kvm_put_guest_fpu(vcpu);
+ vcpu->arch.last_host_tsc = rdtsc();
++ /*
++ * If userspace has set any breakpoints or watchpoints, dr6 is restored
++ * on every vmexit, but if not, we might have a stale dr6 from the
++ * guest. do_debug expects dr6 to be cleared after it runs, do the same.
++ */
++ set_debugreg(0, 6);
+ }
+
+ static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
--- /dev/null
+From 51a1aaa631c90223888d8beac4d649dc11d2ca55 Mon Sep 17 00:00:00 2001
+From: Johannes Berg <johannes.berg@intel.com>
+Date: Mon, 15 Jan 2018 09:32:36 +0100
+Subject: mac80211_hwsim: validate number of different channels
+
+From: Johannes Berg <johannes.berg@intel.com>
+
+commit 51a1aaa631c90223888d8beac4d649dc11d2ca55 upstream.
+
+When creating a new radio on the fly, hwsim allows this
+to be done with an arbitrary number of channels, but
+cfg80211 only supports a limited number of simultaneous
+channels, leading to a warning.
+
+Fix this by validating the number - this requires moving
+the define for the maximum out to a visible header file.
+
+Reported-by: syzbot+8dd9051ff19940290931@syzkaller.appspotmail.com
+Fixes: b59ec8dd4394 ("mac80211_hwsim: fix number of channels in interface combinations")
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/wireless/mac80211_hwsim.c | 5 +++++
+ include/net/cfg80211.h | 2 ++
+ net/wireless/core.h | 2 --
+ 3 files changed, 7 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/wireless/mac80211_hwsim.c
++++ b/drivers/net/wireless/mac80211_hwsim.c
+@@ -3119,6 +3119,11 @@ static int hwsim_new_radio_nl(struct sk_
+ if (info->attrs[HWSIM_ATTR_CHANNELS])
+ param.channels = nla_get_u32(info->attrs[HWSIM_ATTR_CHANNELS]);
+
++ if (param.channels > CFG80211_MAX_NUM_DIFFERENT_CHANNELS) {
++ GENL_SET_ERR_MSG(info, "too many channels specified");
++ return -EINVAL;
++ }
++
+ if (info->attrs[HWSIM_ATTR_NO_VIF])
+ param.no_vif = true;
+
+--- a/include/net/cfg80211.h
++++ b/include/net/cfg80211.h
+@@ -815,6 +815,8 @@ struct cfg80211_csa_settings {
+ u8 count;
+ };
+
++#define CFG80211_MAX_NUM_DIFFERENT_CHANNELS 10
++
+ /**
+ * struct iface_combination_params - input parameters for interface combinations
+ *
+--- a/net/wireless/core.h
++++ b/net/wireless/core.h
+@@ -502,8 +502,6 @@ void cfg80211_stop_p2p_device(struct cfg
+ void cfg80211_stop_nan(struct cfg80211_registered_device *rdev,
+ struct wireless_dev *wdev);
+
+-#define CFG80211_MAX_NUM_DIFFERENT_CHANNELS 10
+-
+ #ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS
+ #define CFG80211_DEV_WARN_ON(cond) WARN_ON(cond)
+ #else
--- /dev/null
+From 72c27a68a2a3f650f0dc7891ee98f02283fc11af Mon Sep 17 00:00:00 2001
+From: Andrey Konovalov <andreyknvl@google.com>
+Date: Thu, 2 Nov 2017 09:52:27 -0400
+Subject: media: pvrusb2: properly check endpoint types
+
+From: Andrey Konovalov <andreyknvl@google.com>
+
+commit 72c27a68a2a3f650f0dc7891ee98f02283fc11af upstream.
+
+As syzkaller detected, the pvrusb2 driver submits a bulk urb without
+checking that the endpoint type is actually bulk. Add a check.
+
+usb 1-1: BOGUS urb xfer, pipe 3 != type 1
+------------[ cut here ]------------
+WARNING: CPU: 1 PID: 2713 at drivers/usb/core/urb.c:449 usb_submit_urb+0xf8a/0x11d0
+Modules linked in:
+CPU: 1 PID: 2713 Comm: pvrusb2-context Not tainted
+4.14.0-rc1-42251-gebb2c2437d80 #210
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011
+task: ffff88006b7a18c0 task.stack: ffff880069978000
+RIP: 0010:usb_submit_urb+0xf8a/0x11d0 drivers/usb/core/urb.c:448
+RSP: 0018:ffff88006997f990 EFLAGS: 00010286
+RAX: 0000000000000029 RBX: ffff880063661900 RCX: 0000000000000000
+RDX: 0000000000000029 RSI: ffffffff86876d60 RDI: ffffed000d32ff24
+RBP: ffff88006997fa90 R08: 1ffff1000d32fdca R09: 0000000000000000
+R10: 0000000000000000 R11: 0000000000000000 R12: 1ffff1000d32ff39
+R13: 0000000000000001 R14: 0000000000000003 R15: ffff880068bbed68
+FS: 0000000000000000(0000) GS:ffff88006c600000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 0000000001032000 CR3: 000000006a0ff000 CR4: 00000000000006f0
+Call Trace:
+ pvr2_send_request_ex+0xa57/0x1d80 drivers/media/usb/pvrusb2/pvrusb2-hdw.c:3645
+ pvr2_hdw_check_firmware drivers/media/usb/pvrusb2/pvrusb2-hdw.c:1812
+ pvr2_hdw_setup_low drivers/media/usb/pvrusb2/pvrusb2-hdw.c:2107
+ pvr2_hdw_setup drivers/media/usb/pvrusb2/pvrusb2-hdw.c:2250
+ pvr2_hdw_initialize+0x548/0x3c10 drivers/media/usb/pvrusb2/pvrusb2-hdw.c:2327
+ pvr2_context_check drivers/media/usb/pvrusb2/pvrusb2-context.c:118
+ pvr2_context_thread_func+0x361/0x8c0 drivers/media/usb/pvrusb2/pvrusb2-context.c:167
+ kthread+0x3a1/0x470 kernel/kthread.c:231
+ ret_from_fork+0x2a/0x40 arch/x86/entry/entry_64.S:431
+Code: 48 8b 85 30 ff ff ff 48 8d b8 98 00 00 00 e8 ee 82 89 fe 45 89
+e8 44 89 f1 4c 89 fa 48 89 c6 48 c7 c7 40 c0 ea 86 e8 30 1b dc fc <0f>
+ff e9 9b f7 ff ff e8 aa 95 25 fd e9 80 f7 ff ff e8 50 74 f3
+---[ end trace 6919030503719da6 ]---
+
+Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+Signed-off-by: Mauro Carvalho Chehab <mchehab@s-opensource.com>
+
+---
+ drivers/media/usb/pvrusb2/pvrusb2-hdw.c | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+--- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
++++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
+@@ -3642,6 +3642,12 @@ static int pvr2_send_request_ex(struct p
+ hdw);
+ hdw->ctl_write_urb->actual_length = 0;
+ hdw->ctl_write_pend_flag = !0;
++ if (usb_urb_ep_type_check(hdw->ctl_write_urb)) {
++ pvr2_trace(
++ PVR2_TRACE_ERROR_LEGS,
++ "Invalid write control endpoint");
++ return -EINVAL;
++ }
+ status = usb_submit_urb(hdw->ctl_write_urb,GFP_KERNEL);
+ if (status < 0) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+@@ -3666,6 +3672,12 @@ status);
+ hdw);
+ hdw->ctl_read_urb->actual_length = 0;
+ hdw->ctl_read_pend_flag = !0;
++ if (usb_urb_ep_type_check(hdw->ctl_read_urb)) {
++ pvr2_trace(
++ PVR2_TRACE_ERROR_LEGS,
++ "Invalid read control endpoint");
++ return -EINVAL;
++ }
+ status = usb_submit_urb(hdw->ctl_read_urb,GFP_KERNEL);
+ if (status < 0) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
--- /dev/null
+From bb422a738f6566f7439cd347d54e321e4fe92a9f Mon Sep 17 00:00:00 2001
+From: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+Date: Mon, 18 Dec 2017 20:31:41 +0900
+Subject: mm,vmscan: Make unregister_shrinker() no-op if register_shrinker() failed.
+
+From: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+
+commit bb422a738f6566f7439cd347d54e321e4fe92a9f upstream.
+
+Syzbot caught an oops at unregister_shrinker() because combination of
+commit 1d3d4437eae1bb29 ("vmscan: per-node deferred work") and fault
+injection made register_shrinker() fail and the caller of
+register_shrinker() did not check for failure.
+
+----------
+[ 554.881422] FAULT_INJECTION: forcing a failure.
+[ 554.881422] name failslab, interval 1, probability 0, space 0, times 0
+[ 554.881438] CPU: 1 PID: 13231 Comm: syz-executor1 Not tainted 4.14.0-rc8+ #82
+[ 554.881443] Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
+[ 554.881445] Call Trace:
+[ 554.881459] dump_stack+0x194/0x257
+[ 554.881474] ? arch_local_irq_restore+0x53/0x53
+[ 554.881486] ? find_held_lock+0x35/0x1d0
+[ 554.881507] should_fail+0x8c0/0xa40
+[ 554.881522] ? fault_create_debugfs_attr+0x1f0/0x1f0
+[ 554.881537] ? check_noncircular+0x20/0x20
+[ 554.881546] ? find_next_zero_bit+0x2c/0x40
+[ 554.881560] ? ida_get_new_above+0x421/0x9d0
+[ 554.881577] ? find_held_lock+0x35/0x1d0
+[ 554.881594] ? __lock_is_held+0xb6/0x140
+[ 554.881628] ? check_same_owner+0x320/0x320
+[ 554.881634] ? lock_downgrade+0x990/0x990
+[ 554.881649] ? find_held_lock+0x35/0x1d0
+[ 554.881672] should_failslab+0xec/0x120
+[ 554.881684] __kmalloc+0x63/0x760
+[ 554.881692] ? lock_downgrade+0x990/0x990
+[ 554.881712] ? register_shrinker+0x10e/0x2d0
+[ 554.881721] ? trace_event_raw_event_module_request+0x320/0x320
+[ 554.881737] register_shrinker+0x10e/0x2d0
+[ 554.881747] ? prepare_kswapd_sleep+0x1f0/0x1f0
+[ 554.881755] ? _down_write_nest_lock+0x120/0x120
+[ 554.881765] ? memcpy+0x45/0x50
+[ 554.881785] sget_userns+0xbcd/0xe20
+(...snipped...)
+[ 554.898693] kasan: CONFIG_KASAN_INLINE enabled
+[ 554.898724] kasan: GPF could be caused by NULL-ptr deref or user memory access
+[ 554.898732] general protection fault: 0000 [#1] SMP KASAN
+[ 554.898737] Dumping ftrace buffer:
+[ 554.898741] (ftrace buffer empty)
+[ 554.898743] Modules linked in:
+[ 554.898752] CPU: 1 PID: 13231 Comm: syz-executor1 Not tainted 4.14.0-rc8+ #82
+[ 554.898755] Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
+[ 554.898760] task: ffff8801d1dbe5c0 task.stack: ffff8801c9e38000
+[ 554.898772] RIP: 0010:__list_del_entry_valid+0x7e/0x150
+[ 554.898775] RSP: 0018:ffff8801c9e3f108 EFLAGS: 00010246
+[ 554.898780] RAX: dffffc0000000000 RBX: 0000000000000000 RCX: 0000000000000000
+[ 554.898784] RDX: 0000000000000000 RSI: ffff8801c53c6f98 RDI: ffff8801c53c6fa0
+[ 554.898788] RBP: ffff8801c9e3f120 R08: 1ffff100393c7d55 R09: 0000000000000004
+[ 554.898791] R10: ffff8801c9e3ef70 R11: 0000000000000000 R12: 0000000000000000
+[ 554.898795] R13: dffffc0000000000 R14: 1ffff100393c7e45 R15: ffff8801c53c6f98
+[ 554.898800] FS: 0000000000000000(0000) GS:ffff8801db300000(0000) knlGS:0000000000000000
+[ 554.898804] CS: 0010 DS: 002b ES: 002b CR0: 0000000080050033
+[ 554.898807] CR2: 00000000dbc23000 CR3: 00000001c7269000 CR4: 00000000001406e0
+[ 554.898813] DR0: 0000000020000000 DR1: 0000000020000000 DR2: 0000000000000000
+[ 554.898816] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000600
+[ 554.898818] Call Trace:
+[ 554.898828] unregister_shrinker+0x79/0x300
+[ 554.898837] ? perf_trace_mm_vmscan_writepage+0x750/0x750
+[ 554.898844] ? down_write+0x87/0x120
+[ 554.898851] ? deactivate_super+0x139/0x1b0
+[ 554.898857] ? down_read+0x150/0x150
+[ 554.898864] ? check_same_owner+0x320/0x320
+[ 554.898875] deactivate_locked_super+0x64/0xd0
+[ 554.898883] deactivate_super+0x141/0x1b0
+----------
+
+Since allowing register_shrinker() callers to call unregister_shrinker()
+when register_shrinker() failed can simplify error recovery path, this
+patch makes unregister_shrinker() no-op when register_shrinker() failed.
+Also, reset shrinker->nr_deferred in case unregister_shrinker() is
+erroneously called twice.
+
+Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+Signed-off-by: Aliaksei Karaliou <akaraliou.dev@gmail.com>
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Cc: Glauber Costa <glauber@scylladb.com>
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/vmscan.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -297,10 +297,13 @@ EXPORT_SYMBOL(register_shrinker);
+ */
+ void unregister_shrinker(struct shrinker *shrinker)
+ {
++ if (!shrinker->nr_deferred)
++ return;
+ down_write(&shrinker_rwsem);
+ list_del(&shrinker->list);
+ up_write(&shrinker_rwsem);
+ kfree(shrinker->nr_deferred);
++ shrinker->nr_deferred = NULL;
+ }
+ EXPORT_SYMBOL(unregister_shrinker);
+
--- /dev/null
+From 8d74e9f88d65af8bb2e095aff506aa6eac755ada Mon Sep 17 00:00:00 2001
+From: Willem de Bruijn <willemb@google.com>
+Date: Tue, 12 Dec 2017 11:39:04 -0500
+Subject: net: avoid skb_warn_bad_offload on IS_ERR
+
+From: Willem de Bruijn <willemb@google.com>
+
+commit 8d74e9f88d65af8bb2e095aff506aa6eac755ada upstream.
+
+skb_warn_bad_offload warns when packets enter the GSO stack that
+require skb_checksum_help or vice versa. Do not warn on arbitrary
+bad packets. Packet sockets can craft many. Syzkaller was able to
+demonstrate another one with eth_type games.
+
+In particular, suppress the warning when segmentation returns an
+error, which is for reasons other than checksum offload.
+
+See also commit 36c92474498a ("net: WARN if skb_checksum_help() is
+called on skb requiring segmentation") for context on this warning.
+
+Signed-off-by: Willem de Bruijn <willemb@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/core/dev.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -2792,7 +2792,7 @@ struct sk_buff *__skb_gso_segment(struct
+
+ segs = skb_mac_gso_segment(skb, features);
+
+- if (unlikely(skb_needs_check(skb, tx_path)))
++ if (unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs)))
+ skb_warn_bad_offload(skb);
+
+ return segs;
--- /dev/null
+From 40ca54e3a686f13117f3de0c443f8026dadf7c44 Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Sat, 27 Jan 2018 10:58:43 -0800
+Subject: net_sched: gen_estimator: fix lockdep splat
+
+From: Eric Dumazet <edumazet@google.com>
+
+commit 40ca54e3a686f13117f3de0c443f8026dadf7c44 upstream.
+
+syzbot reported a lockdep splat in gen_new_estimator() /
+est_fetch_counters() when attempting to lock est->stats_lock.
+
+Since est_fetch_counters() is called from BH context from timer
+interrupt, we need to block BH as well when calling it from process
+context.
+
+Most qdiscs use per cpu counters and are immune to the problem,
+but net/sched/act_api.c and net/netfilter/xt_RATEEST.c are using
+a spinlock to protect their data. They both call gen_new_estimator()
+while object is created and not yet alive, so this bug could
+not trigger a deadlock, only a lockdep splat.
+
+Fixes: 1c0d32fde5bd ("net_sched: gen_estimator: complete rewrite of rate estimators")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Acked-by: Cong Wang <xiyou.wangcong@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/core/gen_estimator.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/net/core/gen_estimator.c
++++ b/net/core/gen_estimator.c
+@@ -159,7 +159,11 @@ int gen_new_estimator(struct gnet_stats_
+ est->intvl_log = intvl_log;
+ est->cpu_bstats = cpu_bstats;
+
++ if (stats_lock)
++ local_bh_disable();
+ est_fetch_counters(est, &b);
++ if (stats_lock)
++ local_bh_enable();
+ est->last_bytes = b.bytes;
+ est->last_packets = b.packets;
+ old = rcu_dereference_protected(*rate_est, 1);
--- /dev/null
+From 1a38956cce5eabd7b74f94bab70265e4df83165e Mon Sep 17 00:00:00 2001
+From: Dmitry Vyukov <dvyukov@google.com>
+Date: Tue, 30 Jan 2018 15:21:34 +0100
+Subject: netfilter: ipt_CLUSTERIP: fix out-of-bounds accesses in clusterip_tg_check()
+
+From: Dmitry Vyukov <dvyukov@google.com>
+
+commit 1a38956cce5eabd7b74f94bab70265e4df83165e upstream.
+
+Commit 136e92bbec0a switched local_nodes from an array to a bitmask
+but did not add proper bounds checks. As the result
+clusterip_config_init_nodelist() can both over-read
+ipt_clusterip_tgt_info.local_nodes and over-write
+clusterip_config.local_nodes.
+
+Add bounds checks for both.
+
+Fixes: 136e92bbec0a ("[NETFILTER] CLUSTERIP: use a bitmap to store node responsibility data")
+Signed-off-by: Dmitry Vyukov <dvyukov@google.com>
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/ipv4/netfilter/ipt_CLUSTERIP.c | 16 +++++++++++++---
+ 1 file changed, 13 insertions(+), 3 deletions(-)
+
+--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
++++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
+@@ -431,7 +431,7 @@ static int clusterip_tg_check(const stru
+ struct ipt_clusterip_tgt_info *cipinfo = par->targinfo;
+ const struct ipt_entry *e = par->entryinfo;
+ struct clusterip_config *config;
+- int ret;
++ int ret, i;
+
+ if (par->nft_compat) {
+ pr_err("cannot use CLUSTERIP target from nftables compat\n");
+@@ -450,8 +450,18 @@ static int clusterip_tg_check(const stru
+ pr_info("Please specify destination IP\n");
+ return -EINVAL;
+ }
+-
+- /* FIXME: further sanity checks */
++ if (cipinfo->num_local_nodes > ARRAY_SIZE(cipinfo->local_nodes)) {
++ pr_info("bad num_local_nodes %u\n", cipinfo->num_local_nodes);
++ return -EINVAL;
++ }
++ for (i = 0; i < cipinfo->num_local_nodes; i++) {
++ if (cipinfo->local_nodes[i] - 1 >=
++ sizeof(config->local_nodes) * 8) {
++ pr_info("bad local_nodes[%d] %u\n",
++ i, cipinfo->local_nodes[i]);
++ return -EINVAL;
++ }
++ }
+
+ config = clusterip_config_find_get(par->net, e->ip.dst.s_addr, 1);
+ if (!config) {
--- /dev/null
+From 3f34cfae1238848fd53f25e5c8fd59da57901f4b Mon Sep 17 00:00:00 2001
+From: Paolo Abeni <pabeni@redhat.com>
+Date: Tue, 30 Jan 2018 19:01:40 +0100
+Subject: netfilter: on sockopt() acquire sock lock only in the required scope
+
+From: Paolo Abeni <pabeni@redhat.com>
+
+commit 3f34cfae1238848fd53f25e5c8fd59da57901f4b upstream.
+
+Syzbot reported several deadlocks in the netfilter area caused by
+rtnl lock and socket lock being acquired with a different order on
+different code paths, leading to backtraces like the following one:
+
+======================================================
+WARNING: possible circular locking dependency detected
+4.15.0-rc9+ #212 Not tainted
+------------------------------------------------------
+syzkaller041579/3682 is trying to acquire lock:
+ (sk_lock-AF_INET6){+.+.}, at: [<000000008775e4dd>] lock_sock
+include/net/sock.h:1463 [inline]
+ (sk_lock-AF_INET6){+.+.}, at: [<000000008775e4dd>]
+do_ipv6_setsockopt.isra.8+0x3c5/0x39d0 net/ipv6/ipv6_sockglue.c:167
+
+but task is already holding lock:
+ (rtnl_mutex){+.+.}, at: [<000000004342eaa9>] rtnl_lock+0x17/0x20
+net/core/rtnetlink.c:74
+
+which lock already depends on the new lock.
+
+the existing dependency chain (in reverse order) is:
+
+-> #1 (rtnl_mutex){+.+.}:
+ __mutex_lock_common kernel/locking/mutex.c:756 [inline]
+ __mutex_lock+0x16f/0x1a80 kernel/locking/mutex.c:893
+ mutex_lock_nested+0x16/0x20 kernel/locking/mutex.c:908
+ rtnl_lock+0x17/0x20 net/core/rtnetlink.c:74
+ register_netdevice_notifier+0xad/0x860 net/core/dev.c:1607
+ tee_tg_check+0x1a0/0x280 net/netfilter/xt_TEE.c:106
+ xt_check_target+0x22c/0x7d0 net/netfilter/x_tables.c:845
+ check_target net/ipv6/netfilter/ip6_tables.c:538 [inline]
+ find_check_entry.isra.7+0x935/0xcf0
+net/ipv6/netfilter/ip6_tables.c:580
+ translate_table+0xf52/0x1690 net/ipv6/netfilter/ip6_tables.c:749
+ do_replace net/ipv6/netfilter/ip6_tables.c:1165 [inline]
+ do_ip6t_set_ctl+0x370/0x5f0 net/ipv6/netfilter/ip6_tables.c:1691
+ nf_sockopt net/netfilter/nf_sockopt.c:106 [inline]
+ nf_setsockopt+0x67/0xc0 net/netfilter/nf_sockopt.c:115
+ ipv6_setsockopt+0x115/0x150 net/ipv6/ipv6_sockglue.c:928
+ udpv6_setsockopt+0x45/0x80 net/ipv6/udp.c:1422
+ sock_common_setsockopt+0x95/0xd0 net/core/sock.c:2978
+ SYSC_setsockopt net/socket.c:1849 [inline]
+ SyS_setsockopt+0x189/0x360 net/socket.c:1828
+ entry_SYSCALL_64_fastpath+0x29/0xa0
+
+-> #0 (sk_lock-AF_INET6){+.+.}:
+ lock_acquire+0x1d5/0x580 kernel/locking/lockdep.c:3914
+ lock_sock_nested+0xc2/0x110 net/core/sock.c:2780
+ lock_sock include/net/sock.h:1463 [inline]
+ do_ipv6_setsockopt.isra.8+0x3c5/0x39d0 net/ipv6/ipv6_sockglue.c:167
+ ipv6_setsockopt+0xd7/0x150 net/ipv6/ipv6_sockglue.c:922
+ udpv6_setsockopt+0x45/0x80 net/ipv6/udp.c:1422
+ sock_common_setsockopt+0x95/0xd0 net/core/sock.c:2978
+ SYSC_setsockopt net/socket.c:1849 [inline]
+ SyS_setsockopt+0x189/0x360 net/socket.c:1828
+ entry_SYSCALL_64_fastpath+0x29/0xa0
+
+other info that might help us debug this:
+
+ Possible unsafe locking scenario:
+
+ CPU0 CPU1
+ ---- ----
+ lock(rtnl_mutex);
+ lock(sk_lock-AF_INET6);
+ lock(rtnl_mutex);
+ lock(sk_lock-AF_INET6);
+
+ *** DEADLOCK ***
+
+1 lock held by syzkaller041579/3682:
+ #0: (rtnl_mutex){+.+.}, at: [<000000004342eaa9>] rtnl_lock+0x17/0x20
+net/core/rtnetlink.c:74
+
+The problem, as Florian noted, is that nf_setsockopt() is always
+called with the socket held, even if the lock itself is required only
+for very tight scopes and only for some operation.
+
+This patch addresses the issues moving the lock_sock() call only
+where really needed, namely in ipv*_getorigdst(), so that nf_setsockopt()
+no longer needs to acquire both locks.
+
+Fixes: 22265a5c3c10 ("netfilter: xt_TEE: resolve oif using netdevice notifiers")
+Reported-by: syzbot+a4c2dc980ac1af699b36@syzkaller.appspotmail.com
+Suggested-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/ipv4/ip_sockglue.c | 14 ++++----------
+ net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c | 6 +++++-
+ net/ipv6/ipv6_sockglue.c | 17 +++++------------
+ net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c | 18 ++++++++++++------
+ 4 files changed, 26 insertions(+), 29 deletions(-)
+
+--- a/net/ipv4/ip_sockglue.c
++++ b/net/ipv4/ip_sockglue.c
+@@ -1251,11 +1251,8 @@ int ip_setsockopt(struct sock *sk, int l
+ if (err == -ENOPROTOOPT && optname != IP_HDRINCL &&
+ optname != IP_IPSEC_POLICY &&
+ optname != IP_XFRM_POLICY &&
+- !ip_mroute_opt(optname)) {
+- lock_sock(sk);
++ !ip_mroute_opt(optname))
+ err = nf_setsockopt(sk, PF_INET, optname, optval, optlen);
+- release_sock(sk);
+- }
+ #endif
+ return err;
+ }
+@@ -1280,12 +1277,9 @@ int compat_ip_setsockopt(struct sock *sk
+ if (err == -ENOPROTOOPT && optname != IP_HDRINCL &&
+ optname != IP_IPSEC_POLICY &&
+ optname != IP_XFRM_POLICY &&
+- !ip_mroute_opt(optname)) {
+- lock_sock(sk);
+- err = compat_nf_setsockopt(sk, PF_INET, optname,
+- optval, optlen);
+- release_sock(sk);
+- }
++ !ip_mroute_opt(optname))
++ err = compat_nf_setsockopt(sk, PF_INET, optname, optval,
++ optlen);
+ #endif
+ return err;
+ }
+--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
++++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+@@ -218,15 +218,19 @@ getorigdst(struct sock *sk, int optval,
+ struct nf_conntrack_tuple tuple;
+
+ memset(&tuple, 0, sizeof(tuple));
++
++ lock_sock(sk);
+ tuple.src.u3.ip = inet->inet_rcv_saddr;
+ tuple.src.u.tcp.port = inet->inet_sport;
+ tuple.dst.u3.ip = inet->inet_daddr;
+ tuple.dst.u.tcp.port = inet->inet_dport;
+ tuple.src.l3num = PF_INET;
+ tuple.dst.protonum = sk->sk_protocol;
++ release_sock(sk);
+
+ /* We only do TCP and SCTP at the moment: is there a better way? */
+- if (sk->sk_protocol != IPPROTO_TCP && sk->sk_protocol != IPPROTO_SCTP) {
++ if (tuple.dst.protonum != IPPROTO_TCP &&
++ tuple.dst.protonum != IPPROTO_SCTP) {
+ pr_debug("SO_ORIGINAL_DST: Not a TCP/SCTP socket\n");
+ return -ENOPROTOOPT;
+ }
+--- a/net/ipv6/ipv6_sockglue.c
++++ b/net/ipv6/ipv6_sockglue.c
+@@ -915,12 +915,8 @@ int ipv6_setsockopt(struct sock *sk, int
+ #ifdef CONFIG_NETFILTER
+ /* we need to exclude all possible ENOPROTOOPTs except default case */
+ if (err == -ENOPROTOOPT && optname != IPV6_IPSEC_POLICY &&
+- optname != IPV6_XFRM_POLICY) {
+- lock_sock(sk);
+- err = nf_setsockopt(sk, PF_INET6, optname, optval,
+- optlen);
+- release_sock(sk);
+- }
++ optname != IPV6_XFRM_POLICY)
++ err = nf_setsockopt(sk, PF_INET6, optname, optval, optlen);
+ #endif
+ return err;
+ }
+@@ -950,12 +946,9 @@ int compat_ipv6_setsockopt(struct sock *
+ #ifdef CONFIG_NETFILTER
+ /* we need to exclude all possible ENOPROTOOPTs except default case */
+ if (err == -ENOPROTOOPT && optname != IPV6_IPSEC_POLICY &&
+- optname != IPV6_XFRM_POLICY) {
+- lock_sock(sk);
+- err = compat_nf_setsockopt(sk, PF_INET6, optname,
+- optval, optlen);
+- release_sock(sk);
+- }
++ optname != IPV6_XFRM_POLICY)
++ err = compat_nf_setsockopt(sk, PF_INET6, optname, optval,
++ optlen);
+ #endif
+ return err;
+ }
+--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
++++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+@@ -226,20 +226,27 @@ static const struct nf_hook_ops ipv6_con
+ static int
+ ipv6_getorigdst(struct sock *sk, int optval, void __user *user, int *len)
+ {
+- const struct inet_sock *inet = inet_sk(sk);
++ struct nf_conntrack_tuple tuple = { .src.l3num = NFPROTO_IPV6 };
+ const struct ipv6_pinfo *inet6 = inet6_sk(sk);
++ const struct inet_sock *inet = inet_sk(sk);
+ const struct nf_conntrack_tuple_hash *h;
+ struct sockaddr_in6 sin6;
+- struct nf_conntrack_tuple tuple = { .src.l3num = NFPROTO_IPV6 };
+ struct nf_conn *ct;
++ __be32 flow_label;
++ int bound_dev_if;
+
++ lock_sock(sk);
+ tuple.src.u3.in6 = sk->sk_v6_rcv_saddr;
+ tuple.src.u.tcp.port = inet->inet_sport;
+ tuple.dst.u3.in6 = sk->sk_v6_daddr;
+ tuple.dst.u.tcp.port = inet->inet_dport;
+ tuple.dst.protonum = sk->sk_protocol;
++ bound_dev_if = sk->sk_bound_dev_if;
++ flow_label = inet6->flow_label;
++ release_sock(sk);
+
+- if (sk->sk_protocol != IPPROTO_TCP && sk->sk_protocol != IPPROTO_SCTP)
++ if (tuple.dst.protonum != IPPROTO_TCP &&
++ tuple.dst.protonum != IPPROTO_SCTP)
+ return -ENOPROTOOPT;
+
+ if (*len < 0 || (unsigned int) *len < sizeof(sin6))
+@@ -257,14 +264,13 @@ ipv6_getorigdst(struct sock *sk, int opt
+
+ sin6.sin6_family = AF_INET6;
+ sin6.sin6_port = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.tcp.port;
+- sin6.sin6_flowinfo = inet6->flow_label & IPV6_FLOWINFO_MASK;
++ sin6.sin6_flowinfo = flow_label & IPV6_FLOWINFO_MASK;
+ memcpy(&sin6.sin6_addr,
+ &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.in6,
+ sizeof(sin6.sin6_addr));
+
+ nf_ct_put(ct);
+- sin6.sin6_scope_id = ipv6_iface_scope_id(&sin6.sin6_addr,
+- sk->sk_bound_dev_if);
++ sin6.sin6_scope_id = ipv6_iface_scope_id(&sin6.sin6_addr, bound_dev_if);
+ return copy_to_user(user, &sin6, sizeof(sin6)) ? -EFAULT : 0;
+ }
+
--- /dev/null
+From da17c73b6eb74aad3c3c0654394635675b623b3e Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Wed, 24 Jan 2018 17:16:09 -0800
+Subject: netfilter: x_tables: avoid out-of-bounds reads in xt_request_find_{match|target}
+
+From: Eric Dumazet <edumazet@google.com>
+
+commit da17c73b6eb74aad3c3c0654394635675b623b3e upstream.
+
+It looks like syzbot found its way into netfilter territory.
+
+Issue here is that @name comes from user space and might
+not be null terminated.
+
+Out-of-bound reads happen, KASAN is not happy.
+
+v2 added similar fix for xt_request_find_target(),
+as Florian advised.
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Acked-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/netfilter/x_tables.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/net/netfilter/x_tables.c
++++ b/net/netfilter/x_tables.c
+@@ -209,6 +209,9 @@ xt_request_find_match(uint8_t nfproto, c
+ {
+ struct xt_match *match;
+
++ if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
++ return ERR_PTR(-EINVAL);
++
+ match = xt_find_match(nfproto, name, revision);
+ if (IS_ERR(match)) {
+ request_module("%st_%s", xt_prefix[nfproto], name);
+@@ -251,6 +254,9 @@ struct xt_target *xt_request_find_target
+ {
+ struct xt_target *target;
+
++ if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
++ return ERR_PTR(-EINVAL);
++
+ target = xt_find_target(af, name, revision);
+ if (IS_ERR(target)) {
+ request_module("%st_%s", xt_prefix[af], name);
--- /dev/null
+From 889c604fd0b5f6d3b8694ade229ee44124de1127 Mon Sep 17 00:00:00 2001
+From: Dmitry Vyukov <dvyukov@google.com>
+Date: Thu, 28 Dec 2017 09:48:54 +0100
+Subject: netfilter: x_tables: fix int overflow in xt_alloc_table_info()
+
+From: Dmitry Vyukov <dvyukov@google.com>
+
+commit 889c604fd0b5f6d3b8694ade229ee44124de1127 upstream.
+
+syzkaller triggered OOM kills by passing ipt_replace.size = -1
+to IPT_SO_SET_REPLACE. The root cause is that SMP_ALIGN() in
+xt_alloc_table_info() causes int overflow and the size check passes
+when it should not. SMP_ALIGN() is a no-longer-needed leftover.
+
+Remove SMP_ALIGN() call in xt_alloc_table_info().
+
+Reported-by: syzbot+4396883fa8c4f64e0175@syzkaller.appspotmail.com
+Signed-off-by: Dmitry Vyukov <dvyukov@google.com>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/netfilter/x_tables.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/net/netfilter/x_tables.c
++++ b/net/netfilter/x_tables.c
+@@ -39,7 +39,6 @@ MODULE_LICENSE("GPL");
+ MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
+ MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module");
+
+-#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
+ #define XT_PCPU_BLOCK_SIZE 4096
+
+ struct compat_delta {
+@@ -1000,7 +999,7 @@ struct xt_table_info *xt_alloc_table_inf
+ return NULL;
+
+ /* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
+- if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > totalram_pages)
++ if ((size >> PAGE_SHIFT) + 2 > totalram_pages)
+ return NULL;
+
+ info = kvmalloc(sz, GFP_KERNEL);
--- /dev/null
+From ba7cd5d95f25cc6005f687dabdb4e7a6063adda9 Mon Sep 17 00:00:00 2001
+From: Cong Wang <xiyou.wangcong@gmail.com>
+Date: Wed, 31 Jan 2018 15:02:47 -0800
+Subject: netfilter: xt_cgroup: initialize info->priv in cgroup_mt_check_v1()
+
+From: Cong Wang <xiyou.wangcong@gmail.com>
+
+commit ba7cd5d95f25cc6005f687dabdb4e7a6063adda9 upstream.
+
+xt_cgroup_info_v1->priv is an internal pointer only used for kernel,
+we should not trust what user-space provides.
+
+Reported-by: <syzbot+4fbcfcc0d2e6592bd641@syzkaller.appspotmail.com>
+Fixes: c38c4597e4bf ("netfilter: implement xt_cgroup cgroup2 path match")
+Cc: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/netfilter/xt_cgroup.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/net/netfilter/xt_cgroup.c
++++ b/net/netfilter/xt_cgroup.c
+@@ -52,6 +52,7 @@ static int cgroup_mt_check_v1(const stru
+ return -EINVAL;
+ }
+
++ info->priv = NULL;
+ if (info->has_path) {
+ cgrp = cgroup_get_from_path(info->path);
+ if (IS_ERR(cgrp)) {
--- /dev/null
+From 7dc68e98757a8eccf8ca7a53a29b896f1eef1f76 Mon Sep 17 00:00:00 2001
+From: Cong Wang <xiyou.wangcong@gmail.com>
+Date: Mon, 5 Feb 2018 14:41:45 -0800
+Subject: netfilter: xt_RATEEST: acquire xt_rateest_mutex for hash insert
+
+From: Cong Wang <xiyou.wangcong@gmail.com>
+
+commit 7dc68e98757a8eccf8ca7a53a29b896f1eef1f76 upstream.
+
+rateest_hash is supposed to be protected by xt_rateest_mutex,
+and, as suggested by Eric, lookup and insert should be atomic,
+so we should acquire the xt_rateest_mutex once for both.
+
+So introduce a non-locking helper for internal use and keep the
+locking one for external.
+
+Reported-by: <syzbot+5cb189720978275e4c75@syzkaller.appspotmail.com>
+Fixes: 5859034d7eb8 ("[NETFILTER]: x_tables: add RATEEST target")
+Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
+Reviewed-by: Florian Westphal <fw@strlen.de>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/netfilter/xt_RATEEST.c | 22 +++++++++++++++++-----
+ 1 file changed, 17 insertions(+), 5 deletions(-)
+
+--- a/net/netfilter/xt_RATEEST.c
++++ b/net/netfilter/xt_RATEEST.c
+@@ -39,23 +39,31 @@ static void xt_rateest_hash_insert(struc
+ hlist_add_head(&est->list, &rateest_hash[h]);
+ }
+
+-struct xt_rateest *xt_rateest_lookup(const char *name)
++static struct xt_rateest *__xt_rateest_lookup(const char *name)
+ {
+ struct xt_rateest *est;
+ unsigned int h;
+
+ h = xt_rateest_hash(name);
+- mutex_lock(&xt_rateest_mutex);
+ hlist_for_each_entry(est, &rateest_hash[h], list) {
+ if (strcmp(est->name, name) == 0) {
+ est->refcnt++;
+- mutex_unlock(&xt_rateest_mutex);
+ return est;
+ }
+ }
+- mutex_unlock(&xt_rateest_mutex);
++
+ return NULL;
+ }
++
++struct xt_rateest *xt_rateest_lookup(const char *name)
++{
++ struct xt_rateest *est;
++
++ mutex_lock(&xt_rateest_mutex);
++ est = __xt_rateest_lookup(name);
++ mutex_unlock(&xt_rateest_mutex);
++ return est;
++}
+ EXPORT_SYMBOL_GPL(xt_rateest_lookup);
+
+ void xt_rateest_put(struct xt_rateest *est)
+@@ -100,8 +108,10 @@ static int xt_rateest_tg_checkentry(cons
+
+ net_get_random_once(&jhash_rnd, sizeof(jhash_rnd));
+
+- est = xt_rateest_lookup(info->name);
++ mutex_lock(&xt_rateest_mutex);
++ est = __xt_rateest_lookup(info->name);
+ if (est) {
++ mutex_unlock(&xt_rateest_mutex);
+ /*
+ * If estimator parameters are specified, they must match the
+ * existing estimator.
+@@ -139,11 +149,13 @@ static int xt_rateest_tg_checkentry(cons
+
+ info->est = est;
+ xt_rateest_hash_insert(est);
++ mutex_unlock(&xt_rateest_mutex);
+ return 0;
+
+ err2:
+ kfree(est);
+ err1:
++ mutex_unlock(&xt_rateest_mutex);
+ return ret;
+ }
+
--- /dev/null
+From 6e6e41c3112276288ccaf80c70916779b84bb276 Mon Sep 17 00:00:00 2001
+From: Jason Wang <jasowang@redhat.com>
+Date: Fri, 9 Feb 2018 17:45:49 +0800
+Subject: ptr_ring: fail early if queue occupies more than KMALLOC_MAX_SIZE
+
+From: Jason Wang <jasowang@redhat.com>
+
+commit 6e6e41c3112276288ccaf80c70916779b84bb276 upstream.
+
+To avoid slab to warn about exceeded size, fail early if queue
+occupies more than KMALLOC_MAX_SIZE.
+
+Reported-by: syzbot+e4d4f9ddd4295539735d@syzkaller.appspotmail.com
+Fixes: 2e0ab8ca83c12 ("ptr_ring: array based FIFO for pointers")
+Signed-off-by: Jason Wang <jasowang@redhat.com>
+Acked-by: Michael S. Tsirkin <mst@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/ptr_ring.h | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/include/linux/ptr_ring.h
++++ b/include/linux/ptr_ring.h
+@@ -447,6 +447,8 @@ static inline int ptr_ring_consume_batch
+
+ static inline void **__ptr_ring_init_queue_alloc(unsigned int size, gfp_t gfp)
+ {
++ if (size * sizeof(void *) > KMALLOC_MAX_SIZE)
++ return NULL;
+ return kcalloc(size, sizeof(void *), gfp);
+ }
+
--- /dev/null
+From 0bf7800f1799b5b1fd7d4f024e9ece53ac489011 Mon Sep 17 00:00:00 2001
+From: Jason Wang <jasowang@redhat.com>
+Date: Fri, 9 Feb 2018 17:45:50 +0800
+Subject: ptr_ring: try vmalloc() when kmalloc() fails
+
+From: Jason Wang <jasowang@redhat.com>
+
+commit 0bf7800f1799b5b1fd7d4f024e9ece53ac489011 upstream.
+
+This patch switch to use kvmalloc_array() for using a vmalloc()
+fallback to help in case kmalloc() fails.
+
+Reported-by: syzbot+e4d4f9ddd4295539735d@syzkaller.appspotmail.com
+Fixes: 2e0ab8ca83c12 ("ptr_ring: array based FIFO for pointers")
+Signed-off-by: Jason Wang <jasowang@redhat.com>
+Acked-by: Michael S. Tsirkin <mst@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/ptr_ring.h | 13 ++++++++-----
+ 1 file changed, 8 insertions(+), 5 deletions(-)
+
+--- a/include/linux/ptr_ring.h
++++ b/include/linux/ptr_ring.h
+@@ -445,11 +445,14 @@ static inline int ptr_ring_consume_batch
+ __PTR_RING_PEEK_CALL_v; \
+ })
+
++/* Not all gfp_t flags (besides GFP_KERNEL) are allowed. See
++ * documentation for vmalloc for which of them are legal.
++ */
+ static inline void **__ptr_ring_init_queue_alloc(unsigned int size, gfp_t gfp)
+ {
+ if (size * sizeof(void *) > KMALLOC_MAX_SIZE)
+ return NULL;
+- return kcalloc(size, sizeof(void *), gfp);
++ return kvmalloc_array(size, sizeof(void *), gfp | __GFP_ZERO);
+ }
+
+ static inline void __ptr_ring_set_size(struct ptr_ring *r, int size)
+@@ -582,7 +585,7 @@ static inline int ptr_ring_resize(struct
+ spin_unlock(&(r)->producer_lock);
+ spin_unlock_irqrestore(&(r)->consumer_lock, flags);
+
+- kfree(old);
++ kvfree(old);
+
+ return 0;
+ }
+@@ -622,7 +625,7 @@ static inline int ptr_ring_resize_multip
+ }
+
+ for (i = 0; i < nrings; ++i)
+- kfree(queues[i]);
++ kvfree(queues[i]);
+
+ kfree(queues);
+
+@@ -630,7 +633,7 @@ static inline int ptr_ring_resize_multip
+
+ nomem:
+ while (--i >= 0)
+- kfree(queues[i]);
++ kvfree(queues[i]);
+
+ kfree(queues);
+
+@@ -645,7 +648,7 @@ static inline void ptr_ring_cleanup(stru
+ if (destroy)
+ while ((ptr = ptr_ring_consume(r)))
+ destroy(ptr);
+- kfree(r->queue);
++ kvfree(r->queue);
+ }
+
+ #endif /* _LINUX_PTR_RING_H */
--- /dev/null
+From d0e312fe3d34c1bc014a7f8ec6540d05e8077483 Mon Sep 17 00:00:00 2001
+From: Leon Romanovsky <leonro@mellanox.com>
+Date: Tue, 5 Dec 2017 22:30:04 +0200
+Subject: RDMA/netlink: Fix general protection fault
+
+From: Leon Romanovsky <leonro@mellanox.com>
+
+commit d0e312fe3d34c1bc014a7f8ec6540d05e8077483 upstream.
+
+The RDMA netlink core code checks validity of messages by ensuring
+that type and operand are in range. It works well for almost all
+clients except NLDEV, which has cb_table less than number of operands.
+
+Request to access such operand will trigger the following kernel panic.
+
+This patch updates all places where cb_table is declared for the
+consistency, but only NLDEV actually needs it.
+
+general protection fault: 0000 [#1] SMP DEBUG_PAGEALLOC KASAN
+Modules linked in:
+CPU: 0 PID: 522 Comm: syz-executor6 Not tainted 4.13.0+ #4
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.7.5-0-ge51488c-20140602_164612-nilsson.home.kraxel.org 04/01/2014
+task: ffff8800657799c0 task.stack: ffff8800695d000
+RIP: 0010:rdma_nl_rcv_msg+0x13a/0x4c0
+RSP: 0018:ffff8800695d7838 EFLAGS: 00010207
+RAX: dffffc0000000000 RBX: 1ffff1000d2baf0b RCX: 00000000704ff4d7
+RDX: 0000000000000000 RSI: ffffffff81ddb03c RDI: 00000003827fa6bc
+RBP: ffff8800695d7900 R08: ffffffff82ec0578 R09: 0000000000000000
+R10: ffff8800695d7900 R11: 0000000000000001 R12: 000000000000001c
+R13: ffff880069d31e00 R14: 00000000ffffffff R15: ffff880069d357c0
+FS: 00007fee6acb8700(0000) GS:ffff88006ca00000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 00000000201a9000 CR3: 0000000059766000 CR4: 00000000000006b0
+DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+Call Trace:
+ ? rdma_nl_multicast+0x80/0x80
+ rdma_nl_rcv+0x36b/0x4d0
+ ? ibnl_put_attr+0xc0/0xc0
+ netlink_unicast+0x4bd/0x6d0
+ ? netlink_sendskb+0x50/0x50
+ ? drop_futex_key_refs.isra.4+0x68/0xb0
+ netlink_sendmsg+0x9ab/0xbd0
+ ? nlmsg_notify+0x140/0x140
+ ? wake_up_q+0xa1/0xf0
+ ? drop_futex_key_refs.isra.4+0x68/0xb0
+ sock_sendmsg+0x88/0xd0
+ sock_write_iter+0x228/0x3c0
+ ? sock_sendmsg+0xd0/0xd0
+ ? do_futex+0x3e5/0xb20
+ ? iov_iter_init+0xaf/0x1d0
+ __vfs_write+0x46e/0x640
+ ? sched_clock_cpu+0x1b/0x190
+ ? __vfs_read+0x620/0x620
+ ? __fget+0x23a/0x390
+ ? rw_verify_area+0xca/0x290
+ vfs_write+0x192/0x490
+ SyS_write+0xde/0x1c0
+ ? SyS_read+0x1c0/0x1c0
+ ? trace_hardirqs_on_thunk+0x1a/0x1c
+ entry_SYSCALL_64_fastpath+0x18/0xad
+RIP: 0033:0x7fee6a74a219
+RSP: 002b:00007fee6acb7d58 EFLAGS: 00000212 ORIG_RAX: 0000000000000001
+RAX: ffffffffffffffda RBX: 0000000000638000 RCX: 00007fee6a74a219
+RDX: 0000000000000078 RSI: 0000000020141000 RDI: 0000000000000006
+RBP: 0000000000000046 R08: 0000000000000000 R09: 0000000000000000
+R10: 0000000000000000 R11: 0000000000000212 R12: ffff8800695d7f98
+R13: 0000000020141000 R14: 0000000000000006 R15: 00000000ffffffff
+Code: d6 48 b8 00 00 00 00 00 fc ff df 66 41 81 e4 ff 03 44 8d 72 ff 4a 8d 3c b5 c0 a6 7f 82 44 89 b5 4c ff ff ff 48 89 f9 48 c1 e9 03 <0f> b6 0c 01 48 89 f8 83 e0 07 83 c0 03 38 c8 7c 08 84 c9 0f 85
+RIP: rdma_nl_rcv_msg+0x13a/0x4c0 RSP: ffff8800695d7838
+---[ end trace ba085d123959c8ec ]---
+Kernel panic - not syncing: Fatal exception
+
+Cc: syzkaller <syzkaller@googlegroups.com>
+Fixes: b4c598a67ea1 ("RDMA/netlink: Implement nldev device dumpit calback")
+Reviewed-by: Mark Bloch <markb@mellanox.com>
+Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+
+---
+ drivers/infiniband/core/cma.c | 2 +-
+ drivers/infiniband/core/device.c | 2 +-
+ drivers/infiniband/core/iwcm.c | 2 +-
+ drivers/infiniband/core/nldev.c | 2 +-
+ 4 files changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -4461,7 +4461,7 @@ out:
+ return skb->len;
+ }
+
+-static const struct rdma_nl_cbs cma_cb_table[] = {
++static const struct rdma_nl_cbs cma_cb_table[RDMA_NL_RDMA_CM_NUM_OPS] = {
+ [RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats},
+ };
+
+--- a/drivers/infiniband/core/device.c
++++ b/drivers/infiniband/core/device.c
+@@ -1154,7 +1154,7 @@ struct net_device *ib_get_net_dev_by_par
+ }
+ EXPORT_SYMBOL(ib_get_net_dev_by_params);
+
+-static const struct rdma_nl_cbs ibnl_ls_cb_table[] = {
++static const struct rdma_nl_cbs ibnl_ls_cb_table[RDMA_NL_LS_NUM_OPS] = {
+ [RDMA_NL_LS_OP_RESOLVE] = {
+ .doit = ib_nl_handle_resolve_resp,
+ .flags = RDMA_NL_ADMIN_PERM,
+--- a/drivers/infiniband/core/iwcm.c
++++ b/drivers/infiniband/core/iwcm.c
+@@ -80,7 +80,7 @@ const char *__attribute_const__ iwcm_rej
+ }
+ EXPORT_SYMBOL(iwcm_reject_msg);
+
+-static struct rdma_nl_cbs iwcm_nl_cb_table[] = {
++static struct rdma_nl_cbs iwcm_nl_cb_table[RDMA_NL_IWPM_NUM_OPS] = {
+ [RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb},
+ [RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb},
+ [RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb},
+--- a/drivers/infiniband/core/nldev.c
++++ b/drivers/infiniband/core/nldev.c
+@@ -303,7 +303,7 @@ out: cb->args[0] = idx;
+ return skb->len;
+ }
+
+-static const struct rdma_nl_cbs nldev_cb_table[] = {
++static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
+ [RDMA_NLDEV_CMD_GET] = {
+ .doit = nldev_get_doit,
+ .dump = nldev_get_dumpit,
--- /dev/null
+From f10b4cff98c6977668434fbf5dd58695eeca2897 Mon Sep 17 00:00:00 2001
+From: Sowmini Varadhan <sowmini.varadhan@oracle.com>
+Date: Thu, 30 Nov 2017 11:11:29 -0800
+Subject: rds: tcp: atomically purge entries from rds_tcp_conn_list during netns delete
+
+From: Sowmini Varadhan <sowmini.varadhan@oracle.com>
+
+commit f10b4cff98c6977668434fbf5dd58695eeca2897 upstream.
+
+The rds_tcp_kill_sock() function parses the rds_tcp_conn_list
+to find the rds_connection entries marked for deletion as part
+of the netns deletion under the protection of the rds_tcp_conn_lock.
+Since the rds_tcp_conn_list tracks rds_tcp_connections (which
+have a 1:1 mapping with rds_conn_path), multiple tc entries in
+the rds_tcp_conn_list will map to a single rds_connection, and will
+be deleted as part of the rds_conn_destroy() operation that is
+done outside the rds_tcp_conn_lock.
+
+The rds_tcp_conn_list traversal done under the protection of
+rds_tcp_conn_lock should not leave any doomed tc entries in
+the list after the rds_tcp_conn_lock is released, else another
+concurrently executing netns delete (for a different netns) thread
+may trip on these entries.
+
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: Sowmini Varadhan <sowmini.varadhan@oracle.com>
+Acked-by: Santosh Shilimkar <santosh.shilimkar@oracle.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/rds/tcp.c | 9 +++++++--
+ net/rds/tcp.h | 1 +
+ 2 files changed, 8 insertions(+), 2 deletions(-)
+
+--- a/net/rds/tcp.c
++++ b/net/rds/tcp.c
+@@ -306,7 +306,8 @@ static void rds_tcp_conn_free(void *arg)
+ rdsdebug("freeing tc %p\n", tc);
+
+ spin_lock_irqsave(&rds_tcp_conn_lock, flags);
+- list_del(&tc->t_tcp_node);
++ if (!tc->t_tcp_node_detached)
++ list_del(&tc->t_tcp_node);
+ spin_unlock_irqrestore(&rds_tcp_conn_lock, flags);
+
+ kmem_cache_free(rds_tcp_conn_slab, tc);
+@@ -531,8 +532,12 @@ static void rds_tcp_kill_sock(struct net
+
+ if (net != c_net || !tc->t_sock)
+ continue;
+- if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn))
++ if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn)) {
+ list_move_tail(&tc->t_tcp_node, &tmp_list);
++ } else {
++ list_del(&tc->t_tcp_node);
++ tc->t_tcp_node_detached = true;
++ }
+ }
+ spin_unlock_irq(&rds_tcp_conn_lock);
+ list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node) {
+--- a/net/rds/tcp.h
++++ b/net/rds/tcp.h
+@@ -12,6 +12,7 @@ struct rds_tcp_incoming {
+ struct rds_tcp_connection {
+
+ struct list_head t_tcp_node;
++ bool t_tcp_node_detached;
+ struct rds_conn_path *t_cpath;
+ /* t_conn_path_lock synchronizes the connection establishment between
+ * rds_tcp_accept_one and rds_tcp_conn_path_connect
--- /dev/null
+From 681648e67d43cf269c5590ecf021ed481f4551fc Mon Sep 17 00:00:00 2001
+From: Sowmini Varadhan <sowmini.varadhan@oracle.com>
+Date: Thu, 30 Nov 2017 11:11:28 -0800
+Subject: rds: tcp: correctly sequence cleanup on netns deletion.
+
+From: Sowmini Varadhan <sowmini.varadhan@oracle.com>
+
+commit 681648e67d43cf269c5590ecf021ed481f4551fc upstream.
+
+Commit 8edc3affc077 ("rds: tcp: Take explicit refcounts on struct net")
+introduces a regression in rds-tcp netns cleanup. The cleanup_net(),
+(and thus rds_tcp_dev_event notification) is only called from put_net()
+when all netns refcounts go to 0, but this cannot happen if the
+rds_connection itself is holding a c_net ref that it expects to
+release in rds_tcp_kill_sock.
+
+Instead, the rds_tcp_kill_sock callback should make sure to
+tear down state carefully, ensuring that the socket teardown
+is only done after all data-structures and workqs that depend
+on it are quiesced.
+
+The original motivation for commit 8edc3affc077 ("rds: tcp: Take explicit
+refcounts on struct net") was to resolve a race condition reported by
+syzkaller where workqs for tx/rx/connect were triggered after the
+namespace was deleted. Those worker threads should have been
+cancelled/flushed before socket tear-down and indeed,
+rds_conn_path_destroy() does try to sequence this by doing
+ /* cancel cp_send_w */
+ /* cancel cp_recv_w */
+ /* flush cp_down_w */
+ /* free data structures */
+Here the "flush cp_down_w" will trigger rds_conn_shutdown and thus
+invoke rds_tcp_conn_path_shutdown() to close the tcp socket, so that
+we ought to have satisfied the requirement that "socket-close is
+done after all other dependent state is quiesced". However,
+rds_conn_shutdown has a bug in that it *always* triggers the reconnect
+workq (and if connection is successful, we always restart tx/rx
+workqs so with the right timing, we risk the race conditions reported
+by syzkaller).
+
+Netns deletion is like module teardown- no need to restart a
+reconnect in this case. We can use the c_destroy_in_prog bit
+to avoid restarting the reconnect.
+
+Fixes: 8edc3affc077 ("rds: tcp: Take explicit refcounts on struct net")
+Signed-off-by: Sowmini Varadhan <sowmini.varadhan@oracle.com>
+Acked-by: Santosh Shilimkar <santosh.shilimkar@oracle.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/rds/connection.c | 3 ++-
+ net/rds/rds.h | 6 +++---
+ net/rds/tcp.c | 4 ++--
+ 3 files changed, 7 insertions(+), 6 deletions(-)
+
+--- a/net/rds/connection.c
++++ b/net/rds/connection.c
+@@ -366,6 +366,8 @@ void rds_conn_shutdown(struct rds_conn_p
+ * to the conn hash, so we never trigger a reconnect on this
+ * conn - the reconnect is always triggered by the active peer. */
+ cancel_delayed_work_sync(&cp->cp_conn_w);
++ if (conn->c_destroy_in_prog)
++ return;
+ rcu_read_lock();
+ if (!hlist_unhashed(&conn->c_hash_node)) {
+ rcu_read_unlock();
+@@ -445,7 +447,6 @@ void rds_conn_destroy(struct rds_connect
+ */
+ rds_cong_remove_conn(conn);
+
+- put_net(conn->c_net);
+ kfree(conn->c_path);
+ kmem_cache_free(rds_conn_slab, conn);
+
+--- a/net/rds/rds.h
++++ b/net/rds/rds.h
+@@ -150,7 +150,7 @@ struct rds_connection {
+
+ /* Protocol version */
+ unsigned int c_version;
+- struct net *c_net;
++ possible_net_t c_net;
+
+ struct list_head c_map_item;
+ unsigned long c_map_queued;
+@@ -165,13 +165,13 @@ struct rds_connection {
+ static inline
+ struct net *rds_conn_net(struct rds_connection *conn)
+ {
+- return conn->c_net;
++ return read_pnet(&conn->c_net);
+ }
+
+ static inline
+ void rds_conn_net_set(struct rds_connection *conn, struct net *net)
+ {
+- conn->c_net = get_net(net);
++ write_pnet(&conn->c_net, net);
+ }
+
+ #define RDS_FLAG_CONG_BITMAP 0x01
+--- a/net/rds/tcp.c
++++ b/net/rds/tcp.c
+@@ -527,7 +527,7 @@ static void rds_tcp_kill_sock(struct net
+ rds_tcp_listen_stop(lsock, &rtn->rds_tcp_accept_w);
+ spin_lock_irq(&rds_tcp_conn_lock);
+ list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
+- struct net *c_net = tc->t_cpath->cp_conn->c_net;
++ struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
+
+ if (net != c_net || !tc->t_sock)
+ continue;
+@@ -586,7 +586,7 @@ static void rds_tcp_sysctl_reset(struct
+
+ spin_lock_irq(&rds_tcp_conn_lock);
+ list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
+- struct net *c_net = tc->t_cpath->cp_conn->c_net;
++ struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
+
+ if (net != c_net || !tc->t_sock)
+ continue;
--- /dev/null
+From ecca8f88da5c4260cc2bccfefd2a24976704c366 Mon Sep 17 00:00:00 2001
+From: Xin Long <lucien.xin@gmail.com>
+Date: Fri, 17 Nov 2017 14:11:11 +0800
+Subject: sctp: set frag_point in sctp_setsockopt_maxseg correctly
+
+From: Xin Long <lucien.xin@gmail.com>
+
+commit ecca8f88da5c4260cc2bccfefd2a24976704c366 upstream.
+
+Now in sctp_setsockopt_maxseg user_frag or frag_point can be set with
+val >= 8 and val <= SCTP_MAX_CHUNK_LEN. But both checks are incorrect.
+
+val >= 8 means frag_point can even be less than SCTP_DEFAULT_MINSEGMENT.
+Then in sctp_datamsg_from_user(), when its value is greater than cookie
+echo len and trying to bundle with cookie echo chunk, the first_len will
+overflow.
+
+The worst case is when its value is equal to the cookie echo len: first_len
+becomes 0, it will go into a dead loop for fragment later on. In Hangbin
+syzkaller testing env, oom was even triggered due to consecutive memory
+allocation in that loop.
+
+Besides, SCTP_MAX_CHUNK_LEN is the max size of the whole chunk, it should
+deduct the data header for frag_point or user_frag check.
+
+This patch does a proper check with SCTP_DEFAULT_MINSEGMENT subtracting
+the sctphdr and datahdr, SCTP_MAX_CHUNK_LEN subtracting datahdr when
+setting frag_point via sockopt. It also improves sctp_setsockopt_maxseg
+codes.
+
+Suggested-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
+Reported-by: Hangbin Liu <liuhangbin@gmail.com>
+Signed-off-by: Xin Long <lucien.xin@gmail.com>
+Acked-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/net/sctp/sctp.h | 3 ++-
+ net/sctp/socket.c | 29 +++++++++++++++++++----------
+ 2 files changed, 21 insertions(+), 11 deletions(-)
+
+--- a/include/net/sctp/sctp.h
++++ b/include/net/sctp/sctp.h
+@@ -444,7 +444,8 @@ static inline int sctp_frag_point(const
+ if (asoc->user_frag)
+ frag = min_t(int, frag, asoc->user_frag);
+
+- frag = SCTP_TRUNC4(min_t(int, frag, SCTP_MAX_CHUNK_LEN));
++ frag = SCTP_TRUNC4(min_t(int, frag, SCTP_MAX_CHUNK_LEN -
++ sizeof(struct sctp_data_chunk)));
+
+ return frag;
+ }
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -3136,9 +3136,9 @@ static int sctp_setsockopt_mappedv4(stru
+ */
+ static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned int optlen)
+ {
++ struct sctp_sock *sp = sctp_sk(sk);
+ struct sctp_assoc_value params;
+ struct sctp_association *asoc;
+- struct sctp_sock *sp = sctp_sk(sk);
+ int val;
+
+ if (optlen == sizeof(int)) {
+@@ -3154,26 +3154,35 @@ static int sctp_setsockopt_maxseg(struct
+ if (copy_from_user(&params, optval, optlen))
+ return -EFAULT;
+ val = params.assoc_value;
+- } else
++ } else {
+ return -EINVAL;
++ }
+
+- if ((val != 0) && ((val < 8) || (val > SCTP_MAX_CHUNK_LEN)))
+- return -EINVAL;
++ if (val) {
++ int min_len, max_len;
+
+- asoc = sctp_id2assoc(sk, params.assoc_id);
+- if (!asoc && params.assoc_id && sctp_style(sk, UDP))
+- return -EINVAL;
++ min_len = SCTP_DEFAULT_MINSEGMENT - sp->pf->af->net_header_len;
++ min_len -= sizeof(struct sctphdr) +
++ sizeof(struct sctp_data_chunk);
++
++ max_len = SCTP_MAX_CHUNK_LEN - sizeof(struct sctp_data_chunk);
+
++ if (val < min_len || val > max_len)
++ return -EINVAL;
++ }
++
++ asoc = sctp_id2assoc(sk, params.assoc_id);
+ if (asoc) {
+ if (val == 0) {
+- val = asoc->pathmtu;
+- val -= sp->pf->af->net_header_len;
++ val = asoc->pathmtu - sp->pf->af->net_header_len;
+ val -= sizeof(struct sctphdr) +
+- sizeof(struct sctp_data_chunk);
++ sizeof(struct sctp_data_chunk);
+ }
+ asoc->user_frag = val;
+ asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu);
+ } else {
++ if (params.assoc_id && sctp_style(sk, UDP))
++ return -EINVAL;
+ sp->user_frag = val;
+ }
+
--- /dev/null
+From ef28df55ac27e1e5cd122e19fa311d886d47a756 Mon Sep 17 00:00:00 2001
+From: Paul Moore <paul@paul-moore.com>
+Date: Tue, 28 Nov 2017 18:51:12 -0500
+Subject: selinux: ensure the context is NUL terminated in security_context_to_sid_core()
+
+From: Paul Moore <paul@paul-moore.com>
+
+commit ef28df55ac27e1e5cd122e19fa311d886d47a756 upstream.
+
+The syzbot/syzkaller automated tests found a problem in
+security_context_to_sid_core() during early boot (before we load the
+SELinux policy) where we could potentially feed context strings without
+NUL terminators into the strcmp() function.
+
+We already guard against this during normal operation (after the SELinux
+policy has been loaded) by making a copy of the context strings and
+explicitly adding a NUL terminator to the end. The patch extends this
+protection to the early boot case (no loaded policy) by moving the context
+copy earlier in security_context_to_sid_core().
+
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: Paul Moore <paul@paul-moore.com>
+Reviewed-By: William Roberts <william.c.roberts@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ security/selinux/ss/services.c | 18 ++++++++----------
+ 1 file changed, 8 insertions(+), 10 deletions(-)
+
+--- a/security/selinux/ss/services.c
++++ b/security/selinux/ss/services.c
+@@ -1413,27 +1413,25 @@ static int security_context_to_sid_core(
+ if (!scontext_len)
+ return -EINVAL;
+
++ /* Copy the string to allow changes and ensure a NUL terminator */
++ scontext2 = kmemdup_nul(scontext, scontext_len, gfp_flags);
++ if (!scontext2)
++ return -ENOMEM;
++
+ if (!ss_initialized) {
+ int i;
+
+ for (i = 1; i < SECINITSID_NUM; i++) {
+- if (!strcmp(initial_sid_to_string[i], scontext)) {
++ if (!strcmp(initial_sid_to_string[i], scontext2)) {
+ *sid = i;
+- return 0;
++ goto out;
+ }
+ }
+ *sid = SECINITSID_KERNEL;
+- return 0;
++ goto out;
+ }
+ *sid = SECSID_NULL;
+
+- /* Copy the string so that we can modify the copy as we parse it. */
+- scontext2 = kmalloc(scontext_len + 1, gfp_flags);
+- if (!scontext2)
+- return -ENOMEM;
+- memcpy(scontext2, scontext, scontext_len);
+- scontext2[scontext_len] = 0;
+-
+ if (force) {
+ /* Save another copy for storing in uninterpreted form */
+ rc = -ENOMEM;
--- /dev/null
+From 4b14752ec4e0d87126e636384cf37c8dd9df157c Mon Sep 17 00:00:00 2001
+From: Paul Moore <paul@paul-moore.com>
+Date: Tue, 5 Dec 2017 17:17:43 -0500
+Subject: selinux: skip bounded transition processing if the policy isn't loaded
+
+From: Paul Moore <paul@paul-moore.com>
+
+commit 4b14752ec4e0d87126e636384cf37c8dd9df157c upstream.
+
+We can't do anything reasonable in security_bounded_transition() if we
+don't have a policy loaded, and in fact we could run into problems
+with some of the code inside expecting a policy. Fix these problems
+like we do many others in security/selinux/ss/services.c by checking
+to see if the policy is loaded (ss_initialized) and returning quickly
+if it isn't.
+
+Reported-by: syzbot <syzkaller-bugs@googlegroups.com>
+Signed-off-by: Paul Moore <paul@paul-moore.com>
+Acked-by: Stephen Smalley <sds@tycho.nsa.gov>
+Reviewed-by: James Morris <james.l.morris@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ security/selinux/ss/services.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/security/selinux/ss/services.c
++++ b/security/selinux/ss/services.c
+@@ -867,6 +867,9 @@ int security_bounded_transition(u32 old_
+ int index;
+ int rc;
+
++ if (!ss_initialized)
++ return 0;
++
+ read_lock(&policy_rwlock);
+
+ rc = -EINVAL;
--- /dev/null
+usb-core-add-a-helper-function-to-check-the-validity-of-ep-type-in-urb.patch
+vhost-use-mutex_lock_nested-in-vhost_dev_lock_vqs.patch
+kcm-check-if-sk_user_data-already-set-in-kcm_attach.patch
+kcm-only-allow-tcp-sockets-to-be-attached-to-a-kcm-mux.patch
+bpf-mark-dst-unknown-on-inconsistent-s-u-bounds-adjustments.patch
+cfg80211-check-dev_set_name-return-value.patch
+mac80211_hwsim-validate-number-of-different-channels.patch
+esp-fix-gro-when-the-headers-not-fully-in-the-linear-part-of-the-skb.patch
+xfrm-don-t-call-xfrm_policy_cache_flush-while-holding-spinlock.patch
+xfrm-fix-rcu-usage-in-xfrm_get_type_offload.patch
+xfrm-skip-policies-marked-as-dead-while-rehashing.patch
+mm-vmscan-make-unregister_shrinker-no-op-if-register_shrinker-failed.patch
+kvm-x86-check-input-paging-mode-when-cs.l-is-set.patch
+rdma-netlink-fix-general-protection-fault.patch
+xfrm-fix-stack-out-of-bounds-read-on-socket-policy-lookup.patch
+xfrm-check-id-proto-in-validate_tmpl.patch
+sctp-set-frag_point-in-sctp_setsockopt_maxseg-correctly.patch
+blktrace-fix-unlocked-registration-of-tracepoints.patch
+dnotify-handle-errors-from-fsnotify_add_mark_locked-in-fcntl_dirnotify.patch
+drm-require-__gfp_nofail-for-the-legacy-drm_modeset_lock_all.patch
+alsa-line6-add-a-sanity-check-for-invalid-eps.patch
+alsa-caiaq-add-a-sanity-check-for-invalid-eps.patch
+alsa-bcd2000-add-a-sanity-check-for-invalid-eps.patch
+ptr_ring-fail-early-if-queue-occupies-more-than-kmalloc_max_size.patch
+ptr_ring-try-vmalloc-when-kmalloc-fails.patch
+selinux-ensure-the-context-is-nul-terminated-in-security_context_to_sid_core.patch
+selinux-skip-bounded-transition-processing-if-the-policy-isn-t-loaded.patch
+media-pvrusb2-properly-check-endpoint-types.patch
+crypto-x86-twofish-3way-fix-rbp-usage.patch
+staging-android-ion-add-__gfp_nowarn-for-system-contig-heap.patch
+staging-android-ion-switch-from-warn-to-pr_warn.patch
+blk_rq_map_user_iov-fix-error-override.patch
+kvm-x86-fix-escape-of-guest-dr6-to-the-host.patch
+kcov-detect-double-association-with-a-single-task.patch
+netfilter-x_tables-fix-int-overflow-in-xt_alloc_table_info.patch
+netfilter-x_tables-avoid-out-of-bounds-reads-in-xt_request_find_-match-target.patch
+netfilter-ipt_clusterip-fix-out-of-bounds-accesses-in-clusterip_tg_check.patch
+netfilter-on-sockopt-acquire-sock-lock-only-in-the-required-scope.patch
+netfilter-xt_cgroup-initialize-info-priv-in-cgroup_mt_check_v1.patch
+netfilter-xt_rateest-acquire-xt_rateest_mutex-for-hash-insert.patch
+rds-tcp-correctly-sequence-cleanup-on-netns-deletion.patch
+rds-tcp-atomically-purge-entries-from-rds_tcp_conn_list-during-netns-delete.patch
+net-avoid-skb_warn_bad_offload-on-is_err.patch
+net_sched-gen_estimator-fix-lockdep-splat.patch
--- /dev/null
+From 0c75f10312a35b149b2cebb1832316b35c2337ca Mon Sep 17 00:00:00 2001
+From: Laura Abbott <labbott@redhat.com>
+Date: Fri, 5 Jan 2018 11:14:08 -0800
+Subject: staging: android: ion: Add __GFP_NOWARN for system contig heap
+
+From: Laura Abbott <labbott@redhat.com>
+
+commit 0c75f10312a35b149b2cebb1832316b35c2337ca upstream.
+
+syzbot reported a warning from Ion:
+
+ WARNING: CPU: 1 PID: 3485 at mm/page_alloc.c:3926
+
+ ...
+ __alloc_pages_nodemask+0x9fb/0xd80 mm/page_alloc.c:4252
+ alloc_pages_current+0xb6/0x1e0 mm/mempolicy.c:2036
+ alloc_pages include/linux/gfp.h:492 [inline]
+ ion_system_contig_heap_allocate+0x40/0x2c0
+ drivers/staging/android/ion/ion_system_heap.c:374
+ ion_buffer_create drivers/staging/android/ion/ion.c:93 [inline]
+ ion_alloc+0x2c1/0x9e0 drivers/staging/android/ion/ion.c:420
+ ion_ioctl+0x26d/0x380 drivers/staging/android/ion/ion-ioctl.c:84
+ vfs_ioctl fs/ioctl.c:46 [inline]
+ do_vfs_ioctl+0x1b1/0x1520 fs/ioctl.c:686
+ SYSC_ioctl fs/ioctl.c:701 [inline]
+ SyS_ioctl+0x8f/0xc0 fs/ioctl.c:692
+
+This is a warning about attempting to allocate order > MAX_ORDER. This
+is coming from a userspace Ion allocation request. Since userspace is
+free to request however much memory it wants (and the kernel is free to
+deny its allocation), silence the allocation attempt with __GFP_NOWARN
+in case it fails.
+
+Reported-by: syzbot+76e7efc4748495855a4d@syzkaller.appspotmail.com
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: Laura Abbott <labbott@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/staging/android/ion/ion_system_heap.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/staging/android/ion/ion_system_heap.c
++++ b/drivers/staging/android/ion/ion_system_heap.c
+@@ -371,7 +371,7 @@ static int ion_system_contig_heap_alloca
+ unsigned long i;
+ int ret;
+
+- page = alloc_pages(low_order_gfp_flags, order);
++ page = alloc_pages(low_order_gfp_flags | __GFP_NOWARN, order);
+ if (!page)
+ return -ENOMEM;
+
--- /dev/null
+From e4e179a844f52e907e550f887d0a2171f1508af1 Mon Sep 17 00:00:00 2001
+From: Laura Abbott <labbott@redhat.com>
+Date: Fri, 5 Jan 2018 11:14:09 -0800
+Subject: staging: android: ion: Switch from WARN to pr_warn
+
+From: Laura Abbott <labbott@redhat.com>
+
+commit e4e179a844f52e907e550f887d0a2171f1508af1 upstream.
+
+Syzbot reported a warning with Ion:
+
+WARNING: CPU: 0 PID: 3502 at drivers/staging/android/ion/ion-ioctl.c:73 ion_ioctl+0x2db/0x380 drivers/staging/android/ion/ion-ioctl.c:73
+Kernel panic - not syncing: panic_on_warn set ...
+
+This is a warning that validation of the ioctl fields failed. This was
+deliberately added as a warning to make it very obvious to developers that
+something needed to be fixed. In reality, this is overkill and disturbs
+fuzzing. Switch to pr_warn for a message instead.
+
+Reported-by: syzbot+fa2d5f63ee5904a0115a@syzkaller.appspotmail.com
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: Laura Abbott <labbott@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/staging/android/ion/ion-ioctl.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/staging/android/ion/ion-ioctl.c
++++ b/drivers/staging/android/ion/ion-ioctl.c
+@@ -71,8 +71,10 @@ long ion_ioctl(struct file *filp, unsign
+ return -EFAULT;
+
+ ret = validate_ioctl_arg(cmd, &data);
+- if (WARN_ON_ONCE(ret))
++ if (ret) {
++ pr_warn_once("%s: ioctl validate failed\n", __func__);
+ return ret;
++ }
+
+ if (!(dir & _IOC_WRITE))
+ memset(&data, 0, sizeof(data));
--- /dev/null
+From e901b9873876ca30a09253731bd3a6b00c44b5b0 Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Wed, 4 Oct 2017 16:15:59 +0200
+Subject: usb: core: Add a helper function to check the validity of EP type in URB
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit e901b9873876ca30a09253731bd3a6b00c44b5b0 upstream.
+
+This patch adds a new helper function to perform a sanity check of the
+given URB to see whether it contains a valid endpoint. It's a light-
+weight version of what usb_submit_urb() does, but without the kernel
+warning followed by the stack trace, just returns an error code.
+
+Especially for a driver that doesn't parse the descriptor but fills
+the URB with the fixed endpoint (e.g. some quirks for non-compliant
+devices), this kind of check is preferable at the probe phase before
+actually submitting the urb.
+
+Tested-by: Andrey Konovalov <andreyknvl@google.com>
+Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/usb/core/urb.c | 30 ++++++++++++++++++++++++++----
+ include/linux/usb.h | 2 ++
+ 2 files changed, 28 insertions(+), 4 deletions(-)
+
+--- a/drivers/usb/core/urb.c
++++ b/drivers/usb/core/urb.c
+@@ -187,6 +187,31 @@ EXPORT_SYMBOL_GPL(usb_unanchor_urb);
+
+ /*-------------------------------------------------------------------*/
+
++static const int pipetypes[4] = {
++ PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT
++};
++
++/**
++ * usb_urb_ep_type_check - sanity check of endpoint in the given urb
++ * @urb: urb to be checked
++ *
++ * This performs a light-weight sanity check for the endpoint in the
++ * given urb. It returns 0 if the urb contains a valid endpoint, otherwise
++ * a negative error code.
++ */
++int usb_urb_ep_type_check(const struct urb *urb)
++{
++ const struct usb_host_endpoint *ep;
++
++ ep = usb_pipe_endpoint(urb->dev, urb->pipe);
++ if (!ep)
++ return -EINVAL;
++ if (usb_pipetype(urb->pipe) != pipetypes[usb_endpoint_type(&ep->desc)])
++ return -EINVAL;
++ return 0;
++}
++EXPORT_SYMBOL_GPL(usb_urb_ep_type_check);
++
+ /**
+ * usb_submit_urb - issue an asynchronous transfer request for an endpoint
+ * @urb: pointer to the urb describing the request
+@@ -326,9 +351,6 @@ EXPORT_SYMBOL_GPL(usb_unanchor_urb);
+ */
+ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
+ {
+- static int pipetypes[4] = {
+- PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT
+- };
+ int xfertype, max;
+ struct usb_device *dev;
+ struct usb_host_endpoint *ep;
+@@ -444,7 +466,7 @@ int usb_submit_urb(struct urb *urb, gfp_
+ */
+
+ /* Check that the pipe's type matches the endpoint's type */
+- if (usb_pipetype(urb->pipe) != pipetypes[xfertype])
++ if (usb_urb_ep_type_check(urb))
+ dev_WARN(&dev->dev, "BOGUS urb xfer, pipe %x != type %x\n",
+ usb_pipetype(urb->pipe), pipetypes[xfertype]);
+
+--- a/include/linux/usb.h
++++ b/include/linux/usb.h
+@@ -1729,6 +1729,8 @@ static inline int usb_urb_dir_out(struct
+ return (urb->transfer_flags & URB_DIR_MASK) == URB_DIR_OUT;
+ }
+
++int usb_urb_ep_type_check(const struct urb *urb);
++
+ void *usb_alloc_coherent(struct usb_device *dev, size_t size,
+ gfp_t mem_flags, dma_addr_t *dma);
+ void usb_free_coherent(struct usb_device *dev, size_t size,
--- /dev/null
+From e9cb4239134c860e5f92c75bf5321bd377bb505b Mon Sep 17 00:00:00 2001
+From: Jason Wang <jasowang@redhat.com>
+Date: Tue, 23 Jan 2018 17:27:25 +0800
+Subject: vhost: use mutex_lock_nested() in vhost_dev_lock_vqs()
+
+From: Jason Wang <jasowang@redhat.com>
+
+commit e9cb4239134c860e5f92c75bf5321bd377bb505b upstream.
+
+We used to call mutex_lock() in vhost_dev_lock_vqs() which tries to
+hold mutexes of all virtqueues. This may confuse lockdep to report a
+possible deadlock because of trying to hold locks belong to same
+class. Switch to use mutex_lock_nested() to avoid false positive.
+
+Fixes: 6b1e6cc7855b0 ("vhost: new device IOTLB API")
+Reported-by: syzbot+dbb7c1161485e61b0241@syzkaller.appspotmail.com
+Signed-off-by: Jason Wang <jasowang@redhat.com>
+Acked-by: Michael S. Tsirkin <mst@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/vhost/vhost.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/vhost/vhost.c
++++ b/drivers/vhost/vhost.c
+@@ -904,7 +904,7 @@ static void vhost_dev_lock_vqs(struct vh
+ {
+ int i = 0;
+ for (i = 0; i < d->nvqs; ++i)
+- mutex_lock(&d->vqs[i]->mutex);
++ mutex_lock_nested(&d->vqs[i]->mutex, i);
+ }
+
+ static void vhost_dev_unlock_vqs(struct vhost_dev *d)
--- /dev/null
+From 6a53b7593233ab9e4f96873ebacc0f653a55c3e1 Mon Sep 17 00:00:00 2001
+From: Cong Wang <xiyou.wangcong@gmail.com>
+Date: Mon, 27 Nov 2017 11:15:16 -0800
+Subject: xfrm: check id proto in validate_tmpl()
+
+From: Cong Wang <xiyou.wangcong@gmail.com>
+
+commit 6a53b7593233ab9e4f96873ebacc0f653a55c3e1 upstream.
+
+syzbot reported a kernel warning in xfrm_state_fini(), which
+indicates that we have entries left in the list
+net->xfrm.state_all whose proto is zero. And
+xfrm_id_proto_match() doesn't consider them as a match with
+IPSEC_PROTO_ANY in this case.
+
+Proto with value 0 is probably not a valid value, at least
+verify_newsa_info() doesn't consider it valid either.
+
+This patch fixes it by checking the proto value in
+validate_tmpl() and rejecting invalid ones, like what iproute2
+does in xfrm_xfrmproto_getbyname().
+
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Cc: Steffen Klassert <steffen.klassert@secunet.com>
+Cc: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
+Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/xfrm/xfrm_user.c | 15 +++++++++++++++
+ 1 file changed, 15 insertions(+)
+
+--- a/net/xfrm/xfrm_user.c
++++ b/net/xfrm/xfrm_user.c
+@@ -1443,6 +1443,21 @@ static int validate_tmpl(int nr, struct
+ default:
+ return -EINVAL;
+ }
++
++ switch (ut[i].id.proto) {
++ case IPPROTO_AH:
++ case IPPROTO_ESP:
++ case IPPROTO_COMP:
++#if IS_ENABLED(CONFIG_IPV6)
++ case IPPROTO_ROUTING:
++ case IPPROTO_DSTOPTS:
++#endif
++ case IPSEC_PROTO_ANY:
++ break;
++ default:
++ return -EINVAL;
++ }
++
+ }
+
+ return 0;
--- /dev/null
+From b1bdcb59b64f806ef08d25a85c39ffb3ad841ce6 Mon Sep 17 00:00:00 2001
+From: Florian Westphal <fw@strlen.de>
+Date: Sat, 6 Jan 2018 01:13:08 +0100
+Subject: xfrm: don't call xfrm_policy_cache_flush while holding spinlock
+
+From: Florian Westphal <fw@strlen.de>
+
+commit b1bdcb59b64f806ef08d25a85c39ffb3ad841ce6 upstream.
+
+xfrm_policy_cache_flush can sleep, so it cannot be called while holding
+a spinlock. We could release the lock first, but I don't see why we need
+to invoke this function here in first place, the packet path won't reuse
+an xdst entry unless its still valid.
+
+While at it, add an annotation to xfrm_policy_cache_flush, it would
+have probably caught this bug sooner.
+
+Fixes: ec30d78c14a813 ("xfrm: add xdst pcpu cache")
+Reported-by: syzbot+e149f7d1328c26f9c12f@syzkaller.appspotmail.com
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/xfrm/xfrm_policy.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/xfrm/xfrm_policy.c
++++ b/net/xfrm/xfrm_policy.c
+@@ -975,8 +975,6 @@ int xfrm_policy_flush(struct net *net, u
+ }
+ if (!cnt)
+ err = -ESRCH;
+- else
+- xfrm_policy_cache_flush();
+ out:
+ spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
+ return err;
+@@ -1738,6 +1736,8 @@ void xfrm_policy_cache_flush(void)
+ bool found = 0;
+ int cpu;
+
++ might_sleep();
++
+ local_bh_disable();
+ rcu_read_lock();
+ for_each_possible_cpu(cpu) {
--- /dev/null
+From 2f10a61cee8fdb9f8da90f5db687e1862b22cf06 Mon Sep 17 00:00:00 2001
+From: Sabrina Dubroca <sd@queasysnail.net>
+Date: Sun, 31 Dec 2017 16:18:56 +0100
+Subject: xfrm: fix rcu usage in xfrm_get_type_offload
+
+From: Sabrina Dubroca <sd@queasysnail.net>
+
+commit 2f10a61cee8fdb9f8da90f5db687e1862b22cf06 upstream.
+
+request_module can sleep, thus we cannot hold rcu_read_lock() while
+calling it. The function also jumps back and takes rcu_read_lock()
+again (in xfrm_state_get_afinfo()), resulting in an imbalance.
+
+This codepath is triggered whenever a new offloaded state is created.
+
+Fixes: ffdb5211da1c ("xfrm: Auto-load xfrm offload modules")
+Reported-by: syzbot+ca425f44816d749e8eb49755567a75ee48cf4a30@syzkaller.appspotmail.com
+Signed-off-by: Sabrina Dubroca <sd@queasysnail.net>
+Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/xfrm/xfrm_state.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/net/xfrm/xfrm_state.c
++++ b/net/xfrm/xfrm_state.c
+@@ -313,13 +313,14 @@ retry:
+ if ((type && !try_module_get(type->owner)))
+ type = NULL;
+
++ rcu_read_unlock();
++
+ if (!type && try_load) {
+ request_module("xfrm-offload-%d-%d", family, proto);
+ try_load = 0;
+ goto retry;
+ }
+
+- rcu_read_unlock();
+ return type;
+ }
+
--- /dev/null
+From ddc47e4404b58f03e98345398fb12d38fe291512 Mon Sep 17 00:00:00 2001
+From: Steffen Klassert <steffen.klassert@secunet.com>
+Date: Wed, 29 Nov 2017 06:53:55 +0100
+Subject: xfrm: Fix stack-out-of-bounds read on socket policy lookup.
+
+From: Steffen Klassert <steffen.klassert@secunet.com>
+
+commit ddc47e4404b58f03e98345398fb12d38fe291512 upstream.
+
+When we do tunnel or beet mode, we pass saddr and daddr from the
+template to xfrm_state_find(), this is ok. On transport mode,
+we pass the addresses from the flowi, assuming that the IP
+addresses (and address family) don't change during transformation.
+This assumption is wrong in the IPv4 mapped IPv6 case, packet
+is IPv4 and template is IPv6.
+
+Fix this by catching address family missmatches of the policy
+and the flow already before we do the lookup.
+
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/xfrm/xfrm_policy.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+--- a/net/xfrm/xfrm_policy.c
++++ b/net/xfrm/xfrm_policy.c
+@@ -1168,9 +1168,15 @@ static struct xfrm_policy *xfrm_sk_polic
+ again:
+ pol = rcu_dereference(sk->sk_policy[dir]);
+ if (pol != NULL) {
+- bool match = xfrm_selector_match(&pol->selector, fl, family);
++ bool match;
+ int err = 0;
+
++ if (pol->family != family) {
++ pol = NULL;
++ goto out;
++ }
++
++ match = xfrm_selector_match(&pol->selector, fl, family);
+ if (match) {
+ if ((sk->sk_mark & pol->mark.m) != pol->mark.v) {
+ pol = NULL;
--- /dev/null
+From 862591bf4f519d1b8d859af720fafeaebdd0162a Mon Sep 17 00:00:00 2001
+From: Florian Westphal <fw@strlen.de>
+Date: Wed, 27 Dec 2017 23:25:45 +0100
+Subject: xfrm: skip policies marked as dead while rehashing
+
+From: Florian Westphal <fw@strlen.de>
+
+commit 862591bf4f519d1b8d859af720fafeaebdd0162a upstream.
+
+syzkaller triggered following KASAN splat:
+
+BUG: KASAN: slab-out-of-bounds in xfrm_hash_rebuild+0xdbe/0xf00 net/xfrm/xfrm_policy.c:618
+read of size 2 at addr ffff8801c8e92fe4 by task kworker/1:1/23 [..]
+Workqueue: events xfrm_hash_rebuild [..]
+ __asan_report_load2_noabort+0x14/0x20 mm/kasan/report.c:428
+ xfrm_hash_rebuild+0xdbe/0xf00 net/xfrm/xfrm_policy.c:618
+ process_one_work+0xbbf/0x1b10 kernel/workqueue.c:2112
+ worker_thread+0x223/0x1990 kernel/workqueue.c:2246 [..]
+
+The reproducer triggers:
+1016 if (error) {
+1017 list_move_tail(&walk->walk.all, &x->all);
+1018 goto out;
+1019 }
+
+in xfrm_policy_walk() via pfkey (it sets tiny rcv space, dump
+callback returns -ENOBUFS).
+
+In this case, *walk is located the pfkey socket struct, so this socket
+becomes visible in the global policy list.
+
+It looks like this is intentional -- phony walker has walk.dead set to 1
+and all other places skip such "policies".
+
+Ccing original authors of the two commits that seem to expose this
+issue (first patch missed ->dead check, second patch adds pfkey
+sockets to policies dumper list).
+
+Fixes: 880a6fab8f6ba5b ("xfrm: configure policy hash table thresholds by netlink")
+Fixes: 12a169e7d8f4b1c ("ipsec: Put dumpers on the dump list")
+Cc: Herbert Xu <herbert@gondor.apana.org.au>
+Cc: Timo Teras <timo.teras@iki.fi>
+Cc: Christophe Gouault <christophe.gouault@6wind.com>
+Reported-by: syzbot <bot+c028095236fcb6f4348811565b75084c754dc729@syzkaller.appspotmail.com>
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/xfrm/xfrm_policy.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/net/xfrm/xfrm_policy.c
++++ b/net/xfrm/xfrm_policy.c
+@@ -610,7 +610,8 @@ static void xfrm_hash_rebuild(struct wor
+
+ /* re-insert all policies by order of creation */
+ list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
+- if (xfrm_policy_id2dir(policy->index) >= XFRM_POLICY_MAX) {
++ if (policy->walk.dead ||
++ xfrm_policy_id2dir(policy->index) >= XFRM_POLICY_MAX) {
+ /* skip socket policies */
+ continue;
+ }