--- /dev/null
+From foo@baz Sun Nov 19 11:12:05 CET 2017
+From: Daniel Borkmann <daniel@iogearbox.net>
+Date: Mon, 8 May 2017 00:04:09 +0200
+Subject: bpf: don't let ldimm64 leak map addresses on unprivileged
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+
+commit 0d0e57697f162da4aa218b5feafe614fb666db07 upstream.
+
+The patch fixes two things at once:
+
+1) It checks env->allow_ptr_leaks and only prints the map address to
+   the log if we have the privileges to do so; otherwise it just dumps
+   0, as %pK does when kptr_restrict is enabled. Given the latter is
+   off by default and not every distro sets it, we don't want to rely
+   on it, hence the 0 by default for unprivileged users.
+
+2) Printing of ldimm64 in the verifier log is currently broken in that
+   we don't print the full 64 bit immediate, but only the 32 bit value
+   from the first half of the instruction pair. Fix this up as well; it
+   is okay to access the second half, since all ldimm64 instructions
+   (including plain constants) were already verified earlier through
+   replace_map_fd_with_map_ptr().
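+
+For reference, splicing the full immediate back together from the two
+halves amounts to the following (a minimal stand-alone C sketch with
+made-up values, mirroring the computation in the hunk below; not
+kernel code):
+
+  #include <stdint.h>
+  #include <stdio.h>
+
+  int main(void)
+  {
+          /* imm fields of the two insns of a ldimm64 pair (made up) */
+          int32_t lo = 0x11223344;        /* first insn  */
+          int32_t hi = 0x55667788;        /* second insn */
+
+          /* cast the low half to u32 first so no sign bits bleed into
+           * the upper word, then put the high half on top */
+          uint64_t imm = ((uint64_t)hi << 32) | (uint32_t)lo;
+
+          printf("imm = 0x%llx\n", (unsigned long long)imm);
+          return 0;
+  }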
+
+Fixes: 1be7f75d1668 ("bpf: enable non-root eBPF programs")
+Fixes: cbd357008604 ("bpf: verifier (add ability to receive verification log)")
+Reported-by: Jann Horn <jannh@google.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+[bwh: Backported to 4.4: s/bpf_verifier_env/verifier_env/]
+Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/bpf/verifier.c | 21 ++++++++++++++++-----
+ 1 file changed, 16 insertions(+), 5 deletions(-)
+
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -313,7 +313,8 @@ static const char *const bpf_jmp_string[
+ [BPF_EXIT >> 4] = "exit",
+ };
+
+-static void print_bpf_insn(struct bpf_insn *insn)
++static void print_bpf_insn(const struct verifier_env *env,
++ const struct bpf_insn *insn)
+ {
+ u8 class = BPF_CLASS(insn->code);
+
+@@ -377,9 +378,19 @@ static void print_bpf_insn(struct bpf_in
+ insn->code,
+ bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
+ insn->src_reg, insn->imm);
+- } else if (BPF_MODE(insn->code) == BPF_IMM) {
+- verbose("(%02x) r%d = 0x%x\n",
+- insn->code, insn->dst_reg, insn->imm);
++ } else if (BPF_MODE(insn->code) == BPF_IMM &&
++ BPF_SIZE(insn->code) == BPF_DW) {
++ /* At this point, we already made sure that the second
++ * part of the ldimm64 insn is accessible.
++ */
++ u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
++ bool map_ptr = insn->src_reg == BPF_PSEUDO_MAP_FD;
++
++ if (map_ptr && !env->allow_ptr_leaks)
++ imm = 0;
++
++ verbose("(%02x) r%d = 0x%llx\n", insn->code,
++ insn->dst_reg, (unsigned long long)imm);
+ } else {
+ verbose("BUG_ld_%02x\n", insn->code);
+ return;
+@@ -1764,7 +1775,7 @@ static int do_check(struct verifier_env
+
+ if (log_level) {
+ verbose("%d: ", insn_idx);
+- print_bpf_insn(insn);
++ print_bpf_insn(env, insn);
+ }
+
+ if (class == BPF_ALU || class == BPF_ALU64) {
--- /dev/null
+From foo@baz Sun Nov 19 11:12:05 CET 2017
+From: Jan Kara <jack@suse.cz>
+Date: Sun, 24 Apr 2016 00:56:03 -0400
+Subject: ext4: fix data exposure after a crash
+
+From: Jan Kara <jack@suse.cz>
+
+
+commit 06bd3c36a733ac27962fea7d6f47168841376824 upstream.
+
+Huang has reported that in his powerfail testing he is seeing stale
+block contents in some recently allocated blocks even though he mounts
+ext4 in data=ordered mode. After some investigation I have found out
+that indeed, when delayed allocation is used, we don't add the inode to
+the transaction's list of inodes needing flushing before commit.
+Originally we were doing that, but commit f3b59291a69d removed the
+logic based on the flawed argument that it is not needed.
+
+The problem is that although for delayed allocated blocks we write
+their contents immediately after allocating them, there is no guarantee
+that the IO scheduler or device doesn't reorder things, and thus the
+transaction allocating the blocks and attaching them to the inode can
+reach stable storage before the actual block contents. In fact,
+whenever we attach freshly allocated blocks to an inode using a written
+extent, we should add the inode to the transaction's ordered inode list
+to make sure we properly wait for the block contents to be written
+before committing the transaction. That is what we do in this patch.
+This also handles other cases where stale data exposure was possible,
+like filling a hole via mmap in data=ordered,nodelalloc mode.
+
+The only exception to the above rule is extending direct IO writes,
+where blkdev_direct_IO() waits for the IO to complete before increasing
+i_size, so stale data exposure is not possible there. For now we don't
+complicate the code by optimizing this special case, since the overhead
+is pretty low. In case it turns out to be a performance problem, we can
+always handle it using a special flag to ext4_map_blocks().
+
+Fixes: f3b59291a69d0b734be1fc8be489fef2dd846d3d
+Reported-by: "HUANG Weller (CM/ESW12-CN)" <Weller.Huang@cn.bosch.com>
+Tested-by: "HUANG Weller (CM/ESW12-CN)" <Weller.Huang@cn.bosch.com>
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+[bwh: Backported to 4.4:
+ - Drop check for EXT4_GET_BLOCKS_ZERO flag
+ - Adjust context]
+Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ext4/inode.c | 23 ++++++++++++++---------
+ 1 file changed, 14 insertions(+), 9 deletions(-)
+
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -658,6 +658,20 @@ has_zeroout:
+ ret = check_block_validity(inode, map);
+ if (ret != 0)
+ return ret;
++
++ /*
++ * Inodes with freshly allocated blocks where contents will be
++ * visible after transaction commit must be on transaction's
++ * ordered data list.
++ */
++ if (map->m_flags & EXT4_MAP_NEW &&
++ !(map->m_flags & EXT4_MAP_UNWRITTEN) &&
++ !IS_NOQUOTA(inode) &&
++ ext4_should_order_data(inode)) {
++ ret = ext4_jbd2_file_inode(handle, inode);
++ if (ret)
++ return ret;
++ }
+ }
+ return retval;
+ }
+@@ -1152,15 +1166,6 @@ static int ext4_write_end(struct file *f
+ int i_size_changed = 0;
+
+ trace_ext4_write_end(inode, pos, len, copied);
+- if (ext4_test_inode_state(inode, EXT4_STATE_ORDERED_MODE)) {
+- ret = ext4_jbd2_file_inode(handle, inode);
+- if (ret) {
+- unlock_page(page);
+- page_cache_release(page);
+- goto errout;
+- }
+- }
+-
+ if (ext4_has_inline_data(inode)) {
+ ret = ext4_write_inline_data_end(inode, pos, len,
+ copied, page);
--- /dev/null
+From foo@baz Sun Nov 19 11:12:05 CET 2017
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Wed, 7 Jun 2017 15:13:14 +0200
+Subject: KVM: x86: fix singlestepping over syscall
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+
+commit c8401dda2f0a00cd25c0af6a95ed50e478d25de4 upstream.
+
+TF is handled a bit differently for syscall and sysret, compared to the
+other instructions: TF is checked after the instruction completes, so
+that the OS can disable #DB at a syscall by adding TF to FMASK. When
+the sysret is executed, the #DB is taken "as if" the syscall insn had
+just completed.
+
+KVM emulates syscall so that it can trap a 32-bit syscall on Intel
+processors. Fix the emulation to match this behavior; otherwise you
+could get a #DB on the user stack, which is not nice. This does not
+affect Linux guests, as they use an IST or task gate for #DB.
+
+This fixes CVE-2017-7518.
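+
+For reference, the TF bit latched into ctxt->tf below is bit 8 of
+(R/E)FLAGS; a minimal stand-alone C sketch of that check (made-up
+flags value, not KVM code):
+
+  #include <stdbool.h>
+  #include <stdint.h>
+  #include <stdio.h>
+
+  #define X86_EFLAGS_TF (1u << 8) /* Trap Flag, bit 8 of (R/E)FLAGS */
+
+  int main(void)
+  {
+          uint64_t eflags = 0x302; /* made up: IF | TF | reserved bit 1 */
+          bool tf = (eflags & X86_EFLAGS_TF) != 0;
+
+          printf("single-step trap pending: %s\n", tf ? "yes" : "no");
+          return 0;
+  }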
+
+Reported-by: Andy Lutomirski <luto@kernel.org>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
+[bwh: Backported to 4.4:
+ - kvm_vcpu_check_singlestep() sets some flags differently
+ - Drop changes to kvm_skip_emulated_instruction()]
+Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/kvm_emulate.h | 1
+ arch/x86/kvm/emulate.c | 1
+ arch/x86/kvm/x86.c | 52 +++++++++++++++----------------------
+ 3 files changed, 24 insertions(+), 30 deletions(-)
+
+--- a/arch/x86/include/asm/kvm_emulate.h
++++ b/arch/x86/include/asm/kvm_emulate.h
+@@ -296,6 +296,7 @@ struct x86_emulate_ctxt {
+
+ bool perm_ok; /* do not check permissions if true */
+ bool ud; /* inject an #UD if host doesn't support insn */
++ bool tf; /* TF value before instruction (after for syscall/sysret) */
+
+ bool have_exception;
+ struct x86_exception exception;
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -2726,6 +2726,7 @@ static int em_syscall(struct x86_emulate
+ ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
+ }
+
++ ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
+ return X86EMUL_CONTINUE;
+ }
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -5095,6 +5095,8 @@ static void init_emulate_ctxt(struct kvm
+ kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
+
+ ctxt->eflags = kvm_get_rflags(vcpu);
++ ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
++
+ ctxt->eip = kvm_rip_read(vcpu);
+ ctxt->mode = (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL :
+ (ctxt->eflags & X86_EFLAGS_VM) ? X86EMUL_MODE_VM86 :
+@@ -5315,37 +5317,26 @@ static int kvm_vcpu_check_hw_bp(unsigned
+ return dr6;
+ }
+
+-static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, unsigned long rflags, int *r)
++static void kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu, int *r)
+ {
+ struct kvm_run *kvm_run = vcpu->run;
+
+- /*
+- * rflags is the old, "raw" value of the flags. The new value has
+- * not been saved yet.
+- *
+- * This is correct even for TF set by the guest, because "the
+- * processor will not generate this exception after the instruction
+- * that sets the TF flag".
+- */
+- if (unlikely(rflags & X86_EFLAGS_TF)) {
+- if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
+- kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 |
+- DR6_RTM;
+- kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip;
+- kvm_run->debug.arch.exception = DB_VECTOR;
+- kvm_run->exit_reason = KVM_EXIT_DEBUG;
+- *r = EMULATE_USER_EXIT;
+- } else {
+- vcpu->arch.emulate_ctxt.eflags &= ~X86_EFLAGS_TF;
+- /*
+- * "Certain debug exceptions may clear bit 0-3. The
+- * remaining contents of the DR6 register are never
+- * cleared by the processor".
+- */
+- vcpu->arch.dr6 &= ~15;
+- vcpu->arch.dr6 |= DR6_BS | DR6_RTM;
+- kvm_queue_exception(vcpu, DB_VECTOR);
+- }
++ if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
++ kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 | DR6_RTM;
++ kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip;
++ kvm_run->debug.arch.exception = DB_VECTOR;
++ kvm_run->exit_reason = KVM_EXIT_DEBUG;
++ *r = EMULATE_USER_EXIT;
++ } else {
++ vcpu->arch.emulate_ctxt.eflags &= ~X86_EFLAGS_TF;
++ /*
++ * "Certain debug exceptions may clear bit 0-3. The
++ * remaining contents of the DR6 register are never
++ * cleared by the processor".
++ */
++ vcpu->arch.dr6 &= ~15;
++ vcpu->arch.dr6 |= DR6_BS | DR6_RTM;
++ kvm_queue_exception(vcpu, DB_VECTOR);
+ }
+ }
+
+@@ -5500,8 +5491,9 @@ restart:
+ toggle_interruptibility(vcpu, ctxt->interruptibility);
+ vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
+ kvm_rip_write(vcpu, ctxt->eip);
+- if (r == EMULATE_DONE)
+- kvm_vcpu_check_singlestep(vcpu, rflags, &r);
++ if (r == EMULATE_DONE &&
++ (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
++ kvm_vcpu_do_singlestep(vcpu, &r);
+ if (!ctxt->have_exception ||
+ exception_type(ctxt->exception.vector) == EXCPT_TRAP)
+ __kvm_set_rflags(vcpu, ctxt->eflags);
--- /dev/null
+From eb0c19942288569e0ae492476534d5a485fb8ab4 Mon Sep 17 00:00:00 2001
+From: Andrey Konovalov <andreyknvl@google.com>
+Date: Thu, 2 Nov 2017 10:38:21 -0400
+Subject: media: dib0700: fix invalid dvb_detach argument
+
+From: Andrey Konovalov <andreyknvl@google.com>
+
+commit eb0c19942288569e0ae492476534d5a485fb8ab4 upstream.
+
+dvb_detach(arg) calls symbol_put_addr(arg), where arg should be a pointer
+to a function. Right now a pointer to state->dib7000p_ops is passed to
+dvb_detach(), which causes a BUG() in symbol_put_addr() as discovered by
+syzkaller. Pass state->dib7000p_ops.set_wbd_ref instead.
+
+------------[ cut here ]------------
+kernel BUG at kernel/module.c:1081!
+invalid opcode: 0000 [#1] PREEMPT SMP KASAN
+Modules linked in:
+CPU: 1 PID: 1151 Comm: kworker/1:1 Tainted: G W
+4.14.0-rc1-42251-gebb2c2437d80 #224
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011
+Workqueue: usb_hub_wq hub_event
+task: ffff88006a336300 task.stack: ffff88006a7c8000
+RIP: 0010:symbol_put_addr+0x54/0x60 kernel/module.c:1083
+RSP: 0018:ffff88006a7ce210 EFLAGS: 00010246
+RAX: 0000000000000000 RBX: ffff880062a8d190 RCX: 0000000000000000
+RDX: dffffc0000000020 RSI: ffffffff85876d60 RDI: ffff880062a8d190
+RBP: ffff88006a7ce218 R08: 1ffff1000d4f9c12 R09: 1ffff1000d4f9ae4
+R10: 1ffff1000d4f9bed R11: 0000000000000000 R12: ffff880062a8d180
+R13: 00000000ffffffed R14: ffff880062a8d190 R15: ffff88006947c000
+FS: 0000000000000000(0000) GS:ffff88006c900000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 00007f6416532000 CR3: 00000000632f5000 CR4: 00000000000006e0
+Call Trace:
+ stk7070p_frontend_attach+0x515/0x610
+drivers/media/usb/dvb-usb/dib0700_devices.c:1013
+ dvb_usb_adapter_frontend_init+0x32b/0x660
+drivers/media/usb/dvb-usb/dvb-usb-dvb.c:286
+ dvb_usb_adapter_init drivers/media/usb/dvb-usb/dvb-usb-init.c:86
+ dvb_usb_init drivers/media/usb/dvb-usb/dvb-usb-init.c:162
+ dvb_usb_device_init+0xf70/0x17f0 drivers/media/usb/dvb-usb/dvb-usb-init.c:277
+ dib0700_probe+0x171/0x5a0 drivers/media/usb/dvb-usb/dib0700_core.c:886
+ usb_probe_interface+0x35d/0x8e0 drivers/usb/core/driver.c:361
+ really_probe drivers/base/dd.c:413
+ driver_probe_device+0x610/0xa00 drivers/base/dd.c:557
+ __device_attach_driver+0x230/0x290 drivers/base/dd.c:653
+ bus_for_each_drv+0x161/0x210 drivers/base/bus.c:463
+ __device_attach+0x26e/0x3d0 drivers/base/dd.c:710
+ device_initial_probe+0x1f/0x30 drivers/base/dd.c:757
+ bus_probe_device+0x1eb/0x290 drivers/base/bus.c:523
+ device_add+0xd0b/0x1660 drivers/base/core.c:1835
+ usb_set_configuration+0x104e/0x1870 drivers/usb/core/message.c:1932
+ generic_probe+0x73/0xe0 drivers/usb/core/generic.c:174
+ usb_probe_device+0xaf/0xe0 drivers/usb/core/driver.c:266
+ really_probe drivers/base/dd.c:413
+ driver_probe_device+0x610/0xa00 drivers/base/dd.c:557
+ __device_attach_driver+0x230/0x290 drivers/base/dd.c:653
+ bus_for_each_drv+0x161/0x210 drivers/base/bus.c:463
+ __device_attach+0x26e/0x3d0 drivers/base/dd.c:710
+ device_initial_probe+0x1f/0x30 drivers/base/dd.c:757
+ bus_probe_device+0x1eb/0x290 drivers/base/bus.c:523
+ device_add+0xd0b/0x1660 drivers/base/core.c:1835
+ usb_new_device+0x7b8/0x1020 drivers/usb/core/hub.c:2457
+ hub_port_connect drivers/usb/core/hub.c:4903
+ hub_port_connect_change drivers/usb/core/hub.c:5009
+ port_event drivers/usb/core/hub.c:5115
+ hub_event+0x194d/0x3740 drivers/usb/core/hub.c:5195
+ process_one_work+0xc7f/0x1db0 kernel/workqueue.c:2119
+ worker_thread+0x221/0x1850 kernel/workqueue.c:2253
+ kthread+0x3a1/0x470 kernel/kthread.c:231
+ ret_from_fork+0x2a/0x40 arch/x86/entry/entry_64.S:431
+Code: ff ff 48 85 c0 74 24 48 89 c7 e8 48 ea ff ff bf 01 00 00 00 e8
+de 20 e3 ff 65 8b 05 b7 2f c2 7e 85 c0 75 c9 e8 f9 0b c1 ff eb c2 <0f>
+0b 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 44 00 00 48 b8 00 00
+RIP: symbol_put_addr+0x54/0x60 RSP: ffff88006a7ce210
+---[ end trace b75b357739e7e116 ]---
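+
+The distinction matters because symbol_put_addr() expects the address
+of a symbol exported by a module (such as a function), not the address
+of a caller-owned ops structure. A minimal stand-alone C sketch of the
+difference (hypothetical names, not driver code):
+
+  #include <stdint.h>
+  #include <stdio.h>
+
+  /* stand-in for the dib7000p_ops table of function pointers */
+  struct demo_ops {
+          int (*set_wbd_ref)(int value);
+  };
+
+  static int demo_set_wbd_ref(int value)
+  {
+          return value;
+  }
+
+  int main(void)
+  {
+          struct demo_ops ops = { .set_wbd_ref = demo_set_wbd_ref };
+
+          /* &ops is the address of the caller's structure ... */
+          printf("&ops            = %p\n", (void *)&ops);
+          /* ... while ops.set_wbd_ref is the address of the function
+           * itself, which is what a lookup by address must be given */
+          printf("ops.set_wbd_ref = %p\n",
+                 (void *)(uintptr_t)ops.set_wbd_ref);
+          return 0;
+  }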
+
+Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
+Cc: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Mauro Carvalho Chehab <mchehab@s-opensource.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/media/usb/dvb-usb/dib0700_devices.c | 24 ++++++++++++------------
+ 1 file changed, 12 insertions(+), 12 deletions(-)
+
+--- a/drivers/media/usb/dvb-usb/dib0700_devices.c
++++ b/drivers/media/usb/dvb-usb/dib0700_devices.c
+@@ -292,7 +292,7 @@ static int stk7700P2_frontend_attach(str
+ stk7700d_dib7000p_mt2266_config)
+ != 0) {
+ err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__);
+- dvb_detach(&state->dib7000p_ops);
++ dvb_detach(state->dib7000p_ops.set_wbd_ref);
+ return -ENODEV;
+ }
+ }
+@@ -326,7 +326,7 @@ static int stk7700d_frontend_attach(stru
+ stk7700d_dib7000p_mt2266_config)
+ != 0) {
+ err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__);
+- dvb_detach(&state->dib7000p_ops);
++ dvb_detach(state->dib7000p_ops.set_wbd_ref);
+ return -ENODEV;
+ }
+ }
+@@ -479,7 +479,7 @@ static int stk7700ph_frontend_attach(str
+ &stk7700ph_dib7700_xc3028_config) != 0) {
+ err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n",
+ __func__);
+- dvb_detach(&state->dib7000p_ops);
++ dvb_detach(state->dib7000p_ops.set_wbd_ref);
+ return -ENODEV;
+ }
+
+@@ -1010,7 +1010,7 @@ static int stk7070p_frontend_attach(stru
+ &dib7070p_dib7000p_config) != 0) {
+ err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n",
+ __func__);
+- dvb_detach(&state->dib7000p_ops);
++ dvb_detach(state->dib7000p_ops.set_wbd_ref);
+ return -ENODEV;
+ }
+
+@@ -1068,7 +1068,7 @@ static int stk7770p_frontend_attach(stru
+ &dib7770p_dib7000p_config) != 0) {
+ err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n",
+ __func__);
+- dvb_detach(&state->dib7000p_ops);
++ dvb_detach(state->dib7000p_ops.set_wbd_ref);
+ return -ENODEV;
+ }
+
+@@ -3036,7 +3036,7 @@ static int nim7090_frontend_attach(struc
+
+ if (state->dib7000p_ops.i2c_enumeration(&adap->dev->i2c_adap, 1, 0x10, &nim7090_dib7000p_config) != 0) {
+ err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__);
+- dvb_detach(&state->dib7000p_ops);
++ dvb_detach(state->dib7000p_ops.set_wbd_ref);
+ return -ENODEV;
+ }
+ adap->fe_adap[0].fe = state->dib7000p_ops.init(&adap->dev->i2c_adap, 0x80, &nim7090_dib7000p_config);
+@@ -3089,7 +3089,7 @@ static int tfe7090pvr_frontend0_attach(s
+ /* initialize IC 0 */
+ if (state->dib7000p_ops.i2c_enumeration(&adap->dev->i2c_adap, 1, 0x20, &tfe7090pvr_dib7000p_config[0]) != 0) {
+ err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__);
+- dvb_detach(&state->dib7000p_ops);
++ dvb_detach(state->dib7000p_ops.set_wbd_ref);
+ return -ENODEV;
+ }
+
+@@ -3119,7 +3119,7 @@ static int tfe7090pvr_frontend1_attach(s
+ i2c = state->dib7000p_ops.get_i2c_master(adap->dev->adapter[0].fe_adap[0].fe, DIBX000_I2C_INTERFACE_GPIO_6_7, 1);
+ if (state->dib7000p_ops.i2c_enumeration(i2c, 1, 0x10, &tfe7090pvr_dib7000p_config[1]) != 0) {
+ err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__);
+- dvb_detach(&state->dib7000p_ops);
++ dvb_detach(state->dib7000p_ops.set_wbd_ref);
+ return -ENODEV;
+ }
+
+@@ -3194,7 +3194,7 @@ static int tfe7790p_frontend_attach(stru
+ 1, 0x10, &tfe7790p_dib7000p_config) != 0) {
+ err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n",
+ __func__);
+- dvb_detach(&state->dib7000p_ops);
++ dvb_detach(state->dib7000p_ops.set_wbd_ref);
+ return -ENODEV;
+ }
+ adap->fe_adap[0].fe = state->dib7000p_ops.init(&adap->dev->i2c_adap,
+@@ -3289,7 +3289,7 @@ static int stk7070pd_frontend_attach0(st
+ stk7070pd_dib7000p_config) != 0) {
+ err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n",
+ __func__);
+- dvb_detach(&state->dib7000p_ops);
++ dvb_detach(state->dib7000p_ops.set_wbd_ref);
+ return -ENODEV;
+ }
+
+@@ -3364,7 +3364,7 @@ static int novatd_frontend_attach(struct
+ stk7070pd_dib7000p_config) != 0) {
+ err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n",
+ __func__);
+- dvb_detach(&state->dib7000p_ops);
++ dvb_detach(state->dib7000p_ops.set_wbd_ref);
+ return -ENODEV;
+ }
+ }
+@@ -3600,7 +3600,7 @@ static int pctv340e_frontend_attach(stru
+
+ if (state->dib7000p_ops.dib7000pc_detection(&adap->dev->i2c_adap) == 0) {
+ /* Demodulator not found for some reason? */
+- dvb_detach(&state->dib7000p_ops);
++ dvb_detach(state->dib7000p_ops.set_wbd_ref);
+ return -ENODEV;
+ }
+
--- /dev/null
+From 58fd55e838276a0c13d1dc7c387f90f25063cbf3 Mon Sep 17 00:00:00 2001
+From: Arvind Yadav <arvind.yadav.cs@gmail.com>
+Date: Mon, 9 Oct 2017 20:14:48 +0200
+Subject: media: imon: Fix null-ptr-deref in imon_probe
+
+From: Arvind Yadav <arvind.yadav.cs@gmail.com>
+
+commit 58fd55e838276a0c13d1dc7c387f90f25063cbf3 upstream.
+
+It seems that the return value of usb_ifnum_to_if() can be NULL and
+needs to be checked.
+
+Signed-off-by: Arvind Yadav <arvind.yadav.cs@gmail.com>
+Tested-by: Andrey Konovalov <andreyknvl@google.com>
+Signed-off-by: Sean Young <sean@mess.org>
+Signed-off-by: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+Cc: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/media/rc/imon.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/drivers/media/rc/imon.c
++++ b/drivers/media/rc/imon.c
+@@ -2419,6 +2419,11 @@ static int imon_probe(struct usb_interfa
+ mutex_lock(&driver_lock);
+
+ first_if = usb_ifnum_to_if(usbdev, 0);
++ if (!first_if) {
++ ret = -ENODEV;
++ goto fail;
++ }
++
+ first_if_ctx = usb_get_intfdata(first_if);
+
+ if (ifnum == 0) {
--- /dev/null
+From foo@baz Sun Nov 19 11:12:05 CET 2017
+From: Bjørn Mork <bjorn@mork.no>
+Date: Mon, 6 Nov 2017 15:37:22 +0100
+Subject: net: cdc_ether: fix divide by 0 on bad descriptors
+
+From: Bjørn Mork <bjorn@mork.no>
+
+
+commit 2cb80187ba065d7decad7c6614e35e07aec8a974 upstream.
+
+Setting dev->hard_mtu to 0 will cause a divide error in
+usbnet_probe. Protect against devices with bogus CDC Ethernet
+functional descriptors by ignoring a zero wMaxSegmentSize.
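+
+The failure mode and the guard can be seen in a minimal stand-alone C
+sketch (made-up values, not driver code): the descriptor value is only
+used when it is non-zero, otherwise a later division would fault.
+
+  #include <stdint.h>
+  #include <stdio.h>
+
+  int main(void)
+  {
+          uint16_t wMaxSegmentSize = 0;   /* bogus value from a device */
+          unsigned int hard_mtu = 1514;   /* whatever was there before */
+
+          /* only trust the descriptor field when it is non-zero */
+          if (wMaxSegmentSize)
+                  hard_mtu = wMaxSegmentSize;
+
+          /* without the check above this would divide by zero */
+          printf("segments per 64 KiB: %u\n", 65536u / hard_mtu);
+          return 0;
+  }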
+
+Signed-off-by: Bjørn Mork <bjorn@mork.no>
+Acked-by: Oliver Neukum <oneukum@suse.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/usb/cdc_ether.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/usb/cdc_ether.c
++++ b/drivers/net/usb/cdc_ether.c
+@@ -221,7 +221,7 @@ skip:
+ goto bad_desc;
+ }
+
+- if (header.usb_cdc_ether_desc) {
++ if (header.usb_cdc_ether_desc && info->ether->wMaxSegmentSize) {
+ dev->hard_mtu = le16_to_cpu(info->ether->wMaxSegmentSize);
+ /* because of Zaurus, we may be ignoring the host
+ * side link address we were given.
--- /dev/null
+From foo@baz Sun Nov 19 11:12:05 CET 2017
+From: Bjørn Mork <bjorn@mork.no>
+Date: Mon, 6 Nov 2017 15:32:18 +0100
+Subject: net: qmi_wwan: fix divide by 0 on bad descriptors
+
+From: Bjørn Mork <bjorn@mork.no>
+
+
+commit 7fd078337201cf7468f53c3d9ef81ff78cb6df3b upstream.
+
+A CDC Ethernet functional descriptor with wMaxSegmentSize = 0 will
+cause a divide error in usbnet_probe:
+
+divide error: 0000 [#1] PREEMPT SMP KASAN
+Modules linked in:
+CPU: 0 PID: 24 Comm: kworker/0:1 Not tainted 4.14.0-rc8-44453-g1fdc1a82c34f #56
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011
+Workqueue: usb_hub_wq hub_event
+task: ffff88006bef5c00 task.stack: ffff88006bf60000
+RIP: 0010:usbnet_update_max_qlen+0x24d/0x390 drivers/net/usb/usbnet.c:355
+RSP: 0018:ffff88006bf67508 EFLAGS: 00010246
+RAX: 00000000000163c8 RBX: ffff8800621fce40 RCX: ffff8800621fcf34
+RDX: 0000000000000000 RSI: ffffffff837ecb7a RDI: ffff8800621fcf34
+RBP: ffff88006bf67520 R08: ffff88006bef5c00 R09: ffffed000c43f881
+R10: ffffed000c43f880 R11: ffff8800621fc406 R12: 0000000000000003
+R13: ffffffff85c71de0 R14: 0000000000000000 R15: 0000000000000000
+FS: 0000000000000000(0000) GS:ffff88006ca00000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 00007ffe9c0d6dac CR3: 00000000614f4000 CR4: 00000000000006f0
+Call Trace:
+ usbnet_probe+0x18b5/0x2790 drivers/net/usb/usbnet.c:1783
+ qmi_wwan_probe+0x133/0x220 drivers/net/usb/qmi_wwan.c:1338
+ usb_probe_interface+0x324/0x940 drivers/usb/core/driver.c:361
+ really_probe drivers/base/dd.c:413
+ driver_probe_device+0x522/0x740 drivers/base/dd.c:557
+
+Fix by simply ignoring the bogus descriptor, as it is optional
+for QMI devices anyway.
+
+Fixes: 423ce8caab7e ("net: usb: qmi_wwan: New driver for Huawei QMI based WWAN devices")
+Reported-by: Andrey Konovalov <andreyknvl@google.com>
+Signed-off-by: Bjørn Mork <bjorn@mork.no>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/usb/qmi_wwan.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -262,7 +262,7 @@ static int qmi_wwan_bind(struct usbnet *
+ }
+
+ /* errors aren't fatal - we can live with the dynamic address */
+- if (cdc_ether) {
++ if (cdc_ether && cdc_ether->wMaxSegmentSize) {
+ dev->hard_mtu = le16_to_cpu(cdc_ether->wMaxSegmentSize);
+ usbnet_get_ethernet_addr(dev, cdc_ether->iMACAddress);
+ }
--- /dev/null
+From foo@baz Sun Nov 19 11:12:05 CET 2017
+From: Xin Long <lucien.xin@gmail.com>
+Date: Tue, 17 Oct 2017 23:26:10 +0800
+Subject: sctp: do not peel off an assoc from one netns to another one
+
+From: Xin Long <lucien.xin@gmail.com>
+
+
+commit df80cd9b28b9ebaa284a41df611dbf3a2d05ca74 upstream.
+
+Currently, when peeling off an association to a sock in another netns,
+none of the transports in this assoc are rehashed; they keep using the
+old key in the hashtable.
+
+As a transport uses sk->net as the hash key for insertion into the
+hashtable, removing these transports from the hashtable is missed
+because of the new netns when the sock is closed and all transports are
+freed; later, a use-after-free can occur when looking up an asoc and
+dereferencing those transports.
+
+This is a very old issue, present since the very beginning; ChunYu
+found it with syzkaller fuzz testing using this call sequence:
+
+ socket$inet6_sctp()
+ bind$inet6()
+ sendto$inet6()
+ unshare(0x40000000)
+ getsockopt$inet_sctp6_SCTP_GET_ASSOC_ID_LIST()
+ getsockopt$inet_sctp6_SCTP_SOCKOPT_PEELOFF()
+
+This patch blocks the call when peeling an assoc off from one netns
+into another, so that the netns of all transports does not go out of
+sync with the key in the hashtable.
+
+Note that this patch does not fix the issue by rehashing the
+transports, as it is difficult to handle the situation when the tuple
+is already in use in the new netns. Besides, no one is likely to peel
+off an assoc into another netns, considering ipaddrs, ifaces, etc. are
+usually different.
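+
+The mismatch the new check catches can be seen from user space: a
+socket created before unshare(CLONE_NEWNET) stays attached to its old
+netns while the task moves to a new one. A minimal stand-alone sketch
+of just the namespace switch (needs CAP_NET_ADMIN; no SCTP association
+is set up here):
+
+  #define _GNU_SOURCE
+  #include <sched.h>
+  #include <stdio.h>
+  #include <sys/stat.h>
+
+  int main(void)
+  {
+          struct stat before, after;
+
+          /* the nsfs inode identifies the current net namespace */
+          if (stat("/proc/self/ns/net", &before))
+                  return 1;
+          if (unshare(0x40000000))        /* CLONE_NEWNET, as above */
+                  return 1;
+          stat("/proc/self/ns/net", &after);
+
+          printf("netns changed: %s\n",
+                 before.st_ino != after.st_ino ? "yes" : "no");
+          return 0;
+  }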
+
+Reported-by: ChunYu Wang <chunwang@redhat.com>
+Signed-off-by: Xin Long <lucien.xin@gmail.com>
+Acked-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
+Acked-by: Neil Horman <nhorman@tuxdriver.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sctp/socket.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -4453,6 +4453,10 @@ int sctp_do_peeloff(struct sock *sk, sct
+ struct socket *sock;
+ int err = 0;
+
++ /* Do not peel off from one netns to another one. */
++ if (!net_eq(current->nsproxy->net_ns, sock_net(sk)))
++ return -EINVAL;
++
+ if (!asoc)
+ return -EINVAL;
+
--- /dev/null
+From foo@baz Sun Nov 19 11:12:05 CET 2017
+From: Jan Beulich <jbeulich@suse.com>
+Date: Tue, 13 Jun 2017 16:28:27 -0400
+Subject: xen-blkback: don't leak stack data via response ring
+
+From: Jan Beulich <jbeulich@suse.com>
+
+
+commit 089bc0143f489bd3a4578bdff5f4ca68fb26f341 upstream.
+
+Rather than constructing a local structure instance on the stack, fill
+the fields directly on the shared ring, just like other backends do.
+Build on the fact that all response structure flavors are actually
+identical (the old code did make this assumption too).
+
+This is XSA-216.
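+
+The leak comes from structure padding: the response layout (8-byte id,
+1-byte operation, 2-byte status) leaves padding bytes that a
+whole-struct memcpy() from the stack copies out as well. A minimal
+stand-alone C sketch of the two patterns (illustrative types only, not
+the Xen ring headers):
+
+  #include <stdint.h>
+  #include <stdio.h>
+  #include <string.h>
+
+  /* roughly the blkif_response layout */
+  struct demo_response {
+          uint64_t id;
+          uint8_t  operation;
+          int16_t  status;
+  };
+
+  int main(void)
+  {
+          struct demo_response shared;  /* stands in for the ring slot */
+          struct demo_response local;   /* the old on-stack temporary  */
+          size_t fields = sizeof local.id + sizeof local.operation +
+                          sizeof local.status;
+
+          /* old pattern: only the named fields are set, yet memcpy()
+           * copies sizeof(local) bytes, i.e. the padding as well */
+          local.id = 1; local.operation = 2; local.status = 3;
+          memcpy(&shared, &local, sizeof(local));
+
+          /* new pattern: assign the fields directly on the shared slot,
+           * so nothing beyond the named fields is written out */
+          shared.id = 1;
+          shared.operation = 2;
+          shared.status = 3;
+
+          printf("struct %zu bytes, fields %zu, padding %zu\n",
+                 sizeof(struct demo_response), fields,
+                 sizeof(struct demo_response) - fields);
+          return 0;
+  }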
+
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+[bwh: Backported to 4.4: adjust context]
+Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/block/xen-blkback/blkback.c | 23 ++++++++++++-----------
+ drivers/block/xen-blkback/common.h | 25 +++++--------------------
+ 2 files changed, 17 insertions(+), 31 deletions(-)
+
+--- a/drivers/block/xen-blkback/blkback.c
++++ b/drivers/block/xen-blkback/blkback.c
+@@ -1407,33 +1407,34 @@ static int dispatch_rw_block_io(struct x
+ static void make_response(struct xen_blkif *blkif, u64 id,
+ unsigned short op, int st)
+ {
+- struct blkif_response resp;
++ struct blkif_response *resp;
+ unsigned long flags;
+ union blkif_back_rings *blk_rings = &blkif->blk_rings;
+ int notify;
+
+- resp.id = id;
+- resp.operation = op;
+- resp.status = st;
+-
+ spin_lock_irqsave(&blkif->blk_ring_lock, flags);
+ /* Place on the response ring for the relevant domain. */
+ switch (blkif->blk_protocol) {
+ case BLKIF_PROTOCOL_NATIVE:
+- memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
+- &resp, sizeof(resp));
++ resp = RING_GET_RESPONSE(&blk_rings->native,
++ blk_rings->native.rsp_prod_pvt);
+ break;
+ case BLKIF_PROTOCOL_X86_32:
+- memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
+- &resp, sizeof(resp));
++ resp = RING_GET_RESPONSE(&blk_rings->x86_32,
++ blk_rings->x86_32.rsp_prod_pvt);
+ break;
+ case BLKIF_PROTOCOL_X86_64:
+- memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
+- &resp, sizeof(resp));
++ resp = RING_GET_RESPONSE(&blk_rings->x86_64,
++ blk_rings->x86_64.rsp_prod_pvt);
+ break;
+ default:
+ BUG();
+ }
++
++ resp->id = id;
++ resp->operation = op;
++ resp->status = st;
++
+ blk_rings->common.rsp_prod_pvt++;
+ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
+ spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
+--- a/drivers/block/xen-blkback/common.h
++++ b/drivers/block/xen-blkback/common.h
+@@ -74,9 +74,8 @@ extern unsigned int xen_blkif_max_ring_o
+ struct blkif_common_request {
+ char dummy;
+ };
+-struct blkif_common_response {
+- char dummy;
+-};
++
++/* i386 protocol version */
+
+ struct blkif_x86_32_request_rw {
+ uint8_t nr_segments; /* number of segments */
+@@ -128,14 +127,6 @@ struct blkif_x86_32_request {
+ } u;
+ } __attribute__((__packed__));
+
+-/* i386 protocol version */
+-#pragma pack(push, 4)
+-struct blkif_x86_32_response {
+- uint64_t id; /* copied from request */
+- uint8_t operation; /* copied from request */
+- int16_t status; /* BLKIF_RSP_??? */
+-};
+-#pragma pack(pop)
+ /* x86_64 protocol version */
+
+ struct blkif_x86_64_request_rw {
+@@ -192,18 +183,12 @@ struct blkif_x86_64_request {
+ } u;
+ } __attribute__((__packed__));
+
+-struct blkif_x86_64_response {
+- uint64_t __attribute__((__aligned__(8))) id;
+- uint8_t operation; /* copied from request */
+- int16_t status; /* BLKIF_RSP_??? */
+-};
+-
+ DEFINE_RING_TYPES(blkif_common, struct blkif_common_request,
+- struct blkif_common_response);
++ struct blkif_response);
+ DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request,
+- struct blkif_x86_32_response);
++ struct blkif_response __packed);
+ DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request,
+- struct blkif_x86_64_response);
++ struct blkif_response);
+
+ union blkif_back_rings {
+ struct blkif_back_ring native;