--- /dev/null
+From d29216842a85c7970c536108e093963f02714498 Mon Sep 17 00:00:00 2001
+From: "Eric W. Biederman" <ebiederm@xmission.com>
+Date: Wed, 28 Sep 2016 00:27:17 -0500
+Subject: mnt: Add a per mount namespace limit on the number of mounts
+
+From: Eric W. Biederman <ebiederm@xmission.com>
+
+commit d29216842a85c7970c536108e093963f02714498 upstream.
+
+CAI Qian <caiqian@redhat.com> pointed out that the semantics
+of shared subtrees make it possible to create an exponentially
+increasing number of mounts in a mount namespace.
+
+ mkdir /tmp/1 /tmp/2
+ mount --make-rshared /
+ for i in $(seq 1 20) ; do mount --bind /tmp/1 /tmp/2 ; done
+
+Will create 2^20 (1048576) mounts: with / marked recursively shared,
+each bind mount is propagated back into the shared tree, doubling the
+mount count on every iteration. This is a practical problem, as some
+people have managed to hit it by accident.
+
+As such CVE-2016-6213 was assigned.
+
+Ian Kent <raven@themaw.net> described the situation for autofs users
+as follows:
+
+> The number of mounts for direct mount maps is usually not very large because of
+> the way they are implemented, large direct mount maps can have performance
+> problems. There can be anywhere from a few (likely case a few hundred) to less
+> than 10000, plus mounts that have been triggered and not yet expired.
+>
+> Indirect mounts have one autofs mount at the root plus the number of mounts that
+> have been triggered and not yet expired.
+>
+> The number of autofs indirect map entries can range from a few to the common
+> case of several thousand and in rare cases up to between 30000 and 50000. I've
+> not heard of people with maps larger than 50000 entries.
+>
+> The larger the number of map entries the greater the possibility for a large
+> number of active mounts so it's not hard to expect cases of a 1000 or somewhat
+> more active mounts.
+
+So I am setting the default number of mounts allowed per mount
+namespace at 100,000. This is more than enough for any use case I
+know of, but small enough to quickly stop an exponential increase
+in mounts, which should be perfect for catching misconfigurations
+and malfunctioning programs.
+
+For anyone who needs a higher limit this can be changed by writing
+to the new /proc/sys/fs/mount-max sysctl.
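+
+The only subtle part of the accounting is doing the capacity test
+without tripping over unsigned wrap-around. A minimal sketch of the
+check (too_many_mounts() is an illustrative name; count_mounts()
+below open-codes the same test):
+
+  /* Would adding 'new' mounts push the namespace past 'max'?
+   * All quantities are unsigned, so test for wrap-around
+   * explicitly instead of trusting old + pending + new.
+   */
+  static bool too_many_mounts(unsigned int old, unsigned int pending,
+                              unsigned int new, unsigned int max)
+  {
+          unsigned int sum = old + pending;
+
+          if (old > sum || pending > sum)   /* old + pending wrapped */
+                  return true;
+          return max < sum || new > max - sum;
+  }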
+
+Tested-by: CAI Qian <caiqian@redhat.com>
+Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
+[bwh: Backported to 4.4: adjust context]
+Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/sysctl/fs.txt | 7 ++++++
+ fs/mount.h | 2 +
+ fs/namespace.c | 50 +++++++++++++++++++++++++++++++++++++++++++-
+ fs/pnode.c | 2 -
+ fs/pnode.h | 1
+ include/linux/mount.h | 2 +
+ kernel/sysctl.c | 9 +++++++
+ 7 files changed, 71 insertions(+), 2 deletions(-)
+
+--- a/Documentation/sysctl/fs.txt
++++ b/Documentation/sysctl/fs.txt
+@@ -265,6 +265,13 @@ aio-nr can grow to.
+
+ ==============================================================
+
++mount-max:
++
++This denotes the maximum number of mounts that may exist
++in a mount namespace.
++
++==============================================================
++
+
+ 2. /proc/sys/fs/binfmt_misc
+ ----------------------------------------------------------
+--- a/fs/mount.h
++++ b/fs/mount.h
+@@ -13,6 +13,8 @@ struct mnt_namespace {
+ u64 seq; /* Sequence number to prevent loops */
+ wait_queue_head_t poll;
+ u64 event;
++ unsigned int mounts; /* # of mounts in the namespace */
++ unsigned int pending_mounts;
+ };
+
+ struct mnt_pcp {
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -27,6 +27,9 @@
+ #include "pnode.h"
+ #include "internal.h"
+
++/* Maximum number of mounts in a mount namespace */
++unsigned int sysctl_mount_max __read_mostly = 100000;
++
+ static unsigned int m_hash_mask __read_mostly;
+ static unsigned int m_hash_shift __read_mostly;
+ static unsigned int mp_hash_mask __read_mostly;
+@@ -925,6 +928,9 @@ static void commit_tree(struct mount *mn
+
+ list_splice(&head, n->list.prev);
+
++ n->mounts += n->pending_mounts;
++ n->pending_mounts = 0;
++
+ __attach_mnt(mnt, parent);
+ touch_mnt_namespace(n);
+ }
+@@ -1445,11 +1451,16 @@ static void umount_tree(struct mount *mn
+ propagate_umount(&tmp_list);
+
+ while (!list_empty(&tmp_list)) {
++ struct mnt_namespace *ns;
+ bool disconnect;
+ p = list_first_entry(&tmp_list, struct mount, mnt_list);
+ list_del_init(&p->mnt_expire);
+ list_del_init(&p->mnt_list);
+- __touch_mnt_namespace(p->mnt_ns);
++ ns = p->mnt_ns;
++ if (ns) {
++ ns->mounts--;
++ __touch_mnt_namespace(ns);
++ }
+ p->mnt_ns = NULL;
+ if (how & UMOUNT_SYNC)
+ p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;
+@@ -1850,6 +1861,28 @@ static int invent_group_ids(struct mount
+ return 0;
+ }
+
++int count_mounts(struct mnt_namespace *ns, struct mount *mnt)
++{
++ unsigned int max = READ_ONCE(sysctl_mount_max);
++ unsigned int mounts = 0, old, pending, sum;
++ struct mount *p;
++
++ for (p = mnt; p; p = next_mnt(p, mnt))
++ mounts++;
++
++ old = ns->mounts;
++ pending = ns->pending_mounts;
++ sum = old + pending;
++ if ((old > sum) ||
++ (pending > sum) ||
++ (max < sum) ||
++ (mounts > (max - sum)))
++ return -ENOSPC;
++
++ ns->pending_mounts = pending + mounts;
++ return 0;
++}
++
+ /*
+ * @source_mnt : mount tree to be attached
+ * @nd : place the mount tree @source_mnt is attached
+@@ -1919,6 +1952,7 @@ static int attach_recursive_mnt(struct m
+ struct path *parent_path)
+ {
+ HLIST_HEAD(tree_list);
++ struct mnt_namespace *ns = dest_mnt->mnt_ns;
+ struct mountpoint *smp;
+ struct mount *child, *p;
+ struct hlist_node *n;
+@@ -1931,6 +1965,13 @@ static int attach_recursive_mnt(struct m
+ if (IS_ERR(smp))
+ return PTR_ERR(smp);
+
++ /* Is there space to add these mounts to the mount namespace? */
++ if (!parent_path) {
++ err = count_mounts(ns, source_mnt);
++ if (err)
++ goto out;
++ }
++
+ if (IS_MNT_SHARED(dest_mnt)) {
+ err = invent_group_ids(source_mnt, true);
+ if (err)
+@@ -1970,11 +2011,14 @@ static int attach_recursive_mnt(struct m
+ out_cleanup_ids:
+ while (!hlist_empty(&tree_list)) {
+ child = hlist_entry(tree_list.first, struct mount, mnt_hash);
++ child->mnt_parent->mnt_ns->pending_mounts = 0;
+ umount_tree(child, UMOUNT_SYNC);
+ }
+ unlock_mount_hash();
+ cleanup_group_ids(source_mnt, NULL);
+ out:
++ ns->pending_mounts = 0;
++
+ read_seqlock_excl(&mount_lock);
+ put_mountpoint(smp);
+ read_sequnlock_excl(&mount_lock);
+@@ -2804,6 +2848,8 @@ static struct mnt_namespace *alloc_mnt_n
+ init_waitqueue_head(&new_ns->poll);
+ new_ns->event = 0;
+ new_ns->user_ns = get_user_ns(user_ns);
++ new_ns->mounts = 0;
++ new_ns->pending_mounts = 0;
+ return new_ns;
+ }
+
+@@ -2853,6 +2899,7 @@ struct mnt_namespace *copy_mnt_ns(unsign
+ q = new;
+ while (p) {
+ q->mnt_ns = new_ns;
++ new_ns->mounts++;
+ if (new_fs) {
+ if (&p->mnt == new_fs->root.mnt) {
+ new_fs->root.mnt = mntget(&q->mnt);
+@@ -2891,6 +2938,7 @@ static struct mnt_namespace *create_mnt_
+ struct mount *mnt = real_mount(m);
+ mnt->mnt_ns = new_ns;
+ new_ns->root = mnt;
++ new_ns->mounts++;
+ list_add(&mnt->mnt_list, &new_ns->list);
+ } else {
+ mntput(m);
+--- a/fs/pnode.c
++++ b/fs/pnode.c
+@@ -259,7 +259,7 @@ static int propagate_one(struct mount *m
+ read_sequnlock_excl(&mount_lock);
+ }
+ hlist_add_head(&child->mnt_hash, list);
+- return 0;
++ return count_mounts(m->mnt_ns, child);
+ }
+
+ /*
+--- a/fs/pnode.h
++++ b/fs/pnode.h
+@@ -54,4 +54,5 @@ void mnt_change_mountpoint(struct mount
+ struct mount *copy_tree(struct mount *, struct dentry *, int);
+ bool is_path_reachable(struct mount *, struct dentry *,
+ const struct path *root);
++int count_mounts(struct mnt_namespace *ns, struct mount *mnt);
+ #endif /* _LINUX_PNODE_H */
+--- a/include/linux/mount.h
++++ b/include/linux/mount.h
+@@ -95,4 +95,6 @@ extern void mark_mounts_for_expiry(struc
+
+ extern dev_t name_to_dev_t(const char *name);
+
++extern unsigned int sysctl_mount_max;
++
+ #endif /* _LINUX_MOUNT_H */
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -65,6 +65,7 @@
+ #include <linux/sched/sysctl.h>
+ #include <linux/kexec.h>
+ #include <linux/bpf.h>
++#include <linux/mount.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/processor.h>
+@@ -1749,6 +1750,14 @@ static struct ctl_table fs_table[] = {
+ .mode = 0644,
+ .proc_handler = proc_doulongvec_minmax,
+ },
++ {
++ .procname = "mount-max",
++ .data = &sysctl_mount_max,
++ .maxlen = sizeof(unsigned int),
++ .mode = 0644,
++ .proc_handler = proc_dointvec_minmax,
++ .extra1 = &one,
++ },
+ { }
+ };
+
--- /dev/null
+From c58d6c93680f28ac58984af61d0a7ebf4319c241 Mon Sep 17 00:00:00 2001
+From: Phil Turnbull <phil.turnbull@oracle.com>
+Date: Tue, 2 Feb 2016 13:36:45 -0500
+Subject: netfilter: nfnetlink: correctly validate length of batch messages
+
+From: Phil Turnbull <phil.turnbull@oracle.com>
+
+commit c58d6c93680f28ac58984af61d0a7ebf4319c241 upstream.
+
+If nlh->nlmsg_len is zero then an infinite loop is triggered because
+'skb_pull(skb, msglen);' pulls zero bytes.
+
+The calculation in nlmsg_len() underflows if 'nlh->nlmsg_len <
+NLMSG_HDRLEN' which bypasses the length validation and will later
+trigger an out-of-bound read.
+
+If the length validation does fail, the malformed batch message is
+copied back to userspace. However, we cannot do this, because
+nlh->nlmsg_len itself can be invalid. This leads to an out-of-bounds
+read in netlink_ack:
+
+ [ 41.455421] ==================================================================
+ [ 41.456431] BUG: KASAN: slab-out-of-bounds in memcpy+0x1d/0x40 at addr ffff880119e79340
+ [ 41.456431] Read of size 4294967280 by task a.out/987
+ [ 41.456431] =============================================================================
+ [ 41.456431] BUG kmalloc-512 (Not tainted): kasan: bad access detected
+ [ 41.456431] -----------------------------------------------------------------------------
+ ...
+ [ 41.456431] Bytes b4 ffff880119e79310: 00 00 00 00 d5 03 00 00 b0 fb fe ff 00 00 00 00 ................
+ [ 41.456431] Object ffff880119e79320: 20 00 00 00 10 00 05 00 00 00 00 00 00 00 00 00 ...............
+ [ 41.456431] Object ffff880119e79330: 14 00 0a 00 01 03 fc 40 45 56 11 22 33 10 00 05 .......@EV."3...
+ [ 41.456431] Object ffff880119e79340: f0 ff ff ff 88 99 aa bb 00 14 00 0a 00 06 fe fb ................
+ ^^ start of batch nlmsg with
+ nlmsg_len=4294967280
+ ...
+ [ 41.456431] Memory state around the buggy address:
+ [ 41.456431] ffff880119e79400: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ [ 41.456431] ffff880119e79480: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ [ 41.456431] >ffff880119e79500: 00 00 00 00 fc fc fc fc fc fc fc fc fc fc fc fc
+ [ 41.456431] ^
+ [ 41.456431] ffff880119e79580: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
+ [ 41.456431] ffff880119e79600: fc fc fc fc fc fc fc fc fc fc fb fb fb fb fb fb
+ [ 41.456431] ==================================================================
+
+Fix this with better validation of nlh->nlmsg_len and by setting
+NFNL_BATCH_FAILURE if any batch message fails length validation.
+
+CAP_NET_ADMIN is required to trigger the bugs.
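+
+The underflow is easy to see in miniature. A sketch (this is roughly
+how nlmsg_len() is defined; NLMSG_HDRLEN is 16):
+
+  static inline int nlmsg_len(const struct nlmsghdr *nlh)
+  {
+          return nlh->nlmsg_len - NLMSG_HDRLEN;   /* u32 arithmetic */
+  }
+
+  /* With nlh->nlmsg_len == 4 the u32 subtraction wraps to
+   * 0xfffffff4, and in the old check
+   *
+   *         nlmsg_len(nlh) < sizeof(struct nfgenmsg)
+   *
+   * that value is converted back to a huge unsigned number, so the
+   * "too short" test never fires. Validating nlh->nlmsg_len against
+   * NLMSG_HDRLEN first, as below, closes the hole.
+   */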
+
+Fixes: 9ea2aa8b7dba ("netfilter: nfnetlink: validate nfnetlink header from batch")
+Signed-off-by: Phil Turnbull <phil.turnbull@oracle.com>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Cc: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/netfilter/nfnetlink.c | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+--- a/net/netfilter/nfnetlink.c
++++ b/net/netfilter/nfnetlink.c
+@@ -326,10 +326,12 @@ replay:
+ nlh = nlmsg_hdr(skb);
+ err = 0;
+
+- if (nlmsg_len(nlh) < sizeof(struct nfgenmsg) ||
+- skb->len < nlh->nlmsg_len) {
+- err = -EINVAL;
+- goto ack;
++ if (nlh->nlmsg_len < NLMSG_HDRLEN ||
++ skb->len < nlh->nlmsg_len ||
++ nlmsg_len(nlh) < sizeof(struct nfgenmsg)) {
++ nfnl_err_reset(&err_list);
++ status |= NFNL_BATCH_FAILURE;
++ goto done;
+ }
+
+ /* Only requests are handled by the kernel */
--- /dev/null
+From 321027c1fe77f892f4ea07846aeae08cefbbb290 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Wed, 11 Jan 2017 21:09:50 +0100
+Subject: perf/core: Fix concurrent sys_perf_event_open() vs. 'move_group' race
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit 321027c1fe77f892f4ea07846aeae08cefbbb290 upstream.
+
+Di Shen reported a race between two concurrent sys_perf_event_open()
+calls where both try and move the same pre-existing software group
+into a hardware context.
+
+The problem is exactly that described in commit:
+
+ f63a8daa5812 ("perf: Fix event->ctx locking")
+
+... where, while we wait for a ctx->mutex acquisition, the event->ctx
+relation can have changed under us.
+
+That very same commit failed to recognise sys_perf_event_open() as an
+external access vector to the events and thereby didn't apply the
+established locking rules correctly.
+
+So while one sys_perf_event_open() call is stuck waiting on
+mutex_lock_double(), the other (which owns said locks) moves the group
+about. So by the time the former sys_perf_event_open() acquires the
+locks, the context we've acquired is stale (and possibly dead).
+
+Apply the established locking rules as per perf_event_ctx_lock_nested()
+to the mutex_lock_double() for the 'move_group' case. This obviously means
+we need to validate state after we acquire the locks.
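+
+Condensed, the helper added below (__perf_event_ctx_lock_double())
+is the usual "take a reference, take the lock, revalidate, retry"
+loop:
+
+  again:
+          rcu_read_lock();
+          gctx = READ_ONCE(group_leader->ctx);    /* unstable snapshot */
+          if (!atomic_inc_not_zero(&gctx->refcount)) {
+                  rcu_read_unlock();              /* being freed; retry */
+                  goto again;
+          }
+          rcu_read_unlock();
+
+          mutex_lock_double(&gctx->mutex, &ctx->mutex);
+
+          if (group_leader->ctx != gctx) {        /* moved while we slept */
+                  mutex_unlock(&ctx->mutex);
+                  mutex_unlock(&gctx->mutex);
+                  put_ctx(gctx);
+                  goto again;
+          }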
+
+Reported-by: Di Shen (Keen Lab)
+Tested-by: John Dias <joaodias@google.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
+Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Min Chong <mchong@google.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Stephane Eranian <eranian@google.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Vince Weaver <vincent.weaver@maine.edu>
+Fixes: f63a8daa5812 ("perf: Fix event->ctx locking")
+Link: http://lkml.kernel.org/r/20170106131444.GZ3174@twins.programming.kicks-ass.net
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+[bwh: Backported to 4.4:
+ - Test perf_event::group_flags instead of group_caps
+ - Adjust context]
+Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/events/core.c | 57 +++++++++++++++++++++++++++++++++++++++++++++++----
+ 1 file changed, 53 insertions(+), 4 deletions(-)
+
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -8250,6 +8250,37 @@ static int perf_event_set_clock(struct p
+ return 0;
+ }
+
++/*
++ * Variation on perf_event_ctx_lock_nested(), except we take two context
++ * mutexes.
++ */
++static struct perf_event_context *
++__perf_event_ctx_lock_double(struct perf_event *group_leader,
++ struct perf_event_context *ctx)
++{
++ struct perf_event_context *gctx;
++
++again:
++ rcu_read_lock();
++ gctx = READ_ONCE(group_leader->ctx);
++ if (!atomic_inc_not_zero(&gctx->refcount)) {
++ rcu_read_unlock();
++ goto again;
++ }
++ rcu_read_unlock();
++
++ mutex_lock_double(&gctx->mutex, &ctx->mutex);
++
++ if (group_leader->ctx != gctx) {
++ mutex_unlock(&ctx->mutex);
++ mutex_unlock(&gctx->mutex);
++ put_ctx(gctx);
++ goto again;
++ }
++
++ return gctx;
++}
++
+ /**
+ * sys_perf_event_open - open a performance event, associate it to a task/cpu
+ *
+@@ -8486,8 +8517,26 @@ SYSCALL_DEFINE5(perf_event_open,
+ }
+
+ if (move_group) {
+- gctx = group_leader->ctx;
+- mutex_lock_double(&gctx->mutex, &ctx->mutex);
++ gctx = __perf_event_ctx_lock_double(group_leader, ctx);
++
++ /*
++ * Check if we raced against another sys_perf_event_open() call
++ * moving the software group underneath us.
++ */
++ if (!(group_leader->group_flags & PERF_GROUP_SOFTWARE)) {
++ /*
++ * If someone moved the group out from under us, check
++ * if this new event wound up on the same ctx, if so
++ * its the regular !move_group case, otherwise fail.
++ */
++ if (gctx != ctx) {
++ err = -EINVAL;
++ goto err_locked;
++ } else {
++ perf_event_ctx_unlock(group_leader, gctx);
++ move_group = 0;
++ }
++ }
+ } else {
+ mutex_lock(&ctx->mutex);
+ }
+@@ -8582,7 +8631,7 @@ SYSCALL_DEFINE5(perf_event_open,
+ perf_unpin_context(ctx);
+
+ if (move_group)
+- mutex_unlock(&gctx->mutex);
++ perf_event_ctx_unlock(group_leader, gctx);
+ mutex_unlock(&ctx->mutex);
+
+ if (task) {
+@@ -8610,7 +8659,7 @@ SYSCALL_DEFINE5(perf_event_open,
+
+ err_locked:
+ if (move_group)
+- mutex_unlock(&gctx->mutex);
++ perf_event_ctx_unlock(group_leader, gctx);
+ mutex_unlock(&ctx->mutex);
+ /* err_file: */
+ fput(event_file);
--- /dev/null
+From 43a6684519ab0a6c52024b5e25322476cabad893 Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Fri, 24 Mar 2017 19:36:13 -0700
+Subject: ping: implement proper locking
+
+From: Eric Dumazet <edumazet@google.com>
+
+commit 43a6684519ab0a6c52024b5e25322476cabad893 upstream.
+
+We got a report of yet another bug in ping:
+
+http://www.openwall.com/lists/oss-security/2017/03/24/6
+
+->disconnect() is not called with the socket lock held.
+
+Fix this by acquiring ping rwlock earlier.
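+
+The bug is a classic check-then-act race; the shape of the fix,
+condensed from the hunk below:
+
+  /* Before: two CPUs can both pass the unlocked sk_hashed() test
+   * and both unhash, dropping the socket reference twice.
+   */
+  if (sk_hashed(sk)) {
+          write_lock_bh(&ping_table.lock);
+          /* unhash, sock_put(), ... */
+          write_unlock_bh(&ping_table.lock);
+  }
+
+  /* After: the test itself happens under the lock. */
+  write_lock_bh(&ping_table.lock);
+  if (sk_hashed(sk)) {
+          /* unhash, sock_put(), ... */
+  }
+  write_unlock_bh(&ping_table.lock);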
+
+Thanks to Daniel, Alexander and Andrey for letting us know this problem.
+
+Fixes: c319b4d76b9e ("net: ipv4: add IPPROTO_ICMP socket kind")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: Daniel Jiang <danieljiang0415@gmail.com>
+Reported-by: Solar Designer <solar@openwall.com>
+Reported-by: Andrey Konovalov <andreyknvl@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Cc: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/ipv4/ping.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/net/ipv4/ping.c
++++ b/net/ipv4/ping.c
+@@ -154,17 +154,18 @@ void ping_hash(struct sock *sk)
+ void ping_unhash(struct sock *sk)
+ {
+ struct inet_sock *isk = inet_sk(sk);
++
+ pr_debug("ping_unhash(isk=%p,isk->num=%u)\n", isk, isk->inet_num);
++ write_lock_bh(&ping_table.lock);
+ if (sk_hashed(sk)) {
+- write_lock_bh(&ping_table.lock);
+ hlist_nulls_del(&sk->sk_nulls_node);
+ sk_nulls_node_init(&sk->sk_nulls_node);
+ sock_put(sk);
+ isk->inet_num = 0;
+ isk->inet_sport = 0;
+ sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
+- write_unlock_bh(&ping_table.lock);
+ }
++ write_unlock_bh(&ping_table.lock);
+ }
+ EXPORT_SYMBOL_GPL(ping_unhash);
+
gfs2-avoid-uninitialized-variable-warning.patch
tipc-fix-random-link-resets-while-adding-a-second-bearer.patch
tipc-fix-socket-timer-deadlock.patch
+mnt-add-a-per-mount-namespace-limit-on-the-number-of-mounts.patch
+xc2028-avoid-use-after-free.patch
+netfilter-nfnetlink-correctly-validate-length-of-batch-messages.patch
+tipc-check-minimum-bearer-mtu.patch
+vfio-pci-fix-integer-overflows-bitmask-check.patch
+staging-android-ion-fix-a-race-condition-in-the-ion-driver.patch
+ping-implement-proper-locking.patch
+perf-core-fix-concurrent-sys_perf_event_open-vs.-move_group-race.patch
--- /dev/null
+From 9590232bb4f4cc824f3425a6e1349afbe6d6d2b7 Mon Sep 17 00:00:00 2001
+From: EunTaik Lee <eun.taik.lee@samsung.com>
+Date: Wed, 24 Feb 2016 04:38:06 +0000
+Subject: staging/android/ion : fix a race condition in the ion driver
+
+From: EunTaik Lee <eun.taik.lee@samsung.com>
+
+commit 9590232bb4f4cc824f3425a6e1349afbe6d6d2b7 upstream.
+
+There is a use-after-free problem in the ion driver, caused by a
+race condition in the ion_ioctl() function.
+
+A handle has a ref count of 1, and two tasks on different cpus call
+ION_IOC_FREE simultaneously.
+
+cpu 0 cpu 1
+-------------------------------------------------------
+ion_handle_get_by_id()
+(ref == 2)
+ ion_handle_get_by_id()
+ (ref == 3)
+
+ion_free()
+(ref == 2)
+
+ion_handle_put()
+(ref == 1)
+
+ ion_free()
+ (ref == 0 so ion_handle_destroy() is
+ called
+ and the handle is freed.)
+
+ ion_handle_put() is called and it
+ decreases the slub's next free pointer
+
+The problem is detected as an unaligned access in the spin lock
+functions, since they use load-exclusive instructions. In some cases
+it corrupts the slub's free pointer, which causes a misaligned access
+to the next free pointer (kmalloc returns a pointer like
+ffffc0745b4580aa), and lots of other hard-to-debug problems.
+
+This symptom occurs because the first member of the ion_handle
+structure is the reference count, and the ion driver decrements that
+reference count after the handle has been freed.
+
+To fix this problem, the client->lock mutex is extended to protect
+all the code that uses the handle.
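+
+Concretely, for ION_IOC_FREE the lookup, the free, and the final put
+now all happen inside one critical section; condensed from the hunks
+below:
+
+  mutex_lock(&client->lock);
+  handle = ion_handle_get_by_id_nolock(client, data.handle.handle);
+  if (IS_ERR(handle)) {
+          mutex_unlock(&client->lock);
+          return PTR_ERR(handle);
+  }
+  ion_free_nolock(client, handle);     /* drops the handle's reference */
+  ion_handle_put_nolock(handle);       /* drops the lookup reference */
+  mutex_unlock(&client->lock);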
+
+Signed-off-by: Eun Taik Lee <eun.taik.lee@samsung.com>
+Reviewed-by: Laura Abbott <labbott@redhat.com>
+Cc: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/staging/android/ion/ion.c | 55 +++++++++++++++++++++++++++++---------
+ 1 file changed, 42 insertions(+), 13 deletions(-)
+
+--- a/drivers/staging/android/ion/ion.c
++++ b/drivers/staging/android/ion/ion.c
+@@ -387,13 +387,22 @@ static void ion_handle_get(struct ion_ha
+ kref_get(&handle->ref);
+ }
+
+-static int ion_handle_put(struct ion_handle *handle)
++static int ion_handle_put_nolock(struct ion_handle *handle)
++{
++ int ret;
++
++ ret = kref_put(&handle->ref, ion_handle_destroy);
++
++ return ret;
++}
++
++int ion_handle_put(struct ion_handle *handle)
+ {
+ struct ion_client *client = handle->client;
+ int ret;
+
+ mutex_lock(&client->lock);
+- ret = kref_put(&handle->ref, ion_handle_destroy);
++ ret = ion_handle_put_nolock(handle);
+ mutex_unlock(&client->lock);
+
+ return ret;
+@@ -417,20 +426,30 @@ static struct ion_handle *ion_handle_loo
+ return ERR_PTR(-EINVAL);
+ }
+
+-static struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
++static struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
+ int id)
+ {
+ struct ion_handle *handle;
+
+- mutex_lock(&client->lock);
+ handle = idr_find(&client->idr, id);
+ if (handle)
+ ion_handle_get(handle);
+- mutex_unlock(&client->lock);
+
+ return handle ? handle : ERR_PTR(-EINVAL);
+ }
+
++struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
++ int id)
++{
++ struct ion_handle *handle;
++
++ mutex_lock(&client->lock);
++ handle = ion_handle_get_by_id_nolock(client, id);
++ mutex_unlock(&client->lock);
++
++ return handle;
++}
++
+ static bool ion_handle_validate(struct ion_client *client,
+ struct ion_handle *handle)
+ {
+@@ -532,22 +551,28 @@ struct ion_handle *ion_alloc(struct ion_
+ }
+ EXPORT_SYMBOL(ion_alloc);
+
+-void ion_free(struct ion_client *client, struct ion_handle *handle)
++static void ion_free_nolock(struct ion_client *client, struct ion_handle *handle)
+ {
+ bool valid_handle;
+
+ BUG_ON(client != handle->client);
+
+- mutex_lock(&client->lock);
+ valid_handle = ion_handle_validate(client, handle);
+
+ if (!valid_handle) {
+ WARN(1, "%s: invalid handle passed to free.\n", __func__);
+- mutex_unlock(&client->lock);
+ return;
+ }
++ ion_handle_put_nolock(handle);
++}
++
++void ion_free(struct ion_client *client, struct ion_handle *handle)
++{
++ BUG_ON(client != handle->client);
++
++ mutex_lock(&client->lock);
++ ion_free_nolock(client, handle);
+ mutex_unlock(&client->lock);
+- ion_handle_put(handle);
+ }
+ EXPORT_SYMBOL(ion_free);
+
+@@ -1283,11 +1308,15 @@ static long ion_ioctl(struct file *filp,
+ {
+ struct ion_handle *handle;
+
+- handle = ion_handle_get_by_id(client, data.handle.handle);
+- if (IS_ERR(handle))
++ mutex_lock(&client->lock);
++ handle = ion_handle_get_by_id_nolock(client, data.handle.handle);
++ if (IS_ERR(handle)) {
++ mutex_unlock(&client->lock);
+ return PTR_ERR(handle);
+- ion_free(client, handle);
+- ion_handle_put(handle);
++ }
++ ion_free_nolock(client, handle);
++ ion_handle_put_nolock(handle);
++ mutex_unlock(&client->lock);
+ break;
+ }
+ case ION_IOC_SHARE:
--- /dev/null
+From 3de81b758853f0b29c61e246679d20b513c4cfec Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Michal=20Kube=C4=8Dek?= <mkubecek@suse.cz>
+Date: Fri, 2 Dec 2016 09:33:41 +0100
+Subject: tipc: check minimum bearer MTU
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Michal Kubeček <mkubecek@suse.cz>
+
+commit 3de81b758853f0b29c61e246679d20b513c4cfec upstream.
+
+Qian Zhang (张谦) reported a potential socket buffer overflow in
+tipc_msg_build(), also known as CVE-2016-8632: due to insufficient
+checks, a buffer overflow can occur if the MTU is too short to fit
+even the tipc headers. As anyone can set a device MTU in a user/net
+namespace, this issue can be abused by a regular user.
+
+As agreed in the discussion on Ben Hutchings' original patch, we should
+check the MTU at the moment a bearer is attached rather than for each
+processed packet. We also need to repeat the check when bearer MTU is
+adjusted to new device MTU. UDP case also needs a check to avoid
+overflow when calculating bearer MTU.
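+
+The UDP case is a plain unsigned underflow; condensed from the hunk
+below (20 + 8 bytes of IPv4 + UDP headers):
+
+  /* b->mtu is unsigned, so with a tiny dev->mtu this wraps to a
+   * huge value instead of going negative:
+   */
+  b->mtu = dev->mtu - sizeof(struct iphdr) - sizeof(struct udphdr);
+
+  /* hence the new check, passing the encapsulation overhead as the
+   * 'reserve' argument of tipc_mtu_bad():
+   */
+  if (tipc_mtu_bad(dev, sizeof(struct iphdr) + sizeof(struct udphdr)))
+          return -EINVAL;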
+
+Fixes: b97bf3fd8f6a ("[TIPC] Initial merge")
+Signed-off-by: Michal Kubecek <mkubecek@suse.cz>
+Reported-by: Qian Zhang (张谦) <zhangqian-c@360.cn>
+Acked-by: Ying Xue <ying.xue@windriver.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+[bwh: Backported to 4.4:
+ - Adjust context
+ - NETDEV_GOING_DOWN and NETDEV_CHANGEMTU cases in net notifier were combined]
+Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/tipc/bearer.c | 13 +++++++++++--
+ net/tipc/bearer.h | 13 +++++++++++++
+ net/tipc/udp_media.c | 5 +++++
+ 3 files changed, 29 insertions(+), 2 deletions(-)
+
+--- a/net/tipc/bearer.c
++++ b/net/tipc/bearer.c
+@@ -381,6 +381,10 @@ int tipc_enable_l2_media(struct net *net
+ dev = dev_get_by_name(net, driver_name);
+ if (!dev)
+ return -ENODEV;
++ if (tipc_mtu_bad(dev, 0)) {
++ dev_put(dev);
++ return -EINVAL;
++ }
+
+ /* Associate TIPC bearer with L2 bearer */
+ rcu_assign_pointer(b->media_ptr, dev);
+@@ -570,14 +574,19 @@ static int tipc_l2_device_event(struct n
+ if (!b_ptr)
+ return NOTIFY_DONE;
+
+- b_ptr->mtu = dev->mtu;
+-
+ switch (evt) {
+ case NETDEV_CHANGE:
+ if (netif_carrier_ok(dev))
+ break;
+ case NETDEV_GOING_DOWN:
++ tipc_reset_bearer(net, b_ptr);
++ break;
+ case NETDEV_CHANGEMTU:
++ if (tipc_mtu_bad(dev, 0)) {
++ bearer_disable(net, b_ptr);
++ break;
++ }
++ b_ptr->mtu = dev->mtu;
+ tipc_reset_bearer(net, b_ptr);
+ break;
+ case NETDEV_CHANGEADDR:
+--- a/net/tipc/bearer.h
++++ b/net/tipc/bearer.h
+@@ -39,6 +39,7 @@
+
+ #include "netlink.h"
+ #include "core.h"
++#include "msg.h"
+ #include <net/genetlink.h>
+
+ #define MAX_MEDIA 3
+@@ -61,6 +62,9 @@
+ #define TIPC_MEDIA_TYPE_IB 2
+ #define TIPC_MEDIA_TYPE_UDP 3
+
++/* minimum bearer MTU */
++#define TIPC_MIN_BEARER_MTU (MAX_H_SIZE + INT_H_SIZE)
++
+ /**
+ * struct tipc_node_map - set of node identifiers
+ * @count: # of nodes in set
+@@ -226,4 +230,13 @@ void tipc_bearer_xmit(struct net *net, u
+ void tipc_bearer_bc_xmit(struct net *net, u32 bearer_id,
+ struct sk_buff_head *xmitq);
+
++/* check if device MTU is too low for tipc headers */
++static inline bool tipc_mtu_bad(struct net_device *dev, unsigned int reserve)
++{
++ if (dev->mtu >= TIPC_MIN_BEARER_MTU + reserve)
++ return false;
++ netdev_warn(dev, "MTU too low for tipc bearer\n");
++ return true;
++}
++
+ #endif /* _TIPC_BEARER_H */
+--- a/net/tipc/udp_media.c
++++ b/net/tipc/udp_media.c
+@@ -376,6 +376,11 @@ static int tipc_udp_enable(struct net *n
+ udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
+ udp_conf.use_udp_checksums = false;
+ ub->ifindex = dev->ifindex;
++ if (tipc_mtu_bad(dev, sizeof(struct iphdr) +
++ sizeof(struct udphdr))) {
++ err = -EINVAL;
++ goto err;
++ }
+ b->mtu = dev->mtu - sizeof(struct iphdr)
+ - sizeof(struct udphdr);
+ #if IS_ENABLED(CONFIG_IPV6)
--- /dev/null
+From 05692d7005a364add85c6e25a6c4447ce08f913a Mon Sep 17 00:00:00 2001
+From: Vlad Tsyrklevich <vlad@tsyrklevich.net>
+Date: Wed, 12 Oct 2016 18:51:24 +0200
+Subject: vfio/pci: Fix integer overflows, bitmask check
+
+From: Vlad Tsyrklevich <vlad@tsyrklevich.net>
+
+commit 05692d7005a364add85c6e25a6c4447ce08f913a upstream.
+
+The VFIO_DEVICE_SET_IRQS ioctl did not sufficiently sanitize
+user-supplied integers, potentially allowing memory corruption. This
+patch adds appropriate integer overflow checks, checks the range bounds
+for VFIO_IRQ_SET_DATA_NONE, and also verifies that only a single
+element in the VFIO_IRQ_SET_DATA_TYPE_MASK bitmask is set.
+VFIO_IRQ_SET_ACTION_TYPE_MASK is already correctly checked later in
+vfio_pci_set_irqs_ioctl().
+
+Furthermore, a kzalloc is changed to a kcalloc because the use of a
+kzalloc with an integer multiplication allowed an integer overflow
+condition to be reached without this patch. kcalloc checks for overflow
+and should prevent a similar occurrence.
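+
+The allocation issue, in miniature (both lines are taken from the
+hunk below):
+
+  /* nvec * sizeof(...) can wrap for large nvec, yielding a small
+   * allocation that later code overruns:
+   */
+  vdev->ctx = kzalloc(nvec * sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
+
+  /* kcalloc() does the multiplication with an overflow check and
+   * returns NULL on wrap:
+   */
+  vdev->ctx = kcalloc(nvec, sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);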
+
+Signed-off-by: Vlad Tsyrklevich <vlad@tsyrklevich.net>
+Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
+Cc: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/vfio/pci/vfio_pci.c | 35 ++++++++++++++++++++++-------------
+ drivers/vfio/pci/vfio_pci_intrs.c | 2 +-
+ 2 files changed, 23 insertions(+), 14 deletions(-)
+
+--- a/drivers/vfio/pci/vfio_pci.c
++++ b/drivers/vfio/pci/vfio_pci.c
+@@ -562,8 +562,9 @@ static long vfio_pci_ioctl(void *device_
+
+ } else if (cmd == VFIO_DEVICE_SET_IRQS) {
+ struct vfio_irq_set hdr;
++ size_t size;
+ u8 *data = NULL;
+- int ret = 0;
++ int max, ret = 0;
+
+ minsz = offsetofend(struct vfio_irq_set, count);
+
+@@ -571,23 +572,31 @@ static long vfio_pci_ioctl(void *device_
+ return -EFAULT;
+
+ if (hdr.argsz < minsz || hdr.index >= VFIO_PCI_NUM_IRQS ||
++ hdr.count >= (U32_MAX - hdr.start) ||
+ hdr.flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK |
+ VFIO_IRQ_SET_ACTION_TYPE_MASK))
+ return -EINVAL;
+
+- if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
+- size_t size;
+- int max = vfio_pci_get_irq_count(vdev, hdr.index);
+-
+- if (hdr.flags & VFIO_IRQ_SET_DATA_BOOL)
+- size = sizeof(uint8_t);
+- else if (hdr.flags & VFIO_IRQ_SET_DATA_EVENTFD)
+- size = sizeof(int32_t);
+- else
+- return -EINVAL;
++ max = vfio_pci_get_irq_count(vdev, hdr.index);
++ if (hdr.start >= max || hdr.start + hdr.count > max)
++ return -EINVAL;
++
++ switch (hdr.flags & VFIO_IRQ_SET_DATA_TYPE_MASK) {
++ case VFIO_IRQ_SET_DATA_NONE:
++ size = 0;
++ break;
++ case VFIO_IRQ_SET_DATA_BOOL:
++ size = sizeof(uint8_t);
++ break;
++ case VFIO_IRQ_SET_DATA_EVENTFD:
++ size = sizeof(int32_t);
++ break;
++ default:
++ return -EINVAL;
++ }
+
+- if (hdr.argsz - minsz < hdr.count * size ||
+- hdr.start >= max || hdr.start + hdr.count > max)
++ if (size) {
++ if (hdr.argsz - minsz < hdr.count * size)
+ return -EINVAL;
+
+ data = memdup_user((void __user *)(arg + minsz),
+--- a/drivers/vfio/pci/vfio_pci_intrs.c
++++ b/drivers/vfio/pci/vfio_pci_intrs.c
+@@ -255,7 +255,7 @@ static int vfio_msi_enable(struct vfio_p
+ if (!is_irq_none(vdev))
+ return -EINVAL;
+
+- vdev->ctx = kzalloc(nvec * sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
++ vdev->ctx = kcalloc(nvec, sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
+ if (!vdev->ctx)
+ return -ENOMEM;
+
--- /dev/null
+From 8dfbcc4351a0b6d2f2d77f367552f48ffefafe18 Mon Sep 17 00:00:00 2001
+From: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+Date: Thu, 28 Jan 2016 09:22:44 -0200
+Subject: [media] xc2028: avoid use after free
+
+From: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+
+commit 8dfbcc4351a0b6d2f2d77f367552f48ffefafe18 upstream.
+
+If struct xc2028_config is passed without a firmware name,
+the following trouble may happen:
+
+[11009.907205] xc2028 5-0061: type set to XCeive xc2028/xc3028 tuner
+[11009.907491] ==================================================================
+[11009.907750] BUG: KASAN: use-after-free in strcmp+0x96/0xb0 at addr ffff8803bd78ab40
+[11009.907992] Read of size 1 by task modprobe/28992
+[11009.907994] =============================================================================
+[11009.907997] BUG kmalloc-16 (Tainted: G W ): kasan: bad access detected
+[11009.907999] -----------------------------------------------------------------------------
+
+[11009.908008] INFO: Allocated in xhci_urb_enqueue+0x214/0x14c0 [xhci_hcd] age=0 cpu=3 pid=28992
+[11009.908012] ___slab_alloc+0x581/0x5b0
+[11009.908014] __slab_alloc+0x51/0x90
+[11009.908017] __kmalloc+0x27b/0x350
+[11009.908022] xhci_urb_enqueue+0x214/0x14c0 [xhci_hcd]
+[11009.908026] usb_hcd_submit_urb+0x1e8/0x1c60
+[11009.908029] usb_submit_urb+0xb0e/0x1200
+[11009.908032] usb_serial_generic_write_start+0xb6/0x4c0
+[11009.908035] usb_serial_generic_write+0x92/0xc0
+[11009.908039] usb_console_write+0x38a/0x560
+[11009.908045] call_console_drivers.constprop.14+0x1ee/0x2c0
+[11009.908051] console_unlock+0x40d/0x900
+[11009.908056] vprintk_emit+0x4b4/0x830
+[11009.908061] vprintk_default+0x1f/0x30
+[11009.908064] printk+0x99/0xb5
+[11009.908067] kasan_report_error+0x10a/0x550
+[11009.908070] __asan_report_load1_noabort+0x43/0x50
+[11009.908074] INFO: Freed in xc2028_set_config+0x90/0x630 [tuner_xc2028] age=1 cpu=3 pid=28992
+[11009.908077] __slab_free+0x2ec/0x460
+[11009.908080] kfree+0x266/0x280
+[11009.908083] xc2028_set_config+0x90/0x630 [tuner_xc2028]
+[11009.908086] xc2028_attach+0x310/0x8a0 [tuner_xc2028]
+[11009.908090] em28xx_attach_xc3028.constprop.7+0x1f9/0x30d [em28xx_dvb]
+[11009.908094] em28xx_dvb_init.part.3+0x8e4/0x5cf4 [em28xx_dvb]
+[11009.908098] em28xx_dvb_init+0x81/0x8a [em28xx_dvb]
+[11009.908101] em28xx_register_extension+0xd9/0x190 [em28xx]
+[11009.908105] em28xx_dvb_register+0x10/0x1000 [em28xx_dvb]
+[11009.908108] do_one_initcall+0x141/0x300
+[11009.908111] do_init_module+0x1d0/0x5ad
+[11009.908114] load_module+0x6666/0x9ba0
+[11009.908117] SyS_finit_module+0x108/0x130
+[11009.908120] entry_SYSCALL_64_fastpath+0x16/0x76
+[11009.908123] INFO: Slab 0xffffea000ef5e280 objects=25 used=25 fp=0x (null) flags=0x2ffff8000004080
+[11009.908126] INFO: Object 0xffff8803bd78ab40 @offset=2880 fp=0x0000000000000001
+
+[11009.908130] Bytes b4 ffff8803bd78ab30: 01 00 00 00 2a 07 00 00 9d 28 00 00 01 00 00 00 ....*....(......
+[11009.908133] Object ffff8803bd78ab40: 01 00 00 00 00 00 00 00 b0 1d c3 6a 00 88 ff ff ...........j....
+[11009.908137] CPU: 3 PID: 28992 Comm: modprobe Tainted: G B W 4.5.0-rc1+ #43
+[11009.908140] Hardware name: /NUC5i7RYB, BIOS RYBDWi35.86A.0350.2015.0812.1722 08/12/2015
+[11009.908142] ffff8803bd78a000 ffff8802c273f1b8 ffffffff81932007 ffff8803c6407a80
+[11009.908148] ffff8802c273f1e8 ffffffff81556759 ffff8803c6407a80 ffffea000ef5e280
+[11009.908153] ffff8803bd78ab40 dffffc0000000000 ffff8802c273f210 ffffffff8155ccb4
+[11009.908158] Call Trace:
+[11009.908162] [<ffffffff81932007>] dump_stack+0x4b/0x64
+[11009.908165] [<ffffffff81556759>] print_trailer+0xf9/0x150
+[11009.908168] [<ffffffff8155ccb4>] object_err+0x34/0x40
+[11009.908171] [<ffffffff8155f260>] kasan_report_error+0x230/0x550
+[11009.908175] [<ffffffff81237d71>] ? trace_hardirqs_off_caller+0x21/0x290
+[11009.908179] [<ffffffff8155e926>] ? kasan_unpoison_shadow+0x36/0x50
+[11009.908182] [<ffffffff8155f5c3>] __asan_report_load1_noabort+0x43/0x50
+[11009.908185] [<ffffffff8155ea00>] ? __asan_register_globals+0x50/0xa0
+[11009.908189] [<ffffffff8194cea6>] ? strcmp+0x96/0xb0
+[11009.908192] [<ffffffff8194cea6>] strcmp+0x96/0xb0
+[11009.908196] [<ffffffffa13ba4ac>] xc2028_set_config+0x15c/0x630 [tuner_xc2028]
+[11009.908200] [<ffffffffa13bac90>] xc2028_attach+0x310/0x8a0 [tuner_xc2028]
+[11009.908203] [<ffffffff8155ea78>] ? memset+0x28/0x30
+[11009.908206] [<ffffffffa13ba980>] ? xc2028_set_config+0x630/0x630 [tuner_xc2028]
+[11009.908211] [<ffffffffa157a59a>] em28xx_attach_xc3028.constprop.7+0x1f9/0x30d [em28xx_dvb]
+[11009.908215] [<ffffffffa157aa2a>] ? em28xx_dvb_init.part.3+0x37c/0x5cf4 [em28xx_dvb]
+[11009.908219] [<ffffffffa157a3a1>] ? hauppauge_hvr930c_init+0x487/0x487 [em28xx_dvb]
+[11009.908222] [<ffffffffa01795ac>] ? lgdt330x_attach+0x1cc/0x370 [lgdt330x]
+[11009.908226] [<ffffffffa01793e0>] ? i2c_read_demod_bytes.isra.2+0x210/0x210 [lgdt330x]
+[11009.908230] [<ffffffff812e87d0>] ? ref_module.part.15+0x10/0x10
+[11009.908233] [<ffffffff812e56e0>] ? module_assert_mutex_or_preempt+0x80/0x80
+[11009.908238] [<ffffffffa157af92>] em28xx_dvb_init.part.3+0x8e4/0x5cf4 [em28xx_dvb]
+[11009.908242] [<ffffffffa157a6ae>] ? em28xx_attach_xc3028.constprop.7+0x30d/0x30d [em28xx_dvb]
+[11009.908245] [<ffffffff8195222d>] ? string+0x14d/0x1f0
+[11009.908249] [<ffffffff8195381f>] ? symbol_string+0xff/0x1a0
+[11009.908253] [<ffffffff81953720>] ? uuid_string+0x6f0/0x6f0
+[11009.908257] [<ffffffff811a775e>] ? __kernel_text_address+0x7e/0xa0
+[11009.908260] [<ffffffff8104b02f>] ? print_context_stack+0x7f/0xf0
+[11009.908264] [<ffffffff812e9846>] ? __module_address+0xb6/0x360
+[11009.908268] [<ffffffff8137fdc9>] ? is_ftrace_trampoline+0x99/0xe0
+[11009.908271] [<ffffffff811a775e>] ? __kernel_text_address+0x7e/0xa0
+[11009.908275] [<ffffffff81240a70>] ? debug_check_no_locks_freed+0x290/0x290
+[11009.908278] [<ffffffff8104a24b>] ? dump_trace+0x11b/0x300
+[11009.908282] [<ffffffffa13e8143>] ? em28xx_register_extension+0x23/0x190 [em28xx]
+[11009.908285] [<ffffffff81237d71>] ? trace_hardirqs_off_caller+0x21/0x290
+[11009.908289] [<ffffffff8123ff56>] ? trace_hardirqs_on_caller+0x16/0x590
+[11009.908292] [<ffffffff812404dd>] ? trace_hardirqs_on+0xd/0x10
+[11009.908296] [<ffffffffa13e8143>] ? em28xx_register_extension+0x23/0x190 [em28xx]
+[11009.908299] [<ffffffff822dcbb0>] ? mutex_trylock+0x400/0x400
+[11009.908302] [<ffffffff810021a1>] ? do_one_initcall+0x131/0x300
+[11009.908306] [<ffffffff81296dc7>] ? call_rcu_sched+0x17/0x20
+[11009.908309] [<ffffffff8159e708>] ? put_object+0x48/0x70
+[11009.908314] [<ffffffffa1579f11>] em28xx_dvb_init+0x81/0x8a [em28xx_dvb]
+[11009.908317] [<ffffffffa13e81f9>] em28xx_register_extension+0xd9/0x190 [em28xx]
+[11009.908320] [<ffffffffa0150000>] ? 0xffffffffa0150000
+[11009.908324] [<ffffffffa0150010>] em28xx_dvb_register+0x10/0x1000 [em28xx_dvb]
+[11009.908327] [<ffffffff810021b1>] do_one_initcall+0x141/0x300
+[11009.908330] [<ffffffff81002070>] ? try_to_run_init_process+0x40/0x40
+[11009.908333] [<ffffffff8123ff56>] ? trace_hardirqs_on_caller+0x16/0x590
+[11009.908337] [<ffffffff8155e926>] ? kasan_unpoison_shadow+0x36/0x50
+[11009.908340] [<ffffffff8155e926>] ? kasan_unpoison_shadow+0x36/0x50
+[11009.908343] [<ffffffff8155e926>] ? kasan_unpoison_shadow+0x36/0x50
+[11009.908346] [<ffffffff8155ea37>] ? __asan_register_globals+0x87/0xa0
+[11009.908350] [<ffffffff8144da7b>] do_init_module+0x1d0/0x5ad
+[11009.908353] [<ffffffff812f2626>] load_module+0x6666/0x9ba0
+[11009.908356] [<ffffffff812e9c90>] ? symbol_put_addr+0x50/0x50
+[11009.908361] [<ffffffffa1580037>] ? em28xx_dvb_init.part.3+0x5989/0x5cf4 [em28xx_dvb]
+[11009.908366] [<ffffffff812ebfc0>] ? module_frob_arch_sections+0x20/0x20
+[11009.908369] [<ffffffff815bc940>] ? open_exec+0x50/0x50
+[11009.908374] [<ffffffff811671bb>] ? ns_capable+0x5b/0xd0
+[11009.908377] [<ffffffff812f5e58>] SyS_finit_module+0x108/0x130
+[11009.908379] [<ffffffff812f5d50>] ? SyS_init_module+0x1f0/0x1f0
+[11009.908383] [<ffffffff81004044>] ? lockdep_sys_exit_thunk+0x12/0x14
+[11009.908394] [<ffffffff822e6936>] entry_SYSCALL_64_fastpath+0x16/0x76
+[11009.908396] Memory state around the buggy address:
+[11009.908398] ffff8803bd78aa00: 00 00 fc fc fc fc fc fc fc fc fc fc fc fc fc fc
+[11009.908401] ffff8803bd78aa80: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
+[11009.908403] >ffff8803bd78ab00: fc fc fc fc fc fc fc fc 00 00 fc fc fc fc fc fc
+[11009.908405] ^
+[11009.908407] ffff8803bd78ab80: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
+[11009.908409] ffff8803bd78ac00: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
+[11009.908411] ==================================================================
+
+In order to avoid it, let's set the cached value of the firmware
+name to NULL after freeing it. While here, return an error if
+the memory allocation fails.
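+
+The fixed flow, condensed from the hunk below:
+
+  kfree(priv->ctrl.fname);
+  priv->ctrl.fname = NULL;     /* no stale pointer left behind */
+  memcpy(&priv->ctrl, p, sizeof(priv->ctrl));
+  if (p->fname) {
+          priv->ctrl.fname = kstrdup(p->fname, GFP_KERNEL);
+          if (priv->ctrl.fname == NULL)
+                  return -ENOMEM;      /* fail instead of limping on */
+  }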
+
+Signed-off-by: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+Cc: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/media/tuners/tuner-xc2028.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/media/tuners/tuner-xc2028.c
++++ b/drivers/media/tuners/tuner-xc2028.c
+@@ -1403,11 +1403,12 @@ static int xc2028_set_config(struct dvb_
+ * in order to avoid troubles during device release.
+ */
+ kfree(priv->ctrl.fname);
++ priv->ctrl.fname = NULL;
+ memcpy(&priv->ctrl, p, sizeof(priv->ctrl));
+ if (p->fname) {
+ priv->ctrl.fname = kstrdup(p->fname, GFP_KERNEL);
+ if (priv->ctrl.fname == NULL)
+- rc = -ENOMEM;
++ return -ENOMEM;
+ }
+
+ /*