--- /dev/null
+From b58c89986a77a23658682a100eb15d8edb571ebb Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Sat, 8 Feb 2014 10:26:33 -0500
+Subject: cgroup: fix error return from cgroup_create()
+
+From: Tejun Heo <tj@kernel.org>
+
+commit b58c89986a77a23658682a100eb15d8edb571ebb upstream.
+
+cgroup_create() was returning 0 after allocation failures. Fix it.
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Acked-by: Li Zefan <lizefan@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/cgroup.c | 10 +++++++---
+ 1 file changed, 7 insertions(+), 3 deletions(-)
+
+--- a/kernel/cgroup.c
++++ b/kernel/cgroup.c
+@@ -4348,7 +4348,7 @@ static long cgroup_create(struct cgroup
+ struct cgroup *cgrp;
+ struct cgroup_name *name;
+ struct cgroupfs_root *root = parent->root;
+- int err = 0;
++ int err;
+ struct cgroup_subsys *ss;
+ struct super_block *sb = root->sb;
+
+@@ -4358,8 +4358,10 @@ static long cgroup_create(struct cgroup
+ return -ENOMEM;
+
+ name = cgroup_alloc_name(dentry);
+- if (!name)
++ if (!name) {
++ err = -ENOMEM;
+ goto err_free_cgrp;
++ }
+ rcu_assign_pointer(cgrp->name, name);
+
+ /*
+@@ -4367,8 +4369,10 @@ static long cgroup_create(struct cgroup
+ * a half-baked cgroup.
+ */
+ cgrp->id = idr_alloc(&root->cgroup_idr, NULL, 1, 0, GFP_KERNEL);
+- if (cgrp->id < 0)
++ if (cgrp->id < 0) {
++ err = -ENOMEM;
+ goto err_free_name;
++ }
+
+ /*
+ * Only live parents can have children. Note that the liveliness
--- /dev/null
+From eb46bf89696972b856a9adb6aebd5c7b65c266e4 Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Sat, 8 Feb 2014 10:26:33 -0500
+Subject: cgroup: fix error return value in cgroup_mount()
+
+From: Tejun Heo <tj@kernel.org>
+
+commit eb46bf89696972b856a9adb6aebd5c7b65c266e4 upstream.
+
+When cgroup_mount() fails to allocate an id for the root, it didn't
+set ret before jumping to unlock_drop ending up returning 0 after a
+failure. Fix it.
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Acked-by: Li Zefan <lizefan@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/cgroup.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/kernel/cgroup.c
++++ b/kernel/cgroup.c
+@@ -1580,10 +1580,10 @@ static struct dentry *cgroup_mount(struc
+ mutex_lock(&cgroup_mutex);
+ mutex_lock(&cgroup_root_mutex);
+
+- root_cgrp->id = idr_alloc(&root->cgroup_idr, root_cgrp,
+- 0, 1, GFP_KERNEL);
+- if (root_cgrp->id < 0)
++ ret = idr_alloc(&root->cgroup_idr, root_cgrp, 0, 1, GFP_KERNEL);
++ if (ret < 0)
+ goto unlock_drop;
++ root_cgrp->id = ret;
+
+ /* Check for name clashes with existing mounts */
+ ret = -EBUSY;
--- /dev/null
+From 48573a893303986e3b0b2974d6fb11f3d1bb7064 Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Sat, 8 Feb 2014 10:26:34 -0500
+Subject: cgroup: fix locking in cgroup_cfts_commit()
+
+From: Tejun Heo <tj@kernel.org>
+
+commit 48573a893303986e3b0b2974d6fb11f3d1bb7064 upstream.
+
+cgroup_cfts_commit() walks the cgroup hierarchy that the target
+subsystem is attached to and tries to apply the file changes. Due to
+the convolution with inode locking, it can't keep cgroup_mutex locked
+while iterating. It currently holds only RCU read lock around the
+actual iteration and then pins the found cgroup using dget().
+
+Unfortunately, this is incorrect. Although the iteration does check
+cgroup_is_dead() before invoking dget(), there's nothing which
+prevents the dentry from going away in between. Note that this is
+different from the usual css iterations where css_tryget() is used to
+pin the css - css_tryget() tests whether the css can be pinned and
+fails if not.
+
+The problem can be solved by simply holding cgroup_mutex instead of
+RCU read lock around the iteration, which actually reduces LOC.
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Acked-by: Li Zefan <lizefan@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/cgroup.c | 11 ++---------
+ 1 file changed, 2 insertions(+), 9 deletions(-)
+
+--- a/kernel/cgroup.c
++++ b/kernel/cgroup.c
+@@ -2845,10 +2845,7 @@ static int cgroup_cfts_commit(struct cft
+ */
+ update_before = cgroup_serial_nr_next;
+
+- mutex_unlock(&cgroup_mutex);
+-
+ /* add/rm files for all cgroups created before */
+- rcu_read_lock();
+ css_for_each_descendant_pre(css, cgroup_css(root, ss)) {
+ struct cgroup *cgrp = css->cgroup;
+
+@@ -2857,23 +2854,19 @@ static int cgroup_cfts_commit(struct cft
+
+ inode = cgrp->dentry->d_inode;
+ dget(cgrp->dentry);
+- rcu_read_unlock();
+-
+ dput(prev);
+ prev = cgrp->dentry;
+
++ mutex_unlock(&cgroup_mutex);
+ mutex_lock(&inode->i_mutex);
+ mutex_lock(&cgroup_mutex);
+ if (cgrp->serial_nr < update_before && !cgroup_is_dead(cgrp))
+ ret = cgroup_addrm_files(cgrp, cfts, is_add);
+- mutex_unlock(&cgroup_mutex);
+ mutex_unlock(&inode->i_mutex);
+-
+- rcu_read_lock();
+ if (ret)
+ break;
+ }
+- rcu_read_unlock();
++ mutex_unlock(&cgroup_mutex);
+ dput(prev);
+ deactivate_super(sb);
+ return ret;
--- /dev/null
+From 532de3fc72adc2a6525c4d53c07bf81e1732083d Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Thu, 13 Feb 2014 13:29:31 -0500
+Subject: cgroup: update cgroup_enable_task_cg_lists() to grab siglock
+
+From: Tejun Heo <tj@kernel.org>
+
+commit 532de3fc72adc2a6525c4d53c07bf81e1732083d upstream.
+
+Currently, there's nothing preventing cgroup_enable_task_cg_lists()
+from missing a set PF_EXITING and racing against cgroup_exit(). Depending
+on the timing, cgroup_exit() may finish with the task still linked on
+css_set leading to list corruption. Fix it by grabbing siglock in
+cgroup_enable_task_cg_lists() so that PF_EXITING is guaranteed to be
+visible.
+
+This whole on-demand cg_list optimization is extremely fragile and has
+ample possibility to lead to bugs which can cause things like
+once-a-year oops during boot. I'm wondering whether the better
+approach would be just adding "cgroup_disable=all" handling which
+disables the whole cgroup rather than tempting fate with this
+on-demand craziness.
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Acked-by: Li Zefan <lizefan@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/cgroup.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/kernel/cgroup.c
++++ b/kernel/cgroup.c
+@@ -2985,9 +2985,14 @@ static void cgroup_enable_task_cg_lists(
+ * We should check if the process is exiting, otherwise
+ * it will race with cgroup_exit() in that the list
+ * entry won't be deleted though the process has exited.
++ * Do it while holding siglock so that we don't end up
++ * racing against cgroup_exit().
+ */
++ spin_lock_irq(&p->sighand->siglock);
+ if (!(p->flags & PF_EXITING) && list_empty(&p->cg_list))
+ list_add(&p->cg_list, &task_css_set(p)->tasks);
++ spin_unlock_irq(&p->sighand->siglock);
++
+ task_unlock(p);
+ } while_each_thread(g, p);
+ read_unlock(&tasklist_lock);
--- /dev/null
+From ab3f5faa6255a0eb4f832675507d9e295ca7e9ba Mon Sep 17 00:00:00 2001
+From: Hugh Dickins <hughd@google.com>
+Date: Thu, 6 Feb 2014 15:56:01 -0800
+Subject: cgroup: use an ordered workqueue for cgroup destruction
+
+From: Hugh Dickins <hughd@google.com>
+
+commit ab3f5faa6255a0eb4f832675507d9e295ca7e9ba upstream.
+
+Sometimes the cleanup after memcg hierarchy testing gets stuck in
+mem_cgroup_reparent_charges(), unable to bring non-kmem usage down to 0.
+
+There may turn out to be several causes, but a major cause is this: the
+workitem to offline parent can get run before workitem to offline child;
+parent's mem_cgroup_reparent_charges() circles around waiting for the
+child's pages to be reparented to its lrus, but it's holding cgroup_mutex
+which prevents the child from reaching its mem_cgroup_reparent_charges().
+
+Just use an ordered workqueue for cgroup_destroy_wq.
+
+tj: Committing as the temporary fix until the reverse dependency can
+ be removed from memcg. Comment updated accordingly.
+
+Fixes: e5fca243abae ("cgroup: use a dedicated workqueue for cgroup destruction")
+Suggested-by: Filipe Brandenburger <filbranden@google.com>
+Signed-off-by: Hugh Dickins <hughd@google.com>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/cgroup.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+--- a/kernel/cgroup.c
++++ b/kernel/cgroup.c
+@@ -5093,12 +5093,16 @@ static int __init cgroup_wq_init(void)
+ /*
+ * There isn't much point in executing destruction path in
+ * parallel. Good chunk is serialized with cgroup_mutex anyway.
+- * Use 1 for @max_active.
++ *
++ * XXX: Must be ordered to make sure parent is offlined after
++ * children. The ordering requirement is for memcg where a
++ * parent's offline may wait for a child's leading to deadlock. In
++ * the long term, this should be fixed from memcg side.
+ *
+ * We would prefer to do this in cgroup_init() above, but that
+ * is called before init_workqueues(): so leave this until after.
+ */
+- cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
++ cgroup_destroy_wq = alloc_ordered_workqueue("cgroup_destroy", 0);
+ BUG_ON(!cgroup_destroy_wq);
+ return 0;
+ }
batman-adv-free-skb-on-tvlv-parsing-success.patch
batman-adv-avoid-double-free-when-orig_node-initialization-fails.patch
batman-adv-fix-potential-kernel-paging-error-for-unicast-transmissions.patch
+cgroup-use-an-ordered-workqueue-for-cgroup-destruction.patch
+cgroup-fix-error-return-value-in-cgroup_mount.patch
+cgroup-fix-error-return-from-cgroup_create.patch
+cgroup-fix-locking-in-cgroup_cfts_commit.patch
+cgroup-update-cgroup_enable_task_cg_lists-to-grab-siglock.patch