--- /dev/null
+From f3713fd9cff733d9df83116422d8e4af6e86b2bb Mon Sep 17 00:00:00 2001
+From: Davidlohr Bueso <davidlohr@hp.com>
+Date: Tue, 25 Feb 2014 15:01:45 -0800
+Subject: ipc,mqueue: remove limits for the amount of system-wide
+ queues
+
+From: Davidlohr Bueso <davidlohr@hp.com>
+
+commit f3713fd9cff733d9df83116422d8e4af6e86b2bb upstream.
+
+Commit 93e6f119c0ce ("ipc/mqueue: cleanup definition names and
+locations") added global hardcoded limits to the amount of message
+queues that can be created. While these limits are per-namespace,
+reality is that it ends up breaking userspace applications.
+Historically users have, at least in theory, been able to create up to
+INT_MAX queues, and limiting it to just 1024 is way too low and dramatic
+for some workloads and use cases. For instance, Madars reports:
+
+ "This update imposes bad limits on our multi-process application. As
+ our app uses approaches that each process opens its own set of queues
+ (usually something about 3-5 queues per process). In some scenarios
+ we might run up to 3000 processes or more (which of-course for linux
+ is not a problem). Thus we might need up to 9000 queues or more. All
+ processes run under one user."
+
+Other affected users can be found in launchpad bug #1155695:
+ https://bugs.launchpad.net/ubuntu/+source/manpages/+bug/1155695
+
+Instead of increasing this limit, revert it entirely and fallback to the
+original way of dealing with queue limits -- where once a user's resource
+limit is reached, and all memory is used, new queues cannot be created.
+
+Signed-off-by: Davidlohr Bueso <davidlohr@hp.com>
+Reported-by: Madars Vitolins <m@silodev.com>
+Acked-by: Doug Ledford <dledford@redhat.com>
+Cc: Manfred Spraul <manfred@colorfullife.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/ipc_namespace.h | 2 --
+ ipc/mq_sysctl.c | 18 ++++++++++++------
+ ipc/mqueue.c | 6 +++---
+ 3 files changed, 15 insertions(+), 11 deletions(-)
+
+--- a/include/linux/ipc_namespace.h
++++ b/include/linux/ipc_namespace.h
+@@ -119,9 +119,7 @@ extern int mq_init_ns(struct ipc_namespa
+ * the new maximum will handle anyone else. I may have to revisit this
+ * in the future.
+ */
+-#define MIN_QUEUESMAX 1
+ #define DFLT_QUEUESMAX 256
+-#define HARD_QUEUESMAX 1024
+ #define MIN_MSGMAX 1
+ #define DFLT_MSG 10U
+ #define DFLT_MSGMAX 10
+--- a/ipc/mq_sysctl.c
++++ b/ipc/mq_sysctl.c
+@@ -22,6 +22,16 @@ static void *get_mq(ctl_table *table)
+ return which;
+ }
+
++static int proc_mq_dointvec(ctl_table *table, int write,
++ void __user *buffer, size_t *lenp, loff_t *ppos)
++{
++ struct ctl_table mq_table;
++ memcpy(&mq_table, table, sizeof(mq_table));
++ mq_table.data = get_mq(table);
++
++ return proc_dointvec(&mq_table, write, buffer, lenp, ppos);
++}
++
+ static int proc_mq_dointvec_minmax(ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+ {
+@@ -33,12 +43,10 @@ static int proc_mq_dointvec_minmax(ctl_t
+ lenp, ppos);
+ }
+ #else
++#define proc_mq_dointvec NULL
+ #define proc_mq_dointvec_minmax NULL
+ #endif
+
+-static int msg_queues_limit_min = MIN_QUEUESMAX;
+-static int msg_queues_limit_max = HARD_QUEUESMAX;
+-
+ static int msg_max_limit_min = MIN_MSGMAX;
+ static int msg_max_limit_max = HARD_MSGMAX;
+
+@@ -51,9 +59,7 @@ static ctl_table mq_sysctls[] = {
+ .data = &init_ipc_ns.mq_queues_max,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+- .proc_handler = proc_mq_dointvec_minmax,
+- .extra1 = &msg_queues_limit_min,
+- .extra2 = &msg_queues_limit_max,
++ .proc_handler = proc_mq_dointvec,
+ },
+ {
+ .procname = "msg_max",
+--- a/ipc/mqueue.c
++++ b/ipc/mqueue.c
+@@ -433,9 +433,9 @@ static int mqueue_create(struct inode *d
+ error = -EACCES;
+ goto out_unlock;
+ }
+- if (ipc_ns->mq_queues_count >= HARD_QUEUESMAX ||
+- (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
+- !capable(CAP_SYS_RESOURCE))) {
++
++ if (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
++ !capable(CAP_SYS_RESOURCE)) {
+ error = -ENOSPC;
+ goto out_unlock;
+ }
--- /dev/null
+From 1362f4ea20fa63688ba6026e586d9746ff13a846 Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Thu, 20 Feb 2014 17:02:27 +0100
+Subject: quota: Fix race between dqput() and dquot_scan_active()
+
+From: Jan Kara <jack@suse.cz>
+
+commit 1362f4ea20fa63688ba6026e586d9746ff13a846 upstream.
+
+Currently last dqput() can race with dquot_scan_active() causing it to
+call callback for an already deactivated dquot. The race is as follows:
+
+CPU1 CPU2
+ dqput()
+ spin_lock(&dq_list_lock);
+ if (atomic_read(&dquot->dq_count) > 1) {
+ - not taken
+ if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
+ spin_unlock(&dq_list_lock);
+ ->release_dquot(dquot);
+ if (atomic_read(&dquot->dq_count) > 1)
+ - not taken
+ dquot_scan_active()
+ spin_lock(&dq_list_lock);
+ if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
+ - not taken
+ atomic_inc(&dquot->dq_count);
+ spin_unlock(&dq_list_lock);
+ - proceeds to release dquot
+ ret = fn(dquot, priv);
+ - called for inactive dquot
+
+Fix the problem by making sure possible ->release_dquot() is finished by
+the time we call the callback and new calls to it will notice the reference
+dquot_scan_active() has taken and bail out.
+
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/quota/dquot.c | 14 +++++++++++---
+ 1 file changed, 11 insertions(+), 3 deletions(-)
+
+--- a/fs/quota/dquot.c
++++ b/fs/quota/dquot.c
+@@ -581,9 +581,17 @@ int dquot_scan_active(struct super_block
+ dqstats_inc(DQST_LOOKUPS);
+ dqput(old_dquot);
+ old_dquot = dquot;
+- ret = fn(dquot, priv);
+- if (ret < 0)
+- goto out;
++ /*
++ * ->release_dquot() can be racing with us. Our reference
++ * protects us from new calls to it so just wait for any
++ * outstanding call and recheck the DQ_ACTIVE_B after that.
++ */
++ wait_on_dquot(dquot);
++ if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
++ ret = fn(dquot, priv);
++ if (ret < 0)
++ goto out;
++ }
+ spin_lock(&dq_list_lock);
+ /* We are safe to continue now because our dquot could not
+ * be moved out of the inuse list while we hold the reference */
--- /dev/null
+From 9085a6422900092886da8c404e1c5340c4ff1cbf Mon Sep 17 00:00:00 2001
+From: Eric Paris <eparis@redhat.com>
+Date: Thu, 20 Feb 2014 10:56:45 -0500
+Subject: SELinux: bigendian problems with filename trans rules
+
+From: Eric Paris <eparis@redhat.com>
+
+commit 9085a6422900092886da8c404e1c5340c4ff1cbf upstream.
+
+When writing policy via /sys/fs/selinux/policy I wrote the type and class
+of filename trans rules in CPU endian instead of little endian. On
+x86_64 this works just fine, but it means that on big endian arch's like
+ppc64 and s390 userspace reads the policy and converts it from
+le32_to_cpu. So the values are all screwed up. Write the values in le
+format like it should have been to start.
+
+Signed-off-by: Eric Paris <eparis@redhat.com>
+Acked-by: Stephen Smalley <sds@tycho.nsa.gov>
+Signed-off-by: Paul Moore <pmoore@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ security/selinux/ss/policydb.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/security/selinux/ss/policydb.c
++++ b/security/selinux/ss/policydb.c
+@@ -3258,10 +3258,10 @@ static int filename_write_helper(void *k
+ if (rc)
+ return rc;
+
+- buf[0] = ft->stype;
+- buf[1] = ft->ttype;
+- buf[2] = ft->tclass;
+- buf[3] = otype->otype;
++ buf[0] = cpu_to_le32(ft->stype);
++ buf[1] = cpu_to_le32(ft->ttype);
++ buf[2] = cpu_to_le32(ft->tclass);
++ buf[3] = cpu_to_le32(otype->otype);
+
+ rc = put_entry(buf, sizeof(u32), 4, fp);
+ if (rc)