--- /dev/null
+From 4d839b14d2091a224a6d0a6fa1cffa58fc00d8a7 Mon Sep 17 00:00:00 2001
+From: Oleg Nesterov <oleg@redhat.com>
+Date: Wed, 12 Jun 2013 14:04:46 -0700
+Subject: audit: wait_for_auditd() should use TASK_UNINTERRUPTIBLE
+
+From: Oleg Nesterov <oleg@redhat.com>
+
+commit f000cfdde5de4fc15dead5ccf524359c07eadf2b upstream.
+
+audit_log_start() does wait_for_auditd() in a loop until
+audit_backlog_wait_time passes or audit_skb_queue has room.
+
+If signal_pending() is true, this becomes a busy-wait loop: schedule()
+in TASK_INTERRUPTIBLE won't block while a signal is pending.
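+
+In outline, the wait path looks like this (simplified sketch; the hunk
+below shows the one line being changed):
+
+	DECLARE_WAITQUEUE(wait, current);
+	set_current_state(TASK_INTERRUPTIBLE);
+	add_wait_queue(&audit_backlog_wait, &wait);
+	...
+	schedule_timeout(timeout);	/* returns immediately while a signal
+					 * is pending, so the caller loops
+					 * without ever blocking */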
+
+Thanks to Guy for fully investigating and explaining the problem.
+
+(akpm: that'll cause the system to lock up on a non-preemptible
+uniprocessor kernel)
+
+(Guy: "Our customer was in fact running a uniprocessor machine, and they
+reported a system hang.")
+
+Signed-off-by: Oleg Nesterov <oleg@redhat.com>
+Reported-by: Guy Streeter <streeter@redhat.com>
+Cc: Eric Paris <eparis@redhat.com>
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+[bwh: Backported to 3.2: adjust context, indentation]
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+Cc: Qiang Huang <h.huangqiang@huawei.com>
+Cc: Li Zefan <lizefan@huawei.com>
+Cc: Jianguo Wu <wujianguo@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/audit.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/audit.c
++++ b/kernel/audit.c
+@@ -1168,7 +1168,7 @@ struct audit_buffer *audit_log_start(str
+
+ /* Wait for auditd to drain the queue a little */
+ DECLARE_WAITQUEUE(wait, current);
+- set_current_state(TASK_INTERRUPTIBLE);
++ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&audit_backlog_wait, &wait);
+
+ if (audit_backlog_limit &&
--- /dev/null
+From c12558716007837227b962caa4299b355870fed6 Mon Sep 17 00:00:00 2001
+From: Daniel Santos <daniel.santos@pobox.com>
+Date: Thu, 21 Feb 2013 16:41:39 -0800
+Subject: compiler-gcc.h: Add gcc-recommended GCC_VERSION macro
+
+From: Daniel Santos <daniel.santos@pobox.com>
+
+commit 3f3f8d2f48acfd8ed3b8e6b7377935da57b27b16 upstream.
+
+Throughout compiler*.h, many version checks are made. These can be
+simplified by using the macro that gcc's documentation recommends.
+However, my primary reason for adding this is that I need bug-check
+macros that are enabled at certain gcc versions and it's cleaner to use
+this macro than the traditional method:
+
+ #if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 2)
+
+If you add patch level, it gets this ugly:
+
+ #if __GNUC__ > 4 || (__GNUC__ == 4 && (__GNUC_MINOR__ > 2 || \
+ __GNUC_MINOR__ == 2 && __GNUC_PATCHLEVEL__ >= 1))
+
+As opposed to:
+
+ #if GCC_VERSION >= 40201
+
+While having separate headers for gcc 3 & 4 eliminates some of this
+verbosity, the remaining checks can still be cleaned up by this macro.
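+
+For example, gcc 4.6.3 yields GCC_VERSION == 4 * 10000 + 6 * 100 + 3 ==
+40603, so a single check like "#if GCC_VERSION >= 40600" covers 4.6.0
+and everything later.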
+
+See also:
+
+ http://gcc.gnu.org/onlinedocs/cpp/Common-Predefined-Macros.html
+
+Signed-off-by: Daniel Santos <daniel.santos@pobox.com>
+Acked-by: Borislav Petkov <bp@alien8.de>
+Acked-by: David Rientjes <rientjes@google.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Joe Perches <joe@perches.com>
+Cc: Josh Triplett <josh@joshtriplett.org>
+Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+Cc: Qiang Huang <h.huangqiang@huawei.com>
+Cc: Li Zefan <lizefan@huawei.com>
+Cc: Jianguo Wu <wujianguo@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/compiler-gcc.h | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/include/linux/compiler-gcc.h
++++ b/include/linux/compiler-gcc.h
+@@ -5,6 +5,9 @@
+ /*
+ * Common definitions for all gcc versions go here.
+ */
++#define GCC_VERSION (__GNUC__ * 10000 \
++ + __GNUC_MINOR__ * 100 \
++ + __GNUC_PATCHLEVEL__)
+
+
+ /* Optimization barrier */
--- /dev/null
+From 3f0116c3238a96bc18ad4b4acefe4e7be32fa861 Mon Sep 17 00:00:00 2001
+From: Ingo Molnar <mingo@kernel.org>
+Date: Thu, 10 Oct 2013 10:16:30 +0200
+Subject: compiler/gcc4: Add quirk for 'asm goto' miscompilation bug
+
+From: Ingo Molnar <mingo@kernel.org>
+
+commit 3f0116c3238a96bc18ad4b4acefe4e7be32fa861 upstream.
+
+Fengguang Wu, Oleg Nesterov and Peter Zijlstra tracked down
+a kernel crash to a GCC bug: GCC miscompiles certain 'asm goto'
+constructs, as outlined here:
+
+ http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
+
+Implement a workaround suggested by Jakub Jelinek.
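+
+The workaround (added to include/linux/compiler-gcc4.h below) follows
+each 'asm goto' with an empty asm statement, which acts as a compiler
+barrier on the affected gcc versions:
+
+	#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)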
+
+Reported-and-tested-by: Fengguang Wu <fengguang.wu@intel.com>
+Reported-by: Oleg Nesterov <oleg@redhat.com>
+Reported-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Suggested-by: Jakub Jelinek <jakub@redhat.com>
+Reviewed-by: Richard Henderson <rth@twiddle.net>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+[hq: Backported to 3.4: Adjust context]
+Signed-off-by: Qiang Huang <h.huangqiang@huawei.com>
+Cc: Li Zefan <lizefan@huawei.com>
+Cc: Jianguo Wu <wujianguo@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/include/asm/jump_label.h | 2 +-
+ arch/powerpc/include/asm/jump_label.h | 2 +-
+ arch/s390/include/asm/jump_label.h | 2 +-
+ arch/sparc/include/asm/jump_label.h | 2 +-
+ arch/x86/include/asm/cpufeature.h | 2 +-
+ arch/x86/include/asm/jump_label.h | 2 +-
+ include/linux/compiler-gcc4.h | 16 ++++++++++++++++
+ 7 files changed, 22 insertions(+), 6 deletions(-)
+
+--- a/arch/mips/include/asm/jump_label.h
++++ b/arch/mips/include/asm/jump_label.h
+@@ -22,7 +22,7 @@
+
+ static __always_inline bool arch_static_branch(struct static_key *key)
+ {
+- asm goto("1:\tnop\n\t"
++ asm_volatile_goto("1:\tnop\n\t"
+ "nop\n\t"
+ ".pushsection __jump_table, \"aw\"\n\t"
+ WORD_INSN " 1b, %l[l_yes], %0\n\t"
+--- a/arch/powerpc/include/asm/jump_label.h
++++ b/arch/powerpc/include/asm/jump_label.h
+@@ -19,7 +19,7 @@
+
+ static __always_inline bool arch_static_branch(struct static_key *key)
+ {
+- asm goto("1:\n\t"
++ asm_volatile_goto("1:\n\t"
+ "nop\n\t"
+ ".pushsection __jump_table, \"aw\"\n\t"
+ JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t"
+--- a/arch/s390/include/asm/jump_label.h
++++ b/arch/s390/include/asm/jump_label.h
+@@ -15,7 +15,7 @@
+
+ static __always_inline bool arch_static_branch(struct static_key *key)
+ {
+- asm goto("0: brcl 0,0\n"
++ asm_volatile_goto("0: brcl 0,0\n"
+ ".pushsection __jump_table, \"aw\"\n"
+ ASM_ALIGN "\n"
+ ASM_PTR " 0b, %l[label], %0\n"
+--- a/arch/sparc/include/asm/jump_label.h
++++ b/arch/sparc/include/asm/jump_label.h
+@@ -9,7 +9,7 @@
+
+ static __always_inline bool arch_static_branch(struct static_key *key)
+ {
+- asm goto("1:\n\t"
++ asm_volatile_goto("1:\n\t"
+ "nop\n\t"
+ "nop\n\t"
+ ".pushsection __jump_table, \"aw\"\n\t"
+--- a/arch/x86/include/asm/cpufeature.h
++++ b/arch/x86/include/asm/cpufeature.h
+@@ -342,7 +342,7 @@ extern const char * const x86_power_flag
+ static __always_inline __pure bool __static_cpu_has(u16 bit)
+ {
+ #if __GNUC__ > 4 || __GNUC_MINOR__ >= 5
+- asm goto("1: jmp %l[t_no]\n"
++ asm_volatile_goto("1: jmp %l[t_no]\n"
+ "2:\n"
+ ".section .altinstructions,\"a\"\n"
+ " .long 1b - .\n"
+--- a/arch/x86/include/asm/jump_label.h
++++ b/arch/x86/include/asm/jump_label.h
+@@ -13,7 +13,7 @@
+
+ static __always_inline bool arch_static_branch(struct static_key *key)
+ {
+- asm goto("1:"
++ asm_volatile_goto("1:"
+ STATIC_KEY_INITIAL_NOP
+ ".pushsection __jump_table, \"aw\" \n\t"
+ _ASM_ALIGN "\n\t"
+--- a/include/linux/compiler-gcc4.h
++++ b/include/linux/compiler-gcc4.h
+@@ -31,6 +31,22 @@
+
+ #define __linktime_error(message) __attribute__((__error__(message)))
+
++/*
++ * GCC 'asm goto' miscompiles certain code sequences:
++ *
++ * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
++ *
++ * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
++ * Fixed in GCC 4.8.2 and later versions.
++ *
++ * (asm goto is automatically volatile - the naming reflects this.)
++ */
++#if GCC_VERSION <= 40801
++# define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
++#else
++# define asm_volatile_goto(x...) do { asm goto(x); } while (0)
++#endif
++
+ #if __GNUC_MINOR__ >= 5
+ /*
+ * Mark a position in code as unreachable. This can be used to
--- /dev/null
+From a2a9af6cffebedfbbe70081157096826ff4eef9a Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Wed, 27 Feb 2013 17:05:02 -0800
+Subject: idr: fix top layer handling
+
+From: Tejun Heo <tj@kernel.org>
+
+commit 326cf0f0f308933c10236280a322031f0097205d upstream.
+
+Most functions in idr fail to deal with the high bits when the idr
+tree grows to the maximum height.
+
+* idr_get_empty_slot() stops growing the idr tree once the depth reaches
+ MAX_IDR_LEVEL - 1, which is one depth shallower than necessary to
+ cover the whole range. The function doesn't even notice that it
+ didn't grow the tree enough and ends up allocating the wrong ID
+ given sufficiently high @starting_id.
+
+  For example, on 64 bit, if the starting id is 0x7fffff01,
+  idr_get_empty_slot() will grow the tree 5 layers deep, which only
+  covers the low 30 bits, and then proceed to allocate as if bit 30
+  wasn't specified. It ends up allocating 0x3fffff01, without bit 30,
+  but still returns 0x7fffff01.
+
+* __idr_remove_all() will not remove anything if the tree is fully
+ grown.
+
+* idr_find() can't find anything if the tree is fully grown.
+
+* idr_for_each() and idr_get_next() can't iterate anything if the tree
+ is fully grown.
+
+Fix it by introducing idr_max(), which returns the maximum possible ID
+given the depth of the tree, and replacing the id limit checks in all
+affected places.
+
+As the idr_layer pointer array pa[] needs to be 1 larger than the
+maximum depth, enlarge pa[] arrays by one.
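+
+The new helper, as added in the diff below (the backport uses the
+3.2-era MAX_ID_SHIFT name in place of upstream's MAX_IDR_SHIFT), is:
+
+	static int idr_max(int layers)
+	{
+		int bits = min_t(int, layers * IDR_BITS, MAX_ID_SHIFT);
+
+		return (1 << bits) - 1;
+	}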
+
+While this plugs the discovered issues, the whole code base is
+horrible and in desperate need of a rewrite. It's fragile as hell.
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Cc: Rusty Russell <rusty@rustcorp.com.au>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+[bwh: Backported to 3.2:
+ - Adjust context
+ - s/MAX_IDR_LEVEL/MAX_LEVEL/; s/MAX_IDR_SHIFT/MAX_ID_SHIFT/
+ - Drop change to idr_alloc()]
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+Cc: Qiang Huang <h.huangqiang@huawei.com>
+Cc: Li Zefan <lizefan@huawei.com>
+Cc: Jianguo Wu <wujianguo@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ lib/idr.c | 36 ++++++++++++++++++++++--------------
+ 1 file changed, 22 insertions(+), 14 deletions(-)
+
+--- a/lib/idr.c
++++ b/lib/idr.c
+@@ -39,6 +39,14 @@
+ static struct kmem_cache *idr_layer_cache;
+ static DEFINE_SPINLOCK(simple_ida_lock);
+
++/* the maximum ID which can be allocated given idr->layers */
++static int idr_max(int layers)
++{
++ int bits = min_t(int, layers * IDR_BITS, MAX_ID_SHIFT);
++
++ return (1 << bits) - 1;
++}
++
+ static struct idr_layer *get_from_free_list(struct idr *idp)
+ {
+ struct idr_layer *p;
+@@ -223,7 +231,7 @@ build_up:
+ * Add a new layer to the top of the tree if the requested
+ * id is larger than the currently allocated space.
+ */
+- while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
++ while (id > idr_max(layers)) {
+ layers++;
+ if (!p->count) {
+ /* special case: if the tree is currently empty,
+@@ -265,7 +273,7 @@ build_up:
+
+ static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
+ {
+- struct idr_layer *pa[MAX_LEVEL];
++ struct idr_layer *pa[MAX_LEVEL + 1];
+ int id;
+
+ id = idr_get_empty_slot(idp, starting_id, pa);
+@@ -357,7 +365,7 @@ static void idr_remove_warning(int id)
+ static void sub_remove(struct idr *idp, int shift, int id)
+ {
+ struct idr_layer *p = idp->top;
+- struct idr_layer **pa[MAX_LEVEL];
++ struct idr_layer **pa[MAX_LEVEL + 1];
+ struct idr_layer ***paa = &pa[0];
+ struct idr_layer *to_free;
+ int n;
+@@ -451,16 +459,16 @@ void idr_remove_all(struct idr *idp)
+ int n, id, max;
+ int bt_mask;
+ struct idr_layer *p;
+- struct idr_layer *pa[MAX_LEVEL];
++ struct idr_layer *pa[MAX_LEVEL + 1];
+ struct idr_layer **paa = &pa[0];
+
+ n = idp->layers * IDR_BITS;
+ p = idp->top;
+ rcu_assign_pointer(idp->top, NULL);
+- max = 1 << n;
++ max = idr_max(idp->layers);
+
+ id = 0;
+- while (id < max) {
++ while (id >= 0 && id <= max) {
+ while (n > IDR_BITS && p) {
+ n -= IDR_BITS;
+ *paa++ = p;
+@@ -519,7 +527,7 @@ void *idr_find(struct idr *idp, int id)
+ /* Mask off upper bits we don't use for the search. */
+ id &= MAX_ID_MASK;
+
+- if (id >= (1 << n))
++ if (id > idr_max(p->layer + 1))
+ return NULL;
+ BUG_ON(n == 0);
+
+@@ -555,15 +563,15 @@ int idr_for_each(struct idr *idp,
+ {
+ int n, id, max, error = 0;
+ struct idr_layer *p;
+- struct idr_layer *pa[MAX_LEVEL];
++ struct idr_layer *pa[MAX_LEVEL + 1];
+ struct idr_layer **paa = &pa[0];
+
+ n = idp->layers * IDR_BITS;
+ p = rcu_dereference_raw(idp->top);
+- max = 1 << n;
++ max = idr_max(idp->layers);
+
+ id = 0;
+- while (id < max) {
++ while (id >= 0 && id <= max) {
+ while (n > 0 && p) {
+ n -= IDR_BITS;
+ *paa++ = p;
+@@ -601,7 +609,7 @@ EXPORT_SYMBOL(idr_for_each);
+ */
+ void *idr_get_next(struct idr *idp, int *nextidp)
+ {
+- struct idr_layer *p, *pa[MAX_LEVEL];
++ struct idr_layer *p, *pa[MAX_LEVEL + 1];
+ struct idr_layer **paa = &pa[0];
+ int id = *nextidp;
+ int n, max;
+@@ -611,9 +619,9 @@ void *idr_get_next(struct idr *idp, int
+ if (!p)
+ return NULL;
+ n = (p->layer + 1) * IDR_BITS;
+- max = 1 << n;
++ max = idr_max(p->layer + 1);
+
+- while (id < max) {
++ while (id >= 0 && id <= max) {
+ while (n > 0 && p) {
+ n -= IDR_BITS;
+ *paa++ = p;
+@@ -787,7 +795,7 @@ EXPORT_SYMBOL(ida_pre_get);
+ */
+ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
+ {
+- struct idr_layer *pa[MAX_LEVEL];
++ struct idr_layer *pa[MAX_LEVEL + 1];
+ struct ida_bitmap *bitmap;
+ unsigned long flags;
+ int idr_id = starting_id / IDA_BITMAP_BITS;
--- /dev/null
+From 72df9eccdc41633b6eab5ef2489ff8acb8391c6c Mon Sep 17 00:00:00 2001
+From: Philipp Reisner <philipp.reisner@linbit.com>
+Date: Wed, 20 Jul 2011 14:59:37 +0200
+Subject: idr: idr_for_each_entry() macro
+
+From: Philipp Reisner <philipp.reisner@linbit.com>
+
+commit 9749f30f1a387070e6e8351f35aeb829eacc3ab6 upstream.
+
+Inspired by the list_for_each_entry() macro
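+
+Typical usage looks like this (illustrative sketch; struct foo, foo_idr
+and handle() are placeholder names, not part of this patch):
+
+	struct foo *entry;
+	int id;
+
+	idr_for_each_entry(&foo_idr, entry, id)
+		handle(entry);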
+
+Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+Cc: Qiang Huang <h.huangqiang@huawei.com>
+Cc: Li Zefan <lizefan@huawei.com>
+Cc: Jianguo Wu <wujianguo@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/idr.h | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+--- a/include/linux/idr.h
++++ b/include/linux/idr.h
+@@ -152,4 +152,15 @@ void ida_simple_remove(struct ida *ida,
+
+ void __init idr_init_cache(void);
+
++/**
++ * idr_for_each_entry - iterate over an idr's elements of a given type
++ * @idp: idr handle
++ * @entry: the type * to use as cursor
++ * @id: id entry's key
++ */
++#define idr_for_each_entry(idp, entry, id) \
++ for (id = 0, entry = (typeof(entry))idr_get_next((idp), &(id)); \
++ entry != NULL; \
++ ++id, entry = (typeof(entry))idr_get_next((idp), &(id)))
++
+ #endif /* __IDR_H__ */
--- /dev/null
+From f2414ee66a2901b376845bfec33f33b2a79fd004 Mon Sep 17 00:00:00 2001
+From: Mathias Krause <minipli@googlemail.com>
+Date: Tue, 12 Nov 2013 15:11:47 -0800
+Subject: ipc, msg: fix message length check for negative values
+
+From: Mathias Krause <minipli@googlemail.com>
+
+commit 4e9b45a19241354daec281d7a785739829b52359 upstream.
+
+On 64 bit systems the test for negative message sizes is bogus as the
+size, which may be positive when evaluated as a long, will get truncated
+to an int when passed to load_msg(). So a long might very well contain a
+positive value but when truncated to an int it would become negative.
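+
+(Concretely: the reproducer below passes msgsz = 0xfffffff0, which as a
+64-bit long is the positive value 4294967280 but becomes -16 when
+truncated to a 32-bit int.)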
+
+That, in combination with a small negative value of msg_ctlmax (which will
+be promoted to an unsigned type for the comparison against msgsz, making
+it a big positive value and therefore making the check pass), will lead to
+two problems: 1/ The kmalloc() call in alloc_msg() will allocate a too
+small buffer as the addition of alen is effectively a subtraction. 2/ The
+copy_from_user() call in load_msg() will first overflow the buffer with
+userland data and then, when the userland access generates an access
+violation, the fixup handler copy_user_handle_tail() will try to fill the
+remainder with zeros -- roughly 4GB. That almost instantly results in a
+system crash or reset.
+
+ ,-[ Reproducer (needs to be run as root) ]--
+ | #include <sys/stat.h>
+ | #include <sys/msg.h>
+ | #include <unistd.h>
+ | #include <fcntl.h>
+ |
+ | int main(void) {
+ | long msg = 1;
+ | int fd;
+ |
+ | fd = open("/proc/sys/kernel/msgmax", O_WRONLY);
+ | write(fd, "-1", 2);
+ | close(fd);
+ |
+ | msgsnd(0, &msg, 0xfffffff0, IPC_NOWAIT);
+ |
+ | return 0;
+ | }
+ '---
+
+Fix the issue by preventing msgsz from getting truncated by consistently
+using size_t for the message length. This way the size checks in
+do_msgsnd() could still be passed with a negative value for msg_ctlmax but
+we would fail on the buffer allocation in that case and error out.
+
+Also change the type of m_ts from int to size_t to avoid similar nastiness
+in other code paths -- it is used in similar constructs, i.e. signed vs.
+unsigned checks. It should never become negative under normal
+circumstances, though.
+
+Setting msg_ctlmax to a negative value is an odd configuration and should
+be prevented. As that might break existing userland, it will be handled
+in a separate commit so it could easily be reverted and reworked without
+reintroducing the above described bug.
+
+Hardening mechanisms for user copy operations would have caught that bug
+early -- e.g. checking slab object sizes on user copy operations, as the
+usercopy feature of the PaX patch does. Or, for that matter, detecting the
+long vs. int sign change due to truncation, as the size overflow plugin
+of the very same patch does.
+
+[akpm@linux-foundation.org: fix i386 min() warnings]
+Signed-off-by: Mathias Krause <minipli@googlemail.com>
+Cc: Pax Team <pageexec@freemail.hu>
+Cc: Davidlohr Bueso <davidlohr@hp.com>
+Cc: Brad Spengler <spender@grsecurity.net>
+Cc: Manfred Spraul <manfred@colorfullife.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+[bwh: Backported to 3.2:
+ - Adjust context
+ - Drop changes to alloc_msg() and copy_msg(), which don't exist]
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+Cc: Qiang Huang <h.huangqiang@huawei.com>
+Cc: Li Zefan <lizefan@huawei.com>
+Cc: Jianguo Wu <wujianguo@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/msg.h | 6 +++---
+ ipc/msgutil.c | 12 ++++++------
+ ipc/util.h | 4 ++--
+ 3 files changed, 11 insertions(+), 11 deletions(-)
+
+--- a/include/linux/msg.h
++++ b/include/linux/msg.h
+@@ -76,9 +76,9 @@ struct msginfo {
+
+ /* one msg_msg structure for each message */
+ struct msg_msg {
+- struct list_head m_list;
+- long m_type;
+- int m_ts; /* message text size */
++ struct list_head m_list;
++ long m_type;
++ size_t m_ts; /* message text size */
+ struct msg_msgseg* next;
+ void *security;
+ /* the actual message follows immediately */
+--- a/ipc/msgutil.c
++++ b/ipc/msgutil.c
+@@ -39,15 +39,15 @@ struct msg_msgseg {
+ /* the next part of the message follows immediately */
+ };
+
+-#define DATALEN_MSG (PAGE_SIZE-sizeof(struct msg_msg))
+-#define DATALEN_SEG (PAGE_SIZE-sizeof(struct msg_msgseg))
++#define DATALEN_MSG ((size_t)PAGE_SIZE-sizeof(struct msg_msg))
++#define DATALEN_SEG ((size_t)PAGE_SIZE-sizeof(struct msg_msgseg))
+
+-struct msg_msg *load_msg(const void __user *src, int len)
++struct msg_msg *load_msg(const void __user *src, size_t len)
+ {
+ struct msg_msg *msg;
+ struct msg_msgseg **pseg;
+ int err;
+- int alen;
++ size_t alen;
+
+ alen = len;
+ if (alen > DATALEN_MSG)
+@@ -101,9 +101,9 @@ out_err:
+ return ERR_PTR(err);
+ }
+
+-int store_msg(void __user *dest, struct msg_msg *msg, int len)
++int store_msg(void __user *dest, struct msg_msg *msg, size_t len)
+ {
+- int alen;
++ size_t alen;
+ struct msg_msgseg *seg;
+
+ alen = len;
+--- a/ipc/util.h
++++ b/ipc/util.h
+@@ -138,8 +138,8 @@ int ipc_parse_version (int *cmd);
+ #endif
+
+ extern void free_msg(struct msg_msg *msg);
+-extern struct msg_msg *load_msg(const void __user *src, int len);
+-extern int store_msg(void __user *dest, struct msg_msg *msg, int len);
++extern struct msg_msg *load_msg(const void __user *src, size_t len);
++extern int store_msg(void __user *dest, struct msg_msg *msg, size_t len);
+
+ extern void recompute_msgmni(struct ipc_namespace *);
+
--- /dev/null
+From 7063a9950d1005f4e797bd047eae7fda181209fb Mon Sep 17 00:00:00 2001
+From: George Spelvin <linux@horizon.com>
+Date: Sun, 10 Feb 2013 04:08:32 -0500
+Subject: pps: Add pps_lookup_dev() function
+
+From: George Spelvin <linux@horizon.com>
+
+commit 513b032c98b4b9414aa4e9b4a315cb1bf0380101 upstream.
+
+The PPS serial line discipline wants to attach a PPS device to a tty
+without changing the tty code to add a struct pps_device * pointer.
+
+Since the number of PPS devices in a typical system is generally very low
+(n=1 is by far the most common), it's practical to search the entire list
+of allocated pps devices. (We capture the timestamp before the lookup,
+so the timing isn't affected.)
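+
+A client stores its own pointer in pps->lookup_cookie after registering
+the source and later finds the device again with that pointer, e.g.
+(sketch; this mirrors how the PPS line discipline uses it later in this
+series):
+
+	pps->lookup_cookie = tty;			/* after registration */
+	...
+	struct pps_device *pps = pps_lookup_dev(tty);	/* at event time */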
+
+It is a bit ugly that this function, which is part of the in-kernel
+PPS API, has to be in pps.c as opposed to kapi.c, but that's not
+something that affects users.
+
+Signed-off-by: George Spelvin <linux@horizon.com>
+Acked-by: Rodolfo Giometti <giometti@enneenne.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+Cc: Qiang Huang <h.huangqiang@huawei.com>
+Cc: Li Zefan <lizefan@huawei.com>
+Cc: Jianguo Wu <wujianguo@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/pps/pps.c | 33 +++++++++++++++++++++++++++++++++
+ include/linux/pps_kernel.h | 17 ++++++++++++++---
+ 2 files changed, 47 insertions(+), 3 deletions(-)
+
+--- a/drivers/pps/pps.c
++++ b/drivers/pps/pps.c
+@@ -350,11 +350,44 @@ free_idr:
+
+ void pps_unregister_cdev(struct pps_device *pps)
+ {
++ pps->lookup_cookie = NULL;
+ device_destroy(pps_class, pps->dev->devt);
+ cdev_del(&pps->cdev);
+ }
+
+ /*
++ * Look up a pps device by magic cookie.
++ * The cookie is usually a pointer to some enclosing device, but this
++ * code doesn't care; you should never be dereferencing it.
++ *
++ * This is a bit of a kludge that is currently used only by the PPS
++ * serial line discipline. It may need to be tweaked when a second user
++ * is found.
++ *
++ * There is no function interface for setting the lookup_cookie field.
++ * It's initialized to NULL when the pps device is created, and if a
++ * client wants to use it, just fill it in afterward.
++ *
++ * The cookie is automatically set to NULL in pps_unregister_source()
++ * so that it will not be used again, even if the pps device cannot
++ * be removed from the idr due to pending references holding the minor
++ * number in use.
++ */
++struct pps_device *pps_lookup_dev(void const *cookie)
++{
++ struct pps_device *pps;
++ unsigned id;
++
++ rcu_read_lock();
++ idr_for_each_entry(&pps_idr, pps, id)
++ if (cookie == pps->lookup_cookie)
++ break;
++ rcu_read_unlock();
++ return pps;
++}
++EXPORT_SYMBOL(pps_lookup_dev);
++
++/*
+ * Module stuff
+ */
+
+--- a/include/linux/pps_kernel.h
++++ b/include/linux/pps_kernel.h
+@@ -43,7 +43,7 @@ struct pps_source_info {
+ int event, void *data); /* PPS echo function */
+
+ struct module *owner;
+- struct device *dev;
++ struct device *dev; /* Parent device for device_create */
+ };
+
+ struct pps_event_time {
+@@ -69,6 +69,7 @@ struct pps_device {
+ wait_queue_head_t queue; /* PPS event queue */
+
+ unsigned int id; /* PPS source unique ID */
++ void const *lookup_cookie; /* pps_lookup_dev only */
+ struct cdev cdev;
+ struct device *dev;
+ struct fasync_struct *async_queue; /* fasync method */
+@@ -82,16 +83,26 @@ struct pps_device {
+ extern struct device_attribute pps_attrs[];
+
+ /*
++ * Internal functions.
++ *
++ * These are not actually part of the exported API, but this is a
++ * convenient header file to put them in.
++ */
++
++extern int pps_register_cdev(struct pps_device *pps);
++extern void pps_unregister_cdev(struct pps_device *pps);
++
++/*
+ * Exported functions
+ */
+
+ extern struct pps_device *pps_register_source(
+ struct pps_source_info *info, int default_params);
+ extern void pps_unregister_source(struct pps_device *pps);
+-extern int pps_register_cdev(struct pps_device *pps);
+-extern void pps_unregister_cdev(struct pps_device *pps);
+ extern void pps_event(struct pps_device *pps,
+ struct pps_event_time *ts, int event, void *data);
++/* Look up a pps device by magic cookie */
++struct pps_device *pps_lookup_dev(void const *cookie);
+
+ static inline void timespec_to_pps_ktime(struct pps_ktime *kt,
+ struct timespec ts)
--- /dev/null
+From 77327a71f9841b7dfa708195d1cb133d4ef4a989 Mon Sep 17 00:00:00 2001
+From: George Spelvin <linux@horizon.com>
+Date: Tue, 12 Feb 2013 02:27:20 -0500
+Subject: pps: Fix a use-after-free bug when unregistering a source.
+
+From: George Spelvin <linux@horizon.com>
+
+commit d953e0e837e65ecc1ddaa4f9560f7925878a0de6 upstream.
+
+Remove the cdev from the system (with cdev_del) *before* deallocating it
+(in pps_device_destruct, called via kobject_put from device_destroy).
+
+Also prevent deallocating a device with open file handles.
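+
+Open file handles now pin the device via its embedded kobject, roughly
+(matching the hunks below):
+
+	kobject_get(&pps->dev->kobj);	/* in pps_cdev_open() */
+	kobject_put(&pps->dev->kobj);	/* in pps_cdev_release() */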
+
+A better long-term fix is probably to remove the cdev from the pps_device
+entirely, and instead have all devices reference one global cdev. Then
+the deallocation ordering becomes simpler.
+
+But that's a more complex and invasive change, so we leave it
+for later.
+
+Signed-off-by: George Spelvin <linux@horizon.com>
+Acked-by: Rodolfo Giometti <giometti@enneenne.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+Cc: Qiang Huang <h.huangqiang@huawei.com>
+Cc: Li Zefan <lizefan@huawei.com>
+Cc: Jianguo Wu <wujianguo@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/pps/pps.c | 14 ++++++++++----
+ 1 file changed, 10 insertions(+), 4 deletions(-)
+
+--- a/drivers/pps/pps.c
++++ b/drivers/pps/pps.c
+@@ -247,12 +247,15 @@ static int pps_cdev_open(struct inode *i
+ struct pps_device *pps = container_of(inode->i_cdev,
+ struct pps_device, cdev);
+ file->private_data = pps;
+-
++ kobject_get(&pps->dev->kobj);
+ return 0;
+ }
+
+ static int pps_cdev_release(struct inode *inode, struct file *file)
+ {
++ struct pps_device *pps = container_of(inode->i_cdev,
++ struct pps_device, cdev);
++ kobject_put(&pps->dev->kobj);
+ return 0;
+ }
+
+@@ -274,8 +277,10 @@ static void pps_device_destruct(struct d
+ {
+ struct pps_device *pps = dev_get_drvdata(dev);
+
+- /* release id here to protect others from using it while it's
+- * still in use */
++ cdev_del(&pps->cdev);
++
++ /* Now we can release the ID for re-use */
++ pr_debug("deallocating pps%d\n", pps->id);
+ mutex_lock(&pps_idr_lock);
+ idr_remove(&pps_idr, pps->id);
+ mutex_unlock(&pps_idr_lock);
+@@ -330,6 +335,7 @@ int pps_register_cdev(struct pps_device
+ if (IS_ERR(pps->dev))
+ goto del_cdev;
+
++ /* Override the release function with our own */
+ pps->dev->release = pps_device_destruct;
+
+ pr_debug("source %s got cdev (%d:%d)\n", pps->info.name,
+@@ -350,9 +356,9 @@ free_idr:
+
+ void pps_unregister_cdev(struct pps_device *pps)
+ {
++ pr_debug("unregistering pps%d\n", pps->id);
+ pps->lookup_cookie = NULL;
+ device_destroy(pps_class, pps->dev->devt);
+- cdev_del(&pps->cdev);
+ }
+
+ /*
--- /dev/null
+From 24625640fe278feac753e36888ff1d335d39964b Mon Sep 17 00:00:00 2001
+From: George Spelvin <linux@horizon.com>
+Date: Sun, 10 Feb 2013 04:41:56 -0500
+Subject: pps: Use pps_lookup_dev to reduce ldisc coupling
+
+From: George Spelvin <linux@horizon.com>
+
+commit 03a7ffe4e542310838bac70ef85acc17536b6d7c upstream.
+
+Now that N_TTY uses tty->disc_data for its private data,
+'subclass' ldiscs cannot use ->disc_data for their own private data.
+(This is a regression in v3.8-rc1.)
+
+Use pps_lookup_dev to associate the tty with the pps source instead.
+
+This fixes a crashing regression in 3.8-rc1.
+
+Signed-off-by: George Spelvin <linux@horizon.com>
+Acked-by: Rodolfo Giometti <giometti@enneenne.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+Cc: Qiang Huang <h.huangqiang@huawei.com>
+Cc: Li Zefan <lizefan@huawei.com>
+Cc: Jianguo Wu <wujianguo@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/pps/clients/pps-ldisc.c | 10 ++++------
+ 1 file changed, 4 insertions(+), 6 deletions(-)
+
+--- a/drivers/pps/clients/pps-ldisc.c
++++ b/drivers/pps/clients/pps-ldisc.c
+@@ -31,7 +31,7 @@
+ static void pps_tty_dcd_change(struct tty_struct *tty, unsigned int status,
+ struct pps_event_time *ts)
+ {
+- struct pps_device *pps = (struct pps_device *)tty->disc_data;
++ struct pps_device *pps = pps_lookup_dev(tty);
+
+ BUG_ON(pps == NULL);
+
+@@ -67,9 +67,9 @@ static int pps_tty_open(struct tty_struc
+ pr_err("cannot register PPS source \"%s\"\n", info.path);
+ return -ENOMEM;
+ }
+- tty->disc_data = pps;
++ pps->lookup_cookie = tty;
+
+- /* Should open N_TTY ldisc too */
++ /* Now open the base class N_TTY ldisc */
+ ret = alias_n_tty_open(tty);
+ if (ret < 0) {
+ pr_err("cannot open tty ldisc \"%s\"\n", info.path);
+@@ -81,7 +81,6 @@ static int pps_tty_open(struct tty_struc
+ return 0;
+
+ err_unregister:
+- tty->disc_data = NULL;
+ pps_unregister_source(pps);
+ return ret;
+ }
+@@ -90,11 +89,10 @@ static void (*alias_n_tty_close)(struct
+
+ static void pps_tty_close(struct tty_struct *tty)
+ {
+- struct pps_device *pps = (struct pps_device *)tty->disc_data;
++ struct pps_device *pps = pps_lookup_dev(tty);
+
+ alias_n_tty_close(tty);
+
+- tty->disc_data = NULL;
+ dev_info(pps->dev, "removed\n");
+ pps_unregister_source(pps);
+ }
--- /dev/null
+From ff766b8d26c7669cc1ae9048a04cf0dd5607f02c Mon Sep 17 00:00:00 2001
+From: "Bu, Yitian" <ybu@qti.qualcomm.com>
+Date: Mon, 18 Feb 2013 12:53:37 +0000
+Subject: printk: Fix rq->lock vs logbuf_lock unlock lock inversion
+
+From: "Bu, Yitian" <ybu@qti.qualcomm.com>
+
+commit dbda92d16f8655044e082930e4e9d244b87fde77 upstream.
+
+commit 07354eb1a74d1 ("locking printk: Annotate logbuf_lock as raw")
+reintroduced a lock inversion problem which was fixed in commit
+0b5e1c5255 ("printk: Release console_sem after logbuf_lock"). This
+probably happened when fixing up patch rejects.
+
+Restore the ordering and unlock logbuf_lock before releasing
+console_sem.
+
+Signed-off-by: ybu <ybu@qti.qualcomm.com>
+Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Link: http://lkml.kernel.org/r/E807E903FE6CBE4D95E420FBFCC273B827413C@nasanexd01h.na.qualcomm.com
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+[bwh: Backported to 3.2: adjust context]
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+Cc: Qiang Huang <h.huangqiang@huawei.com>
+Cc: Li Zefan <lizefan@huawei.com>
+Cc: Jianguo Wu <wujianguo@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/printk.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/printk.c
++++ b/kernel/printk.c
+@@ -822,9 +822,9 @@ static int console_trylock_for_printk(un
+ }
+ }
+ printk_cpu = UINT_MAX;
++ raw_spin_unlock(&logbuf_lock);
+ if (wake)
+ up(&console_sem);
+- raw_spin_unlock(&logbuf_lock);
+ return retval;
+ }
+ static const char recursion_bug_msg [] =
--- /dev/null
+From 4efbbb6ce0d1d46e57be495e791c109277f07a14 Mon Sep 17 00:00:00 2001
+From: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
+Date: Mon, 17 Dec 2012 16:03:17 -0800
+Subject: proc: pid/status: show all supplementary groups
+
+From: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
+
+commit 8d238027b87e654be552eabdf492042a34c5c300 upstream.
+
+We display a list of supplementary groups for each process in
+/proc/<pid>/status. However, we show only the first 32 groups, not all of
+them.
+
+Although this is rare, sometimes processes do have more than 32
+supplementary groups, and this kernel limitation breaks user-space apps
+that rely on the group list in /proc/<pid>/status.
+
+The number 32 comes from the internal NGROUPS_SMALL macro, which defines the
+length for the internal kernel "small" groups buffer. There is no
+apparent reason to limit to this value.
+
+This patch removes the 32 groups printing limit.
+
+The Linux kernel limits the amount of supplementary groups by NGROUPS_MAX,
+which is currently set to 65536. And this is the maximum count of groups
+we may possibly print.
+
+Signed-off-by: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
+Acked-by: Serge E. Hallyn <serge.hallyn@ubuntu.com>
+Acked-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+[bwh: Backported to 3.2: adjust context]
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+Cc: Qiang Huang <h.huangqiang@huawei.com>
+Cc: Li Zefan <lizefan@huawei.com>
+Cc: Jianguo Wu <wujianguo@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/proc/array.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/proc/array.c
++++ b/fs/proc/array.c
+@@ -204,7 +204,7 @@ static inline void task_state(struct seq
+ group_info = cred->group_info;
+ task_unlock(p);
+
+- for (g = 0; g < min(group_info->ngroups, NGROUPS_SMALL); g++)
++ for (g = 0; g < group_info->ngroups; g++)
+ seq_printf(m, "%d ", GROUP_AT(group_info, g));
+ put_cred(cred);
+
--- /dev/null
+proc-pid-status-show-all-supplementary-groups.patch
+idr-fix-top-layer-handling.patch
+audit-wait_for_auditd-should-use-task_uninterruptible.patch
+printk-fix-rq-lock-vs-logbuf_lock-unlock-lock-inversion.patch
+workqueue-cond_resched-after-processing-each-work-item.patch
+compiler-gcc.h-add-gcc-recommended-gcc_version-macro.patch
+compiler-gcc4-add-quirk-for-asm-goto-miscompilation-bug.patch
+ipc-msg-fix-message-length-check-for-negative-values.patch
+idr-idr_for_each_entry-macro.patch
+pps-add-pps_lookup_dev-function.patch
+pps-use-pps_lookup_dev-to-reduce-ldisc-coupling.patch
+pps-fix-a-use-after-free-bug-when-unregistering-a-source.patch
--- /dev/null
+From fbbd6511ab0dff8a79fc5803250b77a1260be354 Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Wed, 28 Aug 2013 17:33:37 -0400
+Subject: workqueue: cond_resched() after processing each work item
+
+From: Tejun Heo <tj@kernel.org>
+
+commit b22ce2785d97423846206cceec4efee0c4afd980 upstream.
+
+If !PREEMPT, a kworker running work items back to back can hog CPU.
+This becomes dangerous when a self-requeueing work item which is
+waiting for something to happen races against stop_machine. Such a
+self-requeueing work item would requeue itself indefinitely, hogging
+the kworker and the CPU it's running on, while stop_machine waits for
+that CPU to enter stop_machine, preventing anything else from
+happening on all other CPUs. The two would deadlock.
+
+Jamie Liu reports that this deadlock scenario exists around
+scsi_requeue_run_queue() and libata port multiplier support, where one
+port may exclude command processing from other ports. With the right
+timing, scsi_requeue_run_queue() can end up requeueing itself trying
+to execute an IO which is asked to be retried while another device has
+exclusive access, which in turn can't make forward progress due to
+stop_machine.
+
+Fix it by invoking cond_resched() after executing each work item.
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Reported-by: Jamie Liu <jamieliu@google.com>
+References: http://thread.gmane.org/gmane.linux.kernel/1552567
+[bwh: Backported to 3.2: adjust context]
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+Cc: Qiang Huang <h.huangqiang@huawei.com>
+Cc: Li Zefan <lizefan@huawei.com>
+Cc: Jianguo Wu <wujianguo@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/workqueue.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -1922,6 +1922,15 @@ __acquires(&gcwq->lock)
+ dump_stack();
+ }
+
++ /*
++ * The following prevents a kworker from hogging CPU on !PREEMPT
++ * kernels, where a requeueing work item waiting for something to
++ * happen could deadlock with stop_machine as such work item could
++ * indefinitely requeue itself while all other CPUs are trapped in
++ * stop_machine.
++ */
++ cond_resched();
++
+ spin_lock_irq(&gcwq->lock);
+
+ /* clear cpu intensive status */