--- /dev/null
+From 12b2f117f3bf738c1a00a6f64393f1953a740bd4 Mon Sep 17 00:00:00 2001
+From: Chen Gang <gang.chen@asianux.com>
+Date: Mon, 29 Apr 2013 15:05:19 -0700
+Subject: kernel/audit_tree.c: tree will leak memory when failure occurs in audit_trim_trees()
+
+From: Chen Gang <gang.chen@asianux.com>
+
+commit 12b2f117f3bf738c1a00a6f64393f1953a740bd4 upstream.
+
+audit_trim_trees() calls get_tree(). If a failure occurs we must call
+put_tree().
+
+[akpm@linux-foundation.org: run put_tree() before mutex_lock() for small scalability improvement]
+Signed-off-by: Chen Gang <gang.chen@asianux.com>
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Cc: Eric Paris <eparis@redhat.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Jonghwan Choi <jhbird.choi@samsung.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/audit_tree.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/audit_tree.c
++++ b/kernel/audit_tree.c
+@@ -608,9 +608,9 @@ void audit_trim_trees(void)
+ }
+ spin_unlock(&hash_lock);
+ trim_marked(tree);
+- put_tree(tree);
+ drop_collected_mounts(root_mnt);
+ skip_it:
++ put_tree(tree);
+ mutex_lock(&audit_filter_mutex);
+ }
+ list_del(&cursor);
--- /dev/null
+From jerry.hoemann@hp.com Thu May 9 13:59:15 2013
+From: Jerry Hoemann <jerry.hoemann@hp.com>
+Date: Tue, 30 Apr 2013 15:15:55 -0600
+Subject: x86/mm: account for PGDIR_SIZE alignment
+To: tglx@linutronix.de, mingo@redhat.com, hpa@zytor.com
+Cc: x86@kernel.org, jacob.shin@amd.com, gregkh@linuxfoundation.org, yinghai@kernel.org, Jerry Hoemann <jerry.hoemann@hp.com>
+Message-ID: <1367356555-16320-1-git-send-email-jerry.hoemann@hp.com>
+
+From: Jerry Hoemann <jerry.hoemann@hp.com>
+
+Patch for -stable. Function find_early_table_space removed upstream.
+
+Fixes panic in alloc_low_page due to pgt_buf overflow during
+init_memory_mapping.
+
+find_early_table_space sizes pgt_buf based upon the size of the
+memory being mapped, but it does not take into account the alignment
+of the memory. When the region being mapped spans a 512GB (PGDIR_SIZE)
+alignment, a panic from alloc_low_page occurs.
+
+kernel_physical_mapping_init takes into account PGDIR_SIZE alignment.
+This causes an extra call to alloc_low_page to be made. This extra call
+isn't accounted for by find_early_table_space and causes a kernel panic.
+
+The change is to take PGDIR_SIZE alignment into account in find_early_table_space.
+
+Signed-off-by: Jerry Hoemann <jerry.hoemann@hp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/mm/init.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/arch/x86/mm/init.c
++++ b/arch/x86/mm/init.c
+@@ -45,11 +45,15 @@ static void __init find_early_table_spac
+ int i;
+ unsigned long puds = 0, pmds = 0, ptes = 0, tables;
+ unsigned long start = 0, good_end;
++ unsigned long pgd_extra = 0;
+ phys_addr_t base;
+
+ for (i = 0; i < nr_range; i++) {
+ unsigned long range, extra;
+
++ if ((mr[i].end >> PGDIR_SHIFT) - (mr[i].start >> PGDIR_SHIFT))
++ pgd_extra++;
++
+ range = mr[i].end - mr[i].start;
+ puds += (range + PUD_SIZE - 1) >> PUD_SHIFT;
+
+@@ -74,6 +78,7 @@ static void __init find_early_table_spac
+ tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
+ tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
+ tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);
++ tables += (pgd_extra * PAGE_SIZE);
+
+ #ifdef CONFIG_X86_32
+ /* for fixmap */