s390-timer-avoid-overflow-when-programming-clock-comparator.patch
+x86-do-not-leak-kernel-page-mapping-locations.patch
+x86-apic-work-around-boot-failure-on-hp-proliant-dl980-g7-server-systems.patch
+x86-mm-check-if-pud-is-large-when-validating-a-kernel-address.patch
--- /dev/null
+From cb214ede7657db458fd0b2a25ea0b28dbf900ebc Mon Sep 17 00:00:00 2001
+From: Stoney Wang <song-bo.wang@hp.com>
+Date: Thu, 7 Feb 2013 10:53:02 -0800
+Subject: x86/apic: Work around boot failure on HP ProLiant DL980 G7 Server systems
+
+From: Stoney Wang <song-bo.wang@hp.com>
+
+commit cb214ede7657db458fd0b2a25ea0b28dbf900ebc upstream.
+
+When an HP ProLiant DL980 G7 server boots a regular kernel,
+interrupts are intermittently lost, which can result in a hang
+or (in extreme cases) data loss.
+
+The reason is that this system only supports x2apic physical
+mode, while the kernel boots with a logical-cluster default
+setting.
+
+This bug can be worked around by specifying the "x2apic_phys" or
+"nox2apic" boot option, but we want to handle this system
+without requiring manual workarounds.
+
+The BIOS sets ACPI_FADT_APIC_PHYSICAL in the FADT table.
+As all APIC IDs are smaller than 255, the BIOS needs to hand
+control to the OS in xAPIC mode, according to the x2APIC
+specification, chapter 2.9.
+
+The current code handles x2apic as follows when the BIOS hands
+over in xAPIC mode:
+
+When the user specifies x2apic_phys, or the FADT indicates
+PHYSICAL:
+
+1. During the MADT OEM check, the apic driver is first set to
+   the xAPIC logical or xAPIC physical driver.
+
+2. enable_IR_x2apic() enables x2apic_mode.
+
+3. If the user specified x2apic_phys on the boot line,
+   x2apic_phys_probe() installs the correct x2apic physical
+   driver and x2apic physical mode is used. Otherwise that driver
+   is skipped and x2apic_cluster_probe() takes over, installing
+   the x2apic cluster driver (the wrong one) even though the
+   FADT indicates PHYSICAL, because x2apic_phys_probe() does not
+   check the FADT PHYSICAL flag.
+
+Add a check of x2apic_fadt_phys() in x2apic_phys_probe() to fix
+the problem.
+
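+In effect, the patched probe path reduces to the following
+(a simplified sketch of the resulting logic, not the literal
+kernel source):
+
+  /* Sketch: x2apic physical mode is now selected when either
+   * the user asked for it or the FADT mandates it. */
+  static bool x2apic_fadt_phys(void)
+  {
+          return (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) &&
+                 (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL);
+  }
+
+  static int x2apic_phys_probe(void)
+  {
+          /* The "|| x2apic_fadt_phys()" term is the fix: */
+          if (x2apic_mode && (x2apic_phys || x2apic_fadt_phys()))
+                  return 1;
+
+          return apic == &apic_x2apic_phys;
+  }
+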
+Signed-off-by: Stoney Wang <song-bo.wang@hp.com>
+[ updated the changelog and simplified the code ]
+Signed-off-by: Yinghai Lu <yinghai@kernel.org>
+Link: http://lkml.kernel.org/r/1360263182-16226-1-git-send-email-yinghai@kernel.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/apic/x2apic_phys.c | 21 +++++++++++----------
+ 1 file changed, 11 insertions(+), 10 deletions(-)
+
+--- a/arch/x86/kernel/apic/x2apic_phys.c
++++ b/arch/x86/kernel/apic/x2apic_phys.c
+@@ -20,18 +20,19 @@ static int set_x2apic_phys_mode(char *ar
+ }
+ early_param("x2apic_phys", set_x2apic_phys_mode);
+
+-static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
++static bool x2apic_fadt_phys(void)
+ {
+- if (x2apic_phys)
+- return x2apic_enabled();
+- else if ((acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) &&
+- (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL) &&
+- x2apic_enabled()) {
++ if ((acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) &&
++ (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL)) {
+ printk(KERN_DEBUG "System requires x2apic physical mode\n");
+- return 1;
++ return true;
+ }
+- else
+- return 0;
++ return false;
++}
++
++static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
++{
++ return x2apic_enabled() && (x2apic_phys || x2apic_fadt_phys());
+ }
+
+ static void
+@@ -114,7 +115,7 @@ static void init_x2apic_ldr(void)
+
+ static int x2apic_phys_probe(void)
+ {
+- if (x2apic_mode && x2apic_phys)
++ if (x2apic_mode && (x2apic_phys || x2apic_fadt_phys()))
+ return 1;
+
+ return apic == &apic_x2apic_phys;
--- /dev/null
+From e575a86fdc50d013bf3ad3aa81d9100e8e6cc60d Mon Sep 17 00:00:00 2001
+From: Kees Cook <keescook@chromium.org>
+Date: Thu, 7 Feb 2013 09:44:13 -0800
+Subject: x86: Do not leak kernel page mapping locations
+
+From: Kees Cook <keescook@chromium.org>
+
+commit e575a86fdc50d013bf3ad3aa81d9100e8e6cc60d upstream.
+
+Without this patch, it is trivial to determine kernel page
+mappings by examining the error code reported to dmesg [1].
+Instead, report every fault on a kernel address as a protection
+violation on a present page.
+
+Additionally, since show_unhandled_signals is enabled by
+default, switch branch hinting to the more realistic
+expectation, and unobfuscate the setting of the PF_PROT bit to
+improve readability.
+
+[1] http://vulnfactory.org/blog/2013/02/06/a-linux-memory-trick/
+
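+For reference, the relevant hardware error code bits (defined in
+arch/x86/mm/fault.c) and the leak they enabled, in sketch form:
+
+  /* Page fault error code bits (enum x86_pf_error_code): */
+  PF_PROT  = 1 << 0,  /* 0: page not present, 1: protection violation */
+  PF_WRITE = 1 << 1,  /* the access was a write */
+  PF_USER  = 1 << 2,  /* the fault originated in user mode */
+
+  /* Before this patch, show_signal_msg() printed the raw error
+   * code, so a user-mode access to a kernel address revealed via
+   * PF_PROT whether that address was mapped.  Forcing PF_PROT on
+   * for every address >= TASK_SIZE makes all kernel addresses
+   * report the same way. */
+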
+Reported-by: Dan Rosenberg <dan.j.rosenberg@gmail.com>
+Suggested-by: Brad Spengler <spender@grsecurity.net>
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Acked-by: H. Peter Anvin <hpa@zytor.com>
+Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Cc: Frederic Weisbecker <fweisbec@gmail.com>
+Cc: Eric W. Biederman <ebiederm@xmission.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Link: http://lkml.kernel.org/r/20130207174413.GA12485@www.outflux.net
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/mm/fault.c | 8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/mm/fault.c
++++ b/arch/x86/mm/fault.c
+@@ -747,13 +747,15 @@ __bad_area_nosemaphore(struct pt_regs *r
+ return;
+ }
+ #endif
++ /* Kernel addresses are always protection faults: */
++ if (address >= TASK_SIZE)
++ error_code |= PF_PROT;
+
+- if (unlikely(show_unhandled_signals))
++ if (likely(show_unhandled_signals))
+ show_signal_msg(regs, error_code, address, tsk);
+
+- /* Kernel addresses are always protection faults: */
+ tsk->thread.cr2 = address;
+- tsk->thread.error_code = error_code | (address >= TASK_SIZE);
++ tsk->thread.error_code = error_code;
+ tsk->thread.trap_nr = X86_TRAP_PF;
+
+ force_sig_info_fault(SIGSEGV, si_code, address, tsk, 0);
--- /dev/null
+From 0ee364eb316348ddf3e0dfcd986f5f13f528f821 Mon Sep 17 00:00:00 2001
+From: Mel Gorman <mgorman@suse.de>
+Date: Mon, 11 Feb 2013 14:52:36 +0000
+Subject: x86/mm: Check if PUD is large when validating a kernel address
+
+From: Mel Gorman <mgorman@suse.de>
+
+commit 0ee364eb316348ddf3e0dfcd986f5f13f528f821 upstream.
+
+A user reported the following oops when a backup process reads
+/proc/kcore:
+
+ BUG: unable to handle kernel paging request at ffffbb00ff33b000
+ IP: [<ffffffff8103157e>] kern_addr_valid+0xbe/0x110
+ [...]
+
+ Call Trace:
+ [<ffffffff811b8aaa>] read_kcore+0x17a/0x370
+ [<ffffffff811ad847>] proc_reg_read+0x77/0xc0
+ [<ffffffff81151687>] vfs_read+0xc7/0x130
+ [<ffffffff811517f3>] sys_read+0x53/0xa0
+ [<ffffffff81449692>] system_call_fastpath+0x16/0x1b
+
+Investigation determined that the bug triggered when reading
+system RAM at the 4G mark. On this system, that was the first
+address using 1G pages for the virt->phys direct mapping, so the
+PUD entry points directly to a physical page, not to a PMD page.
+
+The problem is that the page table walker in kern_addr_valid()
+does not check pud_large() and treats the physical address as if
+it were a PMD. If it happens to look like pmd_none, the walk
+fails silently, probably returning zeros instead of real data.
+If the data happens to look like a present PMD, though, it is
+walked, resulting in the oops above.
+
+This patch adds the necessary pud_large() check.
+
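+With the fix, the walk in kern_addr_valid() handles a huge PUD
+the same way it already handles a huge PMD (simplified sketch of
+the patched walk):
+
+  pud = pud_offset(pgd, addr);
+  if (pud_none(*pud))
+          return 0;
+
+  /* New: a large PUD maps a 1G page directly; there is no PMD
+   * page below it, so validate the PFN and stop descending. */
+  if (pud_large(*pud))
+          return pfn_valid(pud_pfn(*pud));
+
+  pmd = pmd_offset(pud, addr);
+  if (pmd_none(*pmd))
+          return 0;
+
+  /* Pre-existing: the same pattern one level down, for 2M pages. */
+  if (pmd_large(*pmd))
+          return pfn_valid(pmd_pfn(*pmd));
+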
+Unfortunately, the problem was not readily reproducible, and the
+user is now running the backup program without accessing
+/proc/kcore, so the patch has not been validated, but I think it
+makes sense.
+
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+Reviewed-by: Rik van Riel <riel@redhat.com>
+Reviewed-by: Michal Hocko <mhocko@suse.cz>
+Acked-by: Johannes Weiner <hannes@cmpxchg.org>
+Cc: linux-mm@kvack.org
+Link: http://lkml.kernel.org/r/20130211145236.GX21389@suse.de
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/pgtable.h | 5 +++++
+ arch/x86/mm/init_64.c | 3 +++
+ 2 files changed, 8 insertions(+)
+
+--- a/arch/x86/include/asm/pgtable.h
++++ b/arch/x86/include/asm/pgtable.h
+@@ -142,6 +142,11 @@ static inline unsigned long pmd_pfn(pmd_
+ return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT;
+ }
+
++static inline unsigned long pud_pfn(pud_t pud)
++{
++ return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
++}
++
+ #define pte_page(pte) pfn_to_page(pte_pfn(pte))
+
+ static inline int pmd_large(pmd_t pte)
+--- a/arch/x86/mm/init_64.c
++++ b/arch/x86/mm/init_64.c
+@@ -821,6 +821,9 @@ int kern_addr_valid(unsigned long addr)
+ if (pud_none(*pud))
+ return 0;
+
++ if (pud_large(*pud))
++ return pfn_valid(pud_pfn(*pud));
++
+ pmd = pmd_offset(pud, addr);
+ if (pmd_none(*pmd))
+ return 0;