git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
Add x86_64 patches from Andi:
author	Chris Wright <chrisw@vas.sous-sol.org>
	Sat, 21 May 2005 00:57:18 +0000 (17:57 -0700)
committer	Chris Wright <chrisw@vas.sous-sol.org>
	Sat, 21 May 2005 00:57:18 +0000 (17:57 -0700)
x86_64-ptrace-RIP-is-canonical.patch
x86_64-fix-canonical-checking.patch
x86_64-add-guard-page.patch
x86_64-vmalloc-mapping-dont-use-pte_page.patch
x86_64-dont-lookup-page-of-pa-in-iounmap.patch

queue/series
queue/x86_64-add-guard-page.patch [new file with mode: 0644]
queue/x86_64-dont-lookup-page-of-pa-in-iounmap.patch [new file with mode: 0644]
queue/x86_64-fix-canonical-checking.patch [new file with mode: 0644]
queue/x86_64-ptrace-RIP-is-canonical.patch [new file with mode: 0644]
queue/x86_64-vmalloc-mapping-dont-use-pte_page.patch [new file with mode: 0644]

diff --git a/queue/series b/queue/series
index f4e97e9eb9e6a896314d6917d05f878b85d60659..f5f29ebec0c6f445ce0987af47c6f5030eca3900 100644
--- a/queue/series
+++ b/queue/series
@@ -9,3 +9,8 @@ rose-minor-security-fix.patch
 usbaudio-prevent-oops-on-usb-unplug.patch
 usbusx2y-prevent-oops-on-usb-unplug.patch
 visor-oops-fix.patch
+x86_64-ptrace-RIP-is-canonical.patch
+x86_64-fix-canonical-checking.patch
+x86_64-add-guard-page.patch
+x86_64-vmalloc-mapping-dont-use-pte_page.patch
+x86_64-dont-lookup-page-of-pa-in-iounmap.patch
diff --git a/queue/x86_64-add-guard-page.patch b/queue/x86_64-add-guard-page.patch
new file mode 100644
index 0000000..dcc418d
--- /dev/null
+++ b/queue/x86_64-add-guard-page.patch
@@ -0,0 +1,29 @@
+[PATCH] x86_64: Add a guard page at the end of the 47bit address space
+
+This works around a bug in AMD K8 CPUs by keeping user mappings clear of the 47-bit canonical boundary.
+
+Signed-off-by: Andi Kleen <ak@suse.de>
+Signed-off-by: Andrew Morton <akpm@osdl.org>
+Signed-off-by: Linus Torvalds <torvalds@osdl.org>
+Signed-off-by: Chris Wright <chrisw@osdl.org>
+---
+
+ processor.h |    4 ++--
+ 1 files changed, 2 insertions(+), 2 deletions(-)
+
+Index: release-2.6.11/include/asm-x86_64/processor.h
+===================================================================
+--- release-2.6.11.orig/include/asm-x86_64/processor.h
++++ release-2.6.11/include/asm-x86_64/processor.h
+@@ -160,9 +160,9 @@ static inline void clear_in_cr4 (unsigne
+ /*
+- * User space process size. 47bits.
++ * User space process size. 47bits minus one guard page.
+  */
+-#define TASK_SIZE     (0x800000000000UL)
++#define TASK_SIZE     (0x800000000000UL - 4096)
+ /* This decides where the kernel will search for a free chunk of vm
+  * space during mmap's.
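
The visible effect of the TASK_SIZE change: the topmost page below the
47-bit boundary can no longer be mapped from user space. A minimal
user-space sketch of that check, not part of the patch (the boundary
constant is taken from the hunk above):

	#define _DEFAULT_SOURCE			/* for MAP_ANONYMOUS */
	#include <stdio.h>
	#include <sys/mman.h>

	int main(void)
	{
		/* Last page below 0x800000000000, now excluded from TASK_SIZE. */
		void *last = (void *)(0x800000000000UL - 4096);
		void *p = mmap(last, 4096, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);

		if (p == MAP_FAILED)
			printf("guard page in effect: mmap at %p rejected\n", last);
		else
			printf("mmap at %p succeeded (unpatched kernel?)\n", last);
		return 0;
	}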
diff --git a/queue/x86_64-dont-lookup-page-of-pa-in-iounmap.patch b/queue/x86_64-dont-lookup-page-of-pa-in-iounmap.patch
new file mode 100644
index 0000000..2bff40c
--- /dev/null
+++ b/queue/x86_64-dont-lookup-page-of-pa-in-iounmap.patch
@@ -0,0 +1,29 @@
+[PATCH] x86_64: Don't look up struct page pointer of physical address in iounmap
+
+The physical address could be in a memory hole not mapped in mem_map, and
+that sends the hash lookup off into nirvana.
+
+Backported to the -stable tree by Chris Wright.
+
+Signed-off-by: Andi Kleen <ak@suse.de>
+Signed-off-by: Andrew Morton <akpm@osdl.org>
+Signed-off-by: Linus Torvalds <torvalds@osdl.org>
+Signed-off-by: Chris Wright <chrisw@osdl.org>
+---
+
+ ioremap.c |    2 +-
+ 1 files changed, 1 insertion(+), 1 deletion(-)
+
+Index: release-2.6.11/arch/x86_64/mm/ioremap.c
+===================================================================
+--- release-2.6.11.orig/arch/x86_64/mm/ioremap.c
++++ release-2.6.11/arch/x86_64/mm/ioremap.c
+@@ -266,7 +266,7 @@ void iounmap(volatile void __iomem *addr
+       if ((p->flags >> 20) &&
+               p->phys_addr + p->size - 1 < virt_to_phys(high_memory)) {
+               /* p->size includes the guard page, but cpa doesn't like that */
+-              change_page_attr(virt_to_page(__va(p->phys_addr)),
++              change_page_attr_addr((unsigned long)(__va(p->phys_addr)),
+                                (p->size - PAGE_SIZE) >> PAGE_SHIFT,
+                                PAGE_KERNEL);                           
+               global_flush_tlb();
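
The one-line change swaps the page-based pageattr interface for the
address-based one: change_page_attr() must be handed a struct page,
which only exists for RAM covered by mem_map, while
change_page_attr_addr() takes the kernel virtual address directly. A
self-contained model of the relationship, with stand-in types and an
elided page-table walk (only the two function names come from the diff;
the direct-mapping base is the 2.6.11-era x86_64 __PAGE_OFFSET):

	typedef unsigned long pgprot_t;		/* stand-in */
	struct page { unsigned long flags; };	/* stand-in */
	static struct page mem_map[4];		/* stand-in page array */

	/* Usable for any kernel virtual address, including ioremapped
	   holes that have no struct page behind them. */
	static int change_page_attr_addr(unsigned long address, int numpages,
					 pgprot_t prot)
	{
		/* real code: walk the kernel page tables and update PTEs */
		(void)address; (void)numpages; (void)prot;
		return 0;
	}

	/* Requires the page to be in mem_map -- the assumption iounmap()
	   could not guarantee for arbitrary physical ranges. */
	static int change_page_attr(struct page *page, int numpages,
				    pgprot_t prot)
	{
		unsigned long addr = 0xffff810000000000UL +
			((unsigned long)(page - mem_map) << 12);
		return change_page_attr_addr(addr, numpages, prot);
	}

	int main(void)
	{
		return change_page_attr(&mem_map[1], 1, 0);
	}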
diff --git a/queue/x86_64-fix-canonical-checking.patch b/queue/x86_64-fix-canonical-checking.patch
new file mode 100644
index 0000000..c1e9a0b
--- /dev/null
+++ b/queue/x86_64-fix-canonical-checking.patch
@@ -0,0 +1,40 @@
+[PATCH] x86_64: Fix canonical checking for segment registers in ptrace
+
+The ptrace code allowed user programs to set a non-canonical segment
+base, which would cause oopses in the kernel later.
+
+Credit-to: Alexander Nyberg <alexn@dsv.su.se>
+
+for identifying and reporting this bug.
+
+Signed-off-by: Andi Kleen <ak@suse.de>
+Signed-off-by: Andrew Morton <akpm@osdl.org>
+Signed-off-by: Linus Torvalds <torvalds@osdl.org>
+Signed-off-by: Chris Wright <chrisw@osdl.org>
+---
+
+ ptrace.c |    8 ++++----
+ 1 files changed, 4 insertions(+), 4 deletions(-)
+
+Index: release-2.6.11/arch/x86_64/kernel/ptrace.c
+===================================================================
+--- release-2.6.11.orig/arch/x86_64/kernel/ptrace.c
++++ release-2.6.11/arch/x86_64/kernel/ptrace.c
+@@ -129,13 +129,13 @@ static int putreg(struct task_struct *ch
+                       value &= 0xffff;
+                       return 0;
+               case offsetof(struct user_regs_struct,fs_base):
+-                      if (!((value >> 48) == 0 || (value >> 48) == 0xffff))
+-                              return -EIO; 
++                      if (value >= TASK_SIZE)
++                              return -EIO;
+                       child->thread.fs = value;
+                       return 0;
+               case offsetof(struct user_regs_struct,gs_base):
+-                      if (!((value >> 48) == 0 || (value >> 48) == 0xffff))
+-                              return -EIO; 
++                      if (value >= TASK_SIZE)
++                              return -EIO;
+                       child->thread.gs = value;
+                       return 0;
+               case offsetof(struct user_regs_struct, eflags):
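
Why the replaced test was too weak: a canonical address needs bits
63..47 all equal, but (value >> 48) == 0 inspects only bits 63..48, so
a value with just bit 47 set slips through and later faults the kernel
when loaded as a segment base. A standalone illustration (helper names
invented; the TASK_SIZE value assumes the guard-page patch above):

	#include <stdio.h>

	static int old_ok(unsigned long value)	/* the removed check */
	{
		return (value >> 48) == 0 || (value >> 48) == 0xffff;
	}

	static int new_ok(unsigned long value)	/* the added check */
	{
		const unsigned long task_size = 0x800000000000UL - 4096;
		return value < task_size;
	}

	int main(void)
	{
		/* Bit 47 set, bits 63..48 clear: non-canonical. */
		unsigned long bad = 1UL << 47;

		printf("old check: %s\n", old_ok(bad) ? "accepted" : "rejected");
		printf("new check: %s\n", new_ok(bad) ? "accepted" : "rejected");
		return 0;
	}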
diff --git a/queue/x86_64-ptrace-RIP-is-canonical.patch b/queue/x86_64-ptrace-RIP-is-canonical.patch
new file mode 100644
index 0000000..437df29
--- /dev/null
+++ b/queue/x86_64-ptrace-RIP-is-canonical.patch
@@ -0,0 +1,29 @@
+[PATCH] x86_64: check if ptrace RIP is canonical
+
+This works around an AMD erratum.
+
+Signed-off-by: Andi Kleen <ak@suse.de>
+Signed-off-by: Andrew Morton <akpm@osdl.org>
+Signed-off-by: Linus Torvalds <torvalds@osdl.org>
+Signed-off-by: Chris Wright <chrisw@osdl.org>
+---
+
+ ptrace.c |    5 +++++
+ 1 files changed, 5 insertions(+)
+
+Index: release-2.6.11/arch/x86_64/kernel/ptrace.c
+===================================================================
+--- release-2.6.11.orig/arch/x86_64/kernel/ptrace.c
++++ release-2.6.11/arch/x86_64/kernel/ptrace.c
+@@ -149,6 +149,11 @@ static int putreg(struct task_struct *ch
+                               return -EIO;
+                       value &= 0xffff;
+                       break;
++              case offsetof(struct user_regs_struct, rip):
++                      /* Check if the new RIP address is canonical */
++                      if (value >= TASK_SIZE)
++                              return -EIO;
++                      break;
+       }
+       put_stack_long(child, regno - sizeof(struct pt_regs), value);
+       return 0;
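
With the new case, poking a RIP at or above TASK_SIZE into a stopped
tracee fails with -EIO instead of planting a value the CPU will fault
on later. A hypothetical tracer fragment (assumes an attached, stopped
child passed on the command line; no error handling beyond the check
itself):

	#include <errno.h>
	#include <stddef.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/ptrace.h>
	#include <sys/types.h>
	#include <sys/user.h>

	static void try_bad_rip(pid_t pid)
	{
		/* Canonical kernel-half address: still >= TASK_SIZE, still refused. */
		unsigned long bad_rip = 0xffff800000000000UL;

		errno = 0;
		ptrace(PTRACE_POKEUSER, pid,
		       (void *)offsetof(struct user_regs_struct, rip),
		       (void *)bad_rip);
		if (errno == EIO)
			printf("bogus RIP rejected as expected\n");
	}

	int main(int argc, char **argv)
	{
		if (argc == 2)
			try_bad_rip((pid_t)atoi(argv[1]));
		return 0;
	}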
diff --git a/queue/x86_64-vmalloc-mapping-dont-use-pte_page.patch b/queue/x86_64-vmalloc-mapping-dont-use-pte_page.patch
new file mode 100644
index 0000000..a4129c7
--- /dev/null
+++ b/queue/x86_64-vmalloc-mapping-dont-use-pte_page.patch
@@ -0,0 +1,51 @@
+[PATCH] x86_64: When checking vmalloc mappings don't use pte_page
+
+The PTEs can point to ioremap mappings too, and these are often outside
+mem_map.  The NUMA hash page lookup functions cannot handle out-of-bounds
+accesses properly.
+
+Signed-off-by: Andi Kleen <ak@suse.de>
+Signed-off-by: Andrew Morton <akpm@osdl.org>
+Signed-off-by: Linus Torvalds <torvalds@osdl.org>
+Signed-off-by: Chris Wright <chrisw@osdl.org>
+---
+
+ fault.c |   11 +++++++++--
+ 1 files changed, 9 insertions(+), 2 deletions(-)
+
+Index: release-2.6.11/arch/x86_64/mm/fault.c
+===================================================================
+--- release-2.6.11.orig/arch/x86_64/mm/fault.c
++++ release-2.6.11/arch/x86_64/mm/fault.c
+@@ -236,6 +236,8 @@ static noinline void pgtable_bad(unsigne
+ 
+ /*
+  * Handle a fault on the vmalloc or module mapping area
++ *
++ * This assumes no large pages in there.
+  */
+ static int vmalloc_fault(unsigned long address)
+ {
+@@ -274,7 +276,10 @@ static int vmalloc_fault(unsigned long a
+       if (!pte_present(*pte_ref))
+               return -1;
+       pte = pte_offset_kernel(pmd, address);
+-      if (!pte_present(*pte) || pte_page(*pte) != pte_page(*pte_ref))
++      /* Don't use pte_page here, because the mappings can point
++         outside mem_map, and the NUMA hash lookup cannot handle
++         that. */
++      if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
+               BUG();
+       __flush_tlb_all();
+       return 0;
+@@ -348,7 +353,9 @@ asmlinkage void do_page_fault(struct pt_
+        * protection error (error_code & 1) == 0.
+        */
+       if (unlikely(address >= TASK_SIZE)) {
+-              if (!(error_code & 5)) {
++              if (!(error_code & 5) &&
++                    ((address >= VMALLOC_START && address < VMALLOC_END) ||
++                     (address >= MODULES_VADDR && address < MODULES_END))) {
+                       if (vmalloc_fault(address) < 0)
+                               goto bad_area_nosemaphore;
+                       return;
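
The heart of the fix is the switch from comparing struct page pointers
to comparing raw page frame numbers: pte_pfn() is pure bit arithmetic
on the PTE, while pte_page() goes through pfn_to_page(), which on NUMA
indexes per-node tables that are only valid for frames covered by
mem_map. The added range check in do_page_fault() likewise keeps
vmalloc_fault() away from addresses outside the vmalloc and module
areas. A simplified, self-contained model of the two accessors (mask
and array sizes are illustrative, not the kernel's definitions):

	#define PAGE_SHIFT	12
	#define PTE_PFN_MASK	0x000ffffffffff000UL	/* illustrative */

	typedef struct { unsigned long pte; } pte_t;
	struct page { unsigned long flags; };		/* stand-in */
	static struct page mem_map[8];			/* stand-in page array */

	/* Pure arithmetic: safe for any PTE, including ioremap mappings. */
	static unsigned long pte_pfn(pte_t pte)
	{
		return (pte.pte & PTE_PFN_MASK) >> PAGE_SHIFT;
	}

	/* Indexes mem_map: undefined for frames outside it, which is why
	   the patched comparison avoids this path. */
	static struct page *pte_page(pte_t pte)
	{
		return &mem_map[pte_pfn(pte)];
	}

	int main(void)
	{
		pte_t a = { (3UL << PAGE_SHIFT) | 0x63 };  /* pfn 3 + flag bits */
		pte_t b = { (3UL << PAGE_SHIFT) | 0x61 };  /* same frame */

		/* The patched check: same frame, no mem_map access needed. */
		return pte_pfn(a) != pte_pfn(b);
	}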