--- /dev/null
+From stable-bounces@linux.kernel.org Thu Apr 27 05:23:14 2006
+Message-Id: <20060427122214.159808000@linux-mips.org>
+Date: Thu, 27 Apr 2006 00:00:01 +0100
+From: Ralf Baechle <ralf@linux-mips.org>
+To: stable@kernel.org
+Content-Disposition: inline; filename=0001.patch
+Cc:
+Subject: MIPS: Use "R" constraint for cache_op.
+
+Gcc might emit an absolute address for the "m" constraint which
+gas unfortunately does not permit.
+
+Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ include/asm-mips/r4kcache.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- linux-2.6.16.11.orig/include/asm-mips/r4kcache.h
++++ linux-2.6.16.11/include/asm-mips/r4kcache.h
+@@ -37,7 +37,7 @@
+ " cache %0, %1 \n" \
+ " .set pop \n" \
+ : \
+- : "i" (op), "m" (*(unsigned char *)(addr)))
++ : "i" (op), "R" (*(unsigned char *)(addr)))
+
+ static inline void flush_icache_line_indexed(unsigned long addr)
+ {
--- /dev/null
+From stable-bounces@linux.kernel.org Thu Apr 27 05:23:14 2006
+Message-Id: <20060427122214.315677000@linux-mips.org>
+Date: Thu, 27 Apr 2006 00:00:02 +0100
+From: Ralf Baechle <ralf@linux-mips.org>
+To: stable@kernel.org
+Content-Disposition: inline; filename=0002.patch
+Cc:
+Subject: MIPS: R2 build fixes for gcc < 3.4.
+
+Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ include/asm-mips/bitops.h | 14 ++++++++++++--
+ include/asm-mips/byteorder.h | 4 ++++
+ include/asm-mips/interrupt.h | 8 ++++++++
+ 3 files changed, 24 insertions(+), 2 deletions(-)
+
+--- linux-2.6.16.11.orig/include/asm-mips/bitops.h
++++ linux-2.6.16.11/include/asm-mips/bitops.h
+@@ -654,7 +654,12 @@ static inline unsigned long fls(unsigned
+ {
+ #ifdef CONFIG_32BIT
+ #ifdef CONFIG_CPU_MIPS32
+- __asm__ ("clz %0, %1" : "=r" (word) : "r" (word));
++ __asm__ (
++ " .set mips32 \n"
++ " clz %0, %1 \n"
++ " .set mips0 \n"
++ : "=r" (word)
++ : "r" (word));
+
+ return 32 - word;
+ #else
+@@ -678,7 +683,12 @@ static inline unsigned long fls(unsigned
+ #ifdef CONFIG_64BIT
+ #ifdef CONFIG_CPU_MIPS64
+
+- __asm__ ("dclz %0, %1" : "=r" (word) : "r" (word));
++ __asm__ (
++ " .set mips64 \n"
++ " dclz %0, %1 \n"
++ " .set mips0 \n"
++ : "=r" (word)
++ : "r" (word));
+
+ return 64 - word;
+ #else
+--- linux-2.6.16.11.orig/include/asm-mips/byteorder.h
++++ linux-2.6.16.11/include/asm-mips/byteorder.h
+@@ -19,7 +19,9 @@
+ static __inline__ __attribute_const__ __u16 ___arch__swab16(__u16 x)
+ {
+ __asm__(
++ " .set mips32r2 \n"
+ " wsbh %0, %1 \n"
++ " .set mips0 \n"
+ : "=r" (x)
+ : "r" (x));
+
+@@ -30,8 +32,10 @@ static __inline__ __attribute_const__ __
+ static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x)
+ {
+ __asm__(
++ " .set mips32r2 \n"
+ " wsbh %0, %1 \n"
+ " rotr %0, %0, 16 \n"
++ " .set mips0 \n"
+ : "=r" (x)
+ : "r" (x));
+
+--- linux-2.6.16.11.orig/include/asm-mips/interrupt.h
++++ linux-2.6.16.11/include/asm-mips/interrupt.h
+@@ -20,7 +20,9 @@ __asm__ (
+ " .set reorder \n"
+ " .set noat \n"
+ #ifdef CONFIG_CPU_MIPSR2
++ " .set mips32r2 \n"
+ " ei \n"
++ " .set mips0 \n"
+ #else
+ " mfc0 $1,$12 \n"
+ " ori $1,0x1f \n"
+@@ -63,7 +65,9 @@ __asm__ (
+ " .set push \n"
+ " .set noat \n"
+ #ifdef CONFIG_CPU_MIPSR2
++ " .set mips32r2 \n"
+ " di \n"
++ " .set mips0 \n"
+ #else
+ " mfc0 $1,$12 \n"
+ " ori $1,0x1f \n"
+@@ -103,8 +107,10 @@ __asm__ (
+ " .set reorder \n"
+ " .set noat \n"
+ #ifdef CONFIG_CPU_MIPSR2
++ " .set mips32r2 \n"
+ " di \\result \n"
+ " andi \\result, 1 \n"
++ " .set mips0 \n"
+ #else
+ " mfc0 \\result, $12 \n"
+ " ori $1, \\result, 0x1f \n"
+@@ -133,9 +139,11 @@ __asm__ (
+ * Slow, but doesn't suffer from a relativly unlikely race
+ * condition we're having since days 1.
+ */
++ " .set mips32r2 \n"
+ " beqz \\flags, 1f \n"
+ " di \n"
+ " ei \n"
++ " .set mips0 \n"
+ "1: \n"
+ #elif defined(CONFIG_CPU_MIPSR2)
+ /*
--- /dev/null
+From stable-bounces@linux.kernel.org Thu Apr 27 05:23:25 2006
+Message-Id: <20060427122214.551882000@linux-mips.org>
+Date: Thu, 27 Apr 2006 00:00:03 +0100
+From: Ralf Baechle <ralf@linux-mips.org>
+To: stable@kernel.org
+Content-Disposition: inline; filename=0003.patch
+Cc: Atsushi Nemoto <anemo@mba.ocn.ne.jp>
+Subject: MIPS: Fix tx49_blast_icache32_page_indexed.
+
+From: Atsushi Nemoto <anemo@mba.ocn.ne.jp>
+
+Fix the cache index value in tx49_blast_icache32_page_indexed().
+This damage was introduced by commit de62893bc0725f8b5f0445250577cd7a10b2d8f8.
+
+Signed-off-by: Atsushi Nemoto <anemo@mba.ocn.ne.jp>
+Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/mips/mm/c-r4k.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- linux-2.6.16.11.orig/arch/mips/mm/c-r4k.c
++++ linux-2.6.16.11/arch/mips/mm/c-r4k.c
+@@ -154,7 +154,8 @@ static inline void blast_icache32_r4600_
+
+ static inline void tx49_blast_icache32_page_indexed(unsigned long page)
+ {
+- unsigned long start = page;
++ unsigned long indexmask = current_cpu_data.icache.waysize - 1;
++ unsigned long start = INDEX_BASE + (page & indexmask);
+ unsigned long end = start + PAGE_SIZE;
+ unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
+ unsigned long ws_end = current_cpu_data.icache.ways <<
--- /dev/null
+From stable-bounces@linux.kernel.org Thu Apr 27 05:23:36 2006
+Message-Id: <20060427122215.016221000@linux-mips.org>
+Date: Thu, 27 Apr 2006 00:00:04 +0100
+From: Ralf Baechle <ralf@linux-mips.org>
+To: stable@kernel.org
+Content-Disposition: inline; filename=0004.patch
+Cc: Win Treese <treese@acm.org>
+Subject: MIPS: Fix branch emulation for floating-point exceptions.
+
+From: Win Treese <treese@acm.org>
+
+In the branch emulation for floating-point exceptions, __compute_return_epc
+must determine for bc1f et al which condition code bit to test. This is
+based on bits <4:2> of the rt field. The switch statement to distinguish
+bc1f et al needs to use only the two low bits of rt, but the old code tests
+on the whole rt field. This patch masks off the proper bits.
+
+Signed-off-by: Win Treese <treese@acm.org>
+Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/mips/kernel/branch.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- linux-2.6.16.11.orig/arch/mips/kernel/branch.c
++++ linux-2.6.16.11/arch/mips/kernel/branch.c
+@@ -184,7 +184,7 @@ int __compute_return_epc(struct pt_regs
+ bit = (insn.i_format.rt >> 2);
+ bit += (bit != 0);
+ bit += 23;
+- switch (insn.i_format.rt) {
++ switch (insn.i_format.rt & 3) {
+ case 0: /* bc1f */
+ case 2: /* bc1fl */
+ if (~fcr31 & (1 << bit))
--- /dev/null
+From stable-bounces@linux.kernel.org Tue Apr 25 23:13:04 2006
+From: "Kok, Auke" <auke-jan.h.kok@intel.com>
+Date: Tue, 25 Apr 2006 23:16:29 -0700
+To: stable@kernel.org, "Garzik, Jeff" <jgarzik@pobox.com>
+Message-Id: <20060426061628.25966.93051.stgit@gitlost.site>
+Cc: netdev@vger.kernel.org, "Brandeburg, Jesse" <jesse.brandeburg@intel.com>, "Ronciak, John" <john.ronciak@intel.com>, "Kirsher, Jeff" <Jeffrey.t.kirsher@intel.com>, "Kok, Auke" <auke@foo-projects.org>, "Miller, David" <davem@davemloft.net>
+Subject: NET: e1000: Update truesize with the length of the packet for packet split
+
+
+Update skb with the real packet size.
+
+
+Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
+Signed-off-by: Auke Kok <auke-jan.h.kok@intel.com>
+Signed-off-by: John Ronciak <john.ronciak@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/e1000/e1000_main.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- linux-2.6.16.11.orig/drivers/net/e1000/e1000_main.c
++++ linux-2.6.16.11/drivers/net/e1000/e1000_main.c
+@@ -3851,6 +3851,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapt
+ skb_shinfo(skb)->nr_frags++;
+ skb->len += length;
+ skb->data_len += length;
++ skb->truesize += length;
+ }
+
+ e1000_rx_checksum(adapter, staterr,
altix-snsc-duplicate-kobject-fix.patch
alpha-strncpy-fix.patch
lsm-add-missing-hook-to-do_compat_readv_writev.patch
+x86-pae-fix-pte_clear-for-the-4gb-ram-case.patch
+net-e1000-update-truesize-with-the-length-of-the-packet-for-packet-split.patch
+MIPS-0001.patch
+MIPS-0002.patch
+MIPS-0003.patch
+MIPS-0004.patch
--- /dev/null
+From git-commits-head-owner@vger.kernel.org Thu Apr 27 13:02:14 2006
+Date: Thu, 27 Apr 2006 20:01:39 GMT
+Message-Id: <200604272001.k3RK1dmX007637@hera.kernel.org>
+From: Linux Kernel Mailing List <linux-kernel@vger.kernel.org>
+To: git-commits-head@vger.kernel.org
+Subject: [PATCH] x86/PAE: Fix pte_clear for the >4GB RAM case
+
+From: Zachary Amsden <zach@vmware.com>
+
+[PATCH] x86/PAE: Fix pte_clear for the >4GB RAM case
+
+Proposed fix for ptep_get_and_clear_full PAE bug. Pte_clear had the same bug,
+so use the same fix for both. Turns out pmd_clear had it as well, but pgds
+are not affected.
+
+The problem is rather intricate. Page table entries in PAE mode are 64-bits
+wide, but the only atomic 8-byte write operation available in 32-bit mode is
+cmpxchg8b, which is expensive (at least on P4), and thus avoided. But it can
+happen that the processor may prefetch entries into the TLB in the middle of an
+operation which clears a page table entry. So one must always clear the P-bit
+in the low word of the page table entry first when clearing it.
+
+Since the sequence *ptep = __pte(0) leaves the order of the write dependent on
+the compiler, it must be coded explicitly as a clear of the low word followed
+by a clear of the high word. Further, there must be a write memory barrier
+here to enforce proper ordering by the compiler (and, in the future, by the
+processor as well).
+
+On > 4GB memory machines, the implementation of pte_clear for PAE was clearly
+deficient, as it could leave virtual mappings of physical memory above 4GB
+aliased to memory below 4GB in the TLB. The implementation of
+ptep_get_and_clear_full has a similar bug, although not nearly as likely to
+occur, since the mappings being cleared are in the process of being destroyed,
+and should never be dereferenced again.
+
+But, as luck would have it, it is possible to trigger bugs even without ever
+dereferencing these bogus TLB mappings, even if the clear is followed fairly
+soon after with a TLB flush or invalidation. The problem is that memory above
+4GB may now be aliased into the first 4GB of memory, and in fact, may hit a
+region of memory with non-memory semantics. These regions include AGP and PCI
+space. As such, these memory regions are not cached by the processor. This
+introduces the bug.
+
+The processor can speculate memory operations, including memory writes, as long
+as they are committed with the proper ordering. Speculating a memory write to
+a linear address that has a bogus TLB mapping is possible. Normally, the
+speculation is harmless. But for cached memory, it does leave the falsely
+speculated cacheline unmodified, but in a dirty state. This cache line will be
+eventually written back. If this cacheline happens to intersect a region of
+memory that is not protected by the cache coherency protocol, it can corrupt
+data in I/O memory, which is generally a very bad thing to do, and can cause
+total system failure or just plain undefined behavior.
+
+These bugs are extremely unlikely, but the severity is of such magnitude, and
+the fix so simple that I think fixing them immediately is justified. Also,
+they are nearly impossible to debug.
+
+Signed-off-by: Zachary Amsden <zach@vmware.com>
+Signed-off-by: Linus Torvalds <torvalds@osdl.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ include/asm-i386/pgtable-2level.h | 3 +++
+ include/asm-i386/pgtable-3level.h | 20 ++++++++++++++++++++
+ include/asm-i386/pgtable.h | 4 +---
+ 3 files changed, 24 insertions(+), 3 deletions(-)
+
+--- linux-2.6.16.11.orig/include/asm-i386/pgtable-2level.h
++++ linux-2.6.16.11/include/asm-i386/pgtable-2level.h
+@@ -18,6 +18,9 @@
+ #define set_pte_atomic(pteptr, pteval) set_pte(pteptr,pteval)
+ #define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
+
++#define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
++#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
++
+ #define ptep_get_and_clear(mm,addr,xp) __pte(xchg(&(xp)->pte_low, 0))
+ #define pte_same(a, b) ((a).pte_low == (b).pte_low)
+ #define pte_page(x) pfn_to_page(pte_pfn(x))
+--- linux-2.6.16.11.orig/include/asm-i386/pgtable-3level.h
++++ linux-2.6.16.11/include/asm-i386/pgtable-3level.h
+@@ -85,6 +85,26 @@ static inline void pud_clear (pud_t * pu
+ #define pmd_offset(pud, address) ((pmd_t *) pud_page(*(pud)) + \
+ pmd_index(address))
+
++/*
++ * For PTEs and PDEs, we must clear the P-bit first when clearing a page table
++ * entry, so clear the bottom half first and enforce ordering with a compiler
++ * barrier.
++ */
++static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
++{
++ ptep->pte_low = 0;
++ smp_wmb();
++ ptep->pte_high = 0;
++}
++
++static inline void pmd_clear(pmd_t *pmd)
++{
++ u32 *tmp = (u32 *)pmd;
++ *tmp = 0;
++ smp_wmb();
++ *(tmp + 1) = 0;
++}
++
+ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+ {
+ pte_t res;
+--- linux-2.6.16.11.orig/include/asm-i386/pgtable.h
++++ linux-2.6.16.11/include/asm-i386/pgtable.h
+@@ -204,12 +204,10 @@ extern unsigned long long __PAGE_KERNEL,
+ extern unsigned long pg0[];
+
+ #define pte_present(x) ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))
+-#define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
+
+ /* To avoid harmful races, pmd_none(x) should check only the lower when PAE */
+ #define pmd_none(x) (!(unsigned long)pmd_val(x))
+ #define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
+-#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
+ #define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
+
+
+@@ -269,7 +267,7 @@ static inline pte_t ptep_get_and_clear_f
+ pte_t pte;
+ if (full) {
+ pte = *ptep;
+- *ptep = __pte(0);
++ pte_clear(mm, addr, ptep);
+ } else {
+ pte = ptep_get_and_clear(mm, addr, ptep);
+ }