git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
more .26 patches
author Greg Kroah-Hartman <gregkh@suse.de>
Mon, 18 Aug 2008 17:52:50 +0000 (10:52 -0700)
committer Greg Kroah-Hartman <gregkh@suse.de>
Mon, 18 Aug 2008 17:52:50 +0000 (10:52 -0700)
queue-2.6.26/cifs-properly-account-for-new-user-field-in-spnego-upcall-string-allocation.patch [new file with mode: 0644]
queue-2.6.26/crypto-padlock-fix-via-padlock-instruction-usage-with-irq_ts_save-restore.patch [new file with mode: 0644]
queue-2.6.26/pci-limit-vpd-length-for-broadcom-5708s.patch [new file with mode: 0644]
queue-2.6.26/qla2xxx-add-dev_loss_tmo_callbk-terminate_rport_io-callback-support.patch [new file with mode: 0644]
queue-2.6.26/qla2xxx-set-an-rport-s-dev_loss_tmo-value-in-a-consistent-manner.patch [new file with mode: 0644]
queue-2.6.26/series
queue-2.6.26/usb-storage-automatically-recognize-bad-residues.patch [new file with mode: 0644]
queue-2.6.26/usb-storage-revert-dma-alignment-change-for-wireless-usb.patch [new file with mode: 0644]
queue-2.6.26/x86-fix-setup-code-crashes-on-my-old-486-box.patch [new file with mode: 0644]
queue-2.6.26/x86-fix-spin_is_contended.patch [new file with mode: 0644]

diff --git a/queue-2.6.26/cifs-properly-account-for-new-user-field-in-spnego-upcall-string-allocation.patch b/queue-2.6.26/cifs-properly-account-for-new-user-field-in-spnego-upcall-string-allocation.patch
new file mode 100644 (file)
index 0000000..05e72f7
--- /dev/null
@@ -0,0 +1,62 @@
+From smfrench@gmail.com  Mon Aug 18 10:40:51 2008
+From: Jeff Layton <jlayton@redhat.com>
+Date: Fri, 1 Aug 2008 13:08:14 -0500
+Subject: CIFS: properly account for new user= field in SPNEGO upcall string allocation
+To: "Jeff Layton" <jlayton@redhat.com>
+Cc: linux-cifs-client@lists.samba.org, chrisw@sous-sol.org, gregkh@suse.de
+Message-ID: <524f69650808011108x988ae14t2fbef8238ad1ded2@mail.gmail.com>
+
+From: Jeff Layton <jlayton@redhat.com>
+
+commit 66b8bd3c405389213de1d6ba6c2565990f62004f upstream
+
+[CIFS] properly account for new user= field in SPNEGO upcall string allocation
+
+...it doesn't look like it's being accounted for at the moment. Also
+try to reorganize the calculation to make it a little more evident
+what each piece means.
+
+This should probably go to the stable series as well...
+
+Signed-off-by: Jeff Layton <jlayton@redhat.com>
+Signed-off-by: Steve French <sfrench@us.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/cifs/cifs_spnego.c |   18 +++++++++++-------
+ 1 file changed, 11 insertions(+), 7 deletions(-)
+
+--- a/fs/cifs/cifs_spnego.c
++++ b/fs/cifs/cifs_spnego.c
+@@ -66,8 +66,8 @@ struct key_type cifs_spnego_key_type = {
+       .describe       = user_describe,
+ };
+-#define MAX_VER_STR_LEN   9 /* length of longest version string e.g.
+-                              strlen(";ver=0xFF") */
++#define MAX_VER_STR_LEN   8 /* length of longest version string e.g.
++                              strlen("ver=0xFF") */
+ #define MAX_MECH_STR_LEN 13 /* length of longest security mechanism name, eg
+                              in future could have strlen(";sec=ntlmsspi") */
+ #define MAX_IPV6_ADDR_LEN 42 /* eg FEDC:BA98:7654:3210:FEDC:BA98:7654:3210/60 */
+@@ -81,11 +81,15 @@ cifs_get_spnego_key(struct cifsSesInfo *
+       struct key *spnego_key;
+       const char *hostname = server->hostname;
+-      /* BB: come up with better scheme for determining length */
+-      /* length of fields (with semicolons): ver=0xyz ipv4= ipaddress host=
+-         hostname sec=mechanism uid=0x uid */
+-      desc_len = MAX_VER_STR_LEN + 5 + MAX_IPV6_ADDR_LEN + 1 + 6 +
+-                strlen(hostname) + MAX_MECH_STR_LEN + 8 + (sizeof(uid_t) * 2);
++      /* length of fields (with semicolons): ver=0xyz ip4=ipaddress
++         host=hostname sec=mechanism uid=0xFF user=username */
++      desc_len = MAX_VER_STR_LEN +
++                 6 /* len of "host=" */ + strlen(hostname) +
++                 5 /* len of ";ipv4=" */ + MAX_IPV6_ADDR_LEN +
++                 MAX_MECH_STR_LEN +
++                 7 /* len of ";uid=0x" */ + (sizeof(uid_t) * 2) +
++                 6 /* len of ";user=" */ + strlen(sesInfo->userName) + 1;
++
+       spnego_key = ERR_PTR(-ENOMEM);
+       description = kzalloc(desc_len, GFP_KERNEL);
+       if (description == NULL)
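
The arithmetic in the hunk above is easier to see written out. Below is a minimal user-space sketch of the same description-string sizing; the hostname, username, and field values are hypothetical placeholders, and the length constants mirror the patch (8 for "ver=0xFF", 42 for the longest address, 13 for the longest ";sec=" mechanism).

/* Sketch of the SPNEGO upcall description-string sizing fixed above.
 * "server.example.com", "jdoe" and the values in the snprintf() are
 * hypothetical; the length constants mirror the patch. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
        const char *hostname = "server.example.com";
        const char *username = "jdoe";

        size_t desc_len = 8                          /* strlen("ver=0xFF")      */
                        + 6 + strlen(hostname)       /* ";host=" + hostname     */
                        + 5 + 42                     /* ";ip4=" + longest addr  */
                        + 13                         /* longest ";sec=" string  */
                        + 7 + sizeof(unsigned) * 2   /* ";uid=0x" + hex uid     */
                        + 6 + strlen(username)       /* ";user=" + username     */
                        + 1;                         /* trailing NUL            */

        char *description = calloc(1, desc_len);
        if (!description)
                return 1;

        snprintf(description, desc_len,
                 "ver=0x2;host=%s;ip4=192.0.2.1;sec=krb5;uid=0x0;user=%s",
                 hostname, username);
        printf("%zu bytes reserved for \"%s\"\n", desc_len, description);
        free(description);
        return 0;
}
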
diff --git a/queue-2.6.26/crypto-padlock-fix-via-padlock-instruction-usage-with-irq_ts_save-restore.patch b/queue-2.6.26/crypto-padlock-fix-via-padlock-instruction-usage-with-irq_ts_save-restore.patch
new file mode 100644 (file)
index 0000000..9bcec5a
--- /dev/null
@@ -0,0 +1,416 @@
+From herbert@gondor.apana.org.au  Mon Aug 18 10:48:35 2008
+From: Suresh Siddha <suresh.b.siddha@intel.com>
+Date: Fri, 15 Aug 2008 10:13:20 +1000
+Subject: crypto: padlock - fix VIA PadLock instruction usage with irq_ts_save/restore()
+To: stable@kernel.org
+Message-ID: <20080815001320.GA4327@gondor.apana.org.au>
+Content-Disposition: inline
+
+From: Suresh Siddha <suresh.b.siddha@intel.com>
+
+crypto: padlock - fix VIA PadLock instruction usage with irq_ts_save/restore()
+
+[ Upstream commit: e49140120c88eb99db1a9172d9ac224c0f2bbdd2 ]
+
+Wolfgang Walter reported this oops on his via C3 using padlock for
+AES-encryption:
+
+##################################################################
+
+BUG: unable to handle kernel NULL pointer dereference at 000001f0
+IP: [<c01028c5>] __switch_to+0x30/0x117
+*pde = 00000000
+Oops: 0002 [#1] PREEMPT
+Modules linked in:
+
+Pid: 2071, comm: sleep Not tainted (2.6.26 #11)
+EIP: 0060:[<c01028c5>] EFLAGS: 00010002 CPU: 0
+EIP is at __switch_to+0x30/0x117
+EAX: 00000000 EBX: c0493300 ECX: dc48dd00 EDX: c0493300
+ESI: dc48dd00 EDI: c0493530 EBP: c04cff8c ESP: c04cff7c
+DS: 007b ES: 007b FS: 0000 GS: 0033 SS: 0068
+Process sleep (pid: 2071, ti=c04ce000 task=dc48dd00 task.ti=d2fe6000)
+Stack: dc48df30 c0493300 00000000 00000000 d2fe7f44 c03b5b43 c04cffc8 00000046
+   c0131856 0000005a dc472d3c c0493300 c0493470 d983ae00 00002696 00000000
+   c0239f54 00000000 c04c4000 c04cffd8 c01025fe c04f3740 00049800 c04cffe0
+Call Trace:
+[<c03b5b43>] ? schedule+0x285/0x2ff
+[<c0131856>] ? pm_qos_requirement+0x3c/0x53
+[<c0239f54>] ? acpi_processor_idle+0x0/0x434
+[<c01025fe>] ? cpu_idle+0x73/0x7f
+[<c03a4dcd>] ? rest_init+0x61/0x63
+=======================
+
+Wolfgang also found out that adding kernel_fpu_begin() and kernel_fpu_end()
+around the padlock instructions fixes the oops.
+
+Suresh wrote:
+
+Though these padlock instructions don't use/touch SSE registers, they behave
+similarly to other SSE instructions. For example, they might cause DNA faults
+when cr0.ts is set. While this is a spurious DNA trap, it might cause an
+oops with the recent fpu code changes.
+
+This is the code sequence  that is probably causing this problem:
+
+a) new app is getting exec'd and it is somewhere in between
+start_thread() and flush_old_exec() in the load_xyz_binary()
+
+b) At point "a", the task's fpu state (like TS_USEDFPU, used_math() etc) is
+cleared.
+
+c) Now we get an interrupt/softirq which starts using these encrypt/decrypt
+routines in the network stack. This generates a math fault (as
+cr0.ts is '1') which sets TS_USEDFPU and restores the math that is
+in the task's xstate.
+
+d) Return to exec code path, which does start_thread() which does
+free_thread_xstate() and sets xstate pointer to NULL while
+the TS_USEDFPU is still set.
+
+e) At the next context switch from the new exec'd task to another task,
+we have a scenario where TS_USEDFPU is set but the xstate pointer is NULL.
+This can cause an oops during unlazy_fpu() in __switch_to().
+
+Now:
+
+1) This should happen with or without preemption. Viro also encountered a
+similar problem without CONFIG_PREEMPT.
+
+2) kernel_fpu_begin() and kernel_fpu_end() will fix this problem, because
+kernel_fpu_begin() will manually do a clts() and won't run into the
+situation of setting TS_USEDFPU in step "c" above.
+
+3) This was working before the fpu changes, because it's a spurious
+math fault which doesn't corrupt any fpu/sse registers, and the task's
+math state was always in an allocated state.
+
+Without the recent lazy fpu allocation changes, while we don't see an oops,
+there is a possible race still present in older kernels (for example,
+while the kernel is using kernel_fpu_begin() in some optimized clear/copy
+page routine and an interrupt/softirq happens which uses these padlock
+instructions, generating a DNA fault).
+
+This is the failing scenario that existed even before the lazy fpu allocation
+changes:
+
+0. CPU's TS flag is set
+
+1. The kernel is using the FPU in some optimized copy routine and, while doing
+kernel_fpu_begin(), takes an interrupt just before doing clts()
+
+2. The interrupt handler runs and ipsec uses a padlock instruction, and we
+take a DNA fault as the TS flag is still set.
+
+3. We handle the DNA fault and set TS_USEDFPU and clear cr0.ts
+
+4. We complete the padlock routine
+
+5. Go back to step-1, which resumes clts() in kernel_fpu_begin(), finishes
+the optimized copy routine and does kernel_fpu_end(). At this point,
+we have cr0.ts again set to '1' but the task's TS_USEDFPU is still
+set and not cleared.
+
+6. Now the kernel resumes its user operation. And at the next context
+switch, the kernel sees it has to do an FP save as TS_USEDFPU is still set
+and then will do an unlazy_fpu() in __switch_to(). unlazy_fpu()
+will take a DNA fault, as cr0.ts is '1' and now, because we are
+in __switch_to(), math_state_restore() will get confused and will
+restore the next task's FP state and save it in the prev task's FP state.
+Remember, in __switch_to() we are already on the stack of the next task
+but take a DNA fault for the prev task.
+
+This causes the fpu leakage.
+
+Fix the padlock instruction usage by calling these instructions inside the
+context of the new routines irq_ts_save/restore(), which clear/restore cr0.ts
+manually in interrupt context. This will not generate a spurious DNA fault
+in the context of the interrupt, which fixes the oops encountered and
+the possible FPU leakage issue.
+
+Reported-and-bisected-by: Wolfgang Walter <wolfgang.walter@stwm.de>
+Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/char/hw_random/via-rng.c |    8 ++++++++
+ drivers/crypto/padlock-aes.c     |   28 +++++++++++++++++++++++++++-
+ drivers/crypto/padlock-sha.c     |    9 +++++++++
+ include/asm-x86/i387.h           |   32 ++++++++++++++++++++++++++++++++
+ 4 files changed, 76 insertions(+), 1 deletion(-)
+
+--- a/drivers/char/hw_random/via-rng.c
++++ b/drivers/char/hw_random/via-rng.c
+@@ -31,6 +31,7 @@
+ #include <asm/io.h>
+ #include <asm/msr.h>
+ #include <asm/cpufeature.h>
++#include <asm/i387.h>
+ #define PFX   KBUILD_MODNAME ": "
+@@ -67,16 +68,23 @@ enum {
+  * Another possible performance boost may come from simply buffering
+  * until we have 4 bytes, thus returning a u32 at a time,
+  * instead of the current u8-at-a-time.
++ *
++ * Padlock instructions can generate a spurious DNA fault, so
++ * we have to call them in the context of irq_ts_save/restore()
+  */
+ static inline u32 xstore(u32 *addr, u32 edx_in)
+ {
+       u32 eax_out;
++      int ts_state;
++
++      ts_state = irq_ts_save();
+       asm(".byte 0x0F,0xA7,0xC0 /* xstore %%edi (addr=%0) */"
+               :"=m"(*addr), "=a"(eax_out)
+               :"D"(addr), "d"(edx_in));
++      irq_ts_restore(ts_state);
+       return eax_out;
+ }
+--- a/drivers/crypto/padlock-aes.c
++++ b/drivers/crypto/padlock-aes.c
+@@ -16,6 +16,7 @@
+ #include <linux/interrupt.h>
+ #include <linux/kernel.h>
+ #include <asm/byteorder.h>
++#include <asm/i387.h>
+ #include "padlock.h"
+ /* Control word. */
+@@ -141,6 +142,12 @@ static inline void padlock_reset_key(voi
+       asm volatile ("pushfl; popfl");
+ }
++/*
++ * While the padlock instructions don't use FP/SSE registers, they
++ * generate a spurious DNA fault when cr0.ts is '1'. These instructions
++ * should be used only inside the irq_ts_save/restore() context
++ */
++
+ static inline void padlock_xcrypt(const u8 *input, u8 *output, void *key,
+                                 void *control_word)
+ {
+@@ -205,15 +212,23 @@ static inline u8 *padlock_xcrypt_cbc(con
+ static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+ {
+       struct aes_ctx *ctx = aes_ctx(tfm);
++      int ts_state;
+       padlock_reset_key();
++
++      ts_state = irq_ts_save();
+       aes_crypt(in, out, ctx->E, &ctx->cword.encrypt);
++      irq_ts_restore(ts_state);
+ }
+ static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+ {
+       struct aes_ctx *ctx = aes_ctx(tfm);
++      int ts_state;
+       padlock_reset_key();
++
++      ts_state = irq_ts_save();
+       aes_crypt(in, out, ctx->D, &ctx->cword.decrypt);
++      irq_ts_restore(ts_state);
+ }
+ static struct crypto_alg aes_alg = {
+@@ -244,12 +259,14 @@ static int ecb_aes_encrypt(struct blkcip
+       struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
+       struct blkcipher_walk walk;
+       int err;
++      int ts_state;
+       padlock_reset_key();
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt(desc, &walk);
++      ts_state = irq_ts_save();
+       while ((nbytes = walk.nbytes)) {
+               padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
+                                  ctx->E, &ctx->cword.encrypt,
+@@ -257,6 +274,7 @@ static int ecb_aes_encrypt(struct blkcip
+               nbytes &= AES_BLOCK_SIZE - 1;
+               err = blkcipher_walk_done(desc, &walk, nbytes);
+       }
++      irq_ts_restore(ts_state);
+       return err;
+ }
+@@ -268,12 +286,14 @@ static int ecb_aes_decrypt(struct blkcip
+       struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
+       struct blkcipher_walk walk;
+       int err;
++      int ts_state;
+       padlock_reset_key();
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt(desc, &walk);
++      ts_state = irq_ts_save();
+       while ((nbytes = walk.nbytes)) {
+               padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
+                                  ctx->D, &ctx->cword.decrypt,
+@@ -281,7 +301,7 @@ static int ecb_aes_decrypt(struct blkcip
+               nbytes &= AES_BLOCK_SIZE - 1;
+               err = blkcipher_walk_done(desc, &walk, nbytes);
+       }
+-
++      irq_ts_restore(ts_state);
+       return err;
+ }
+@@ -314,12 +334,14 @@ static int cbc_aes_encrypt(struct blkcip
+       struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
+       struct blkcipher_walk walk;
+       int err;
++      int ts_state;
+       padlock_reset_key();
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt(desc, &walk);
++      ts_state = irq_ts_save();
+       while ((nbytes = walk.nbytes)) {
+               u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr,
+                                           walk.dst.virt.addr, ctx->E,
+@@ -329,6 +351,7 @@ static int cbc_aes_encrypt(struct blkcip
+               nbytes &= AES_BLOCK_SIZE - 1;
+               err = blkcipher_walk_done(desc, &walk, nbytes);
+       }
++      irq_ts_restore(ts_state);
+       return err;
+ }
+@@ -340,12 +363,14 @@ static int cbc_aes_decrypt(struct blkcip
+       struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
+       struct blkcipher_walk walk;
+       int err;
++      int ts_state;
+       padlock_reset_key();
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt(desc, &walk);
++      ts_state = irq_ts_save();
+       while ((nbytes = walk.nbytes)) {
+               padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr,
+                                  ctx->D, walk.iv, &ctx->cword.decrypt,
+@@ -354,6 +379,7 @@ static int cbc_aes_decrypt(struct blkcip
+               err = blkcipher_walk_done(desc, &walk, nbytes);
+       }
++      irq_ts_restore(ts_state);
+       return err;
+ }
+--- a/drivers/crypto/padlock-sha.c
++++ b/drivers/crypto/padlock-sha.c
+@@ -22,6 +22,7 @@
+ #include <linux/interrupt.h>
+ #include <linux/kernel.h>
+ #include <linux/scatterlist.h>
++#include <asm/i387.h>
+ #include "padlock.h"
+ #define SHA1_DEFAULT_FALLBACK "sha1-generic"
+@@ -102,6 +103,7 @@ static void padlock_do_sha1(const char *
+        *     PadLock microcode needs it that big. */
+       char buf[128+16];
+       char *result = NEAREST_ALIGNED(buf);
++      int ts_state;
+       ((uint32_t *)result)[0] = SHA1_H0;
+       ((uint32_t *)result)[1] = SHA1_H1;
+@@ -109,9 +111,12 @@ static void padlock_do_sha1(const char *
+       ((uint32_t *)result)[3] = SHA1_H3;
+       ((uint32_t *)result)[4] = SHA1_H4;
+  
++      /* prevent taking the spurious DNA fault with padlock. */
++      ts_state = irq_ts_save();
+       asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */
+                     : "+S"(in), "+D"(result)
+                     : "c"(count), "a"(0));
++      irq_ts_restore(ts_state);
+       padlock_output_block((uint32_t *)result, (uint32_t *)out, 5);
+ }
+@@ -123,6 +128,7 @@ static void padlock_do_sha256(const char
+        *     PadLock microcode needs it that big. */
+       char buf[128+16];
+       char *result = NEAREST_ALIGNED(buf);
++      int ts_state;
+       ((uint32_t *)result)[0] = SHA256_H0;
+       ((uint32_t *)result)[1] = SHA256_H1;
+@@ -133,9 +139,12 @@ static void padlock_do_sha256(const char
+       ((uint32_t *)result)[6] = SHA256_H6;
+       ((uint32_t *)result)[7] = SHA256_H7;
++      /* prevent taking the spurious DNA fault with padlock. */
++      ts_state = irq_ts_save();
+       asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */
+                     : "+S"(in), "+D"(result)
+                     : "c"(count), "a"(0));
++      irq_ts_restore(ts_state);
+       padlock_output_block((uint32_t *)result, (uint32_t *)out, 8);
+ }
+--- a/include/asm-x86/i387.h
++++ b/include/asm-x86/i387.h
+@@ -13,6 +13,7 @@
+ #include <linux/sched.h>
+ #include <linux/kernel_stat.h>
+ #include <linux/regset.h>
++#include <linux/hardirq.h>
+ #include <asm/asm.h>
+ #include <asm/processor.h>
+ #include <asm/sigcontext.h>
+@@ -290,6 +291,37 @@ static inline void kernel_fpu_end(void)
+       preempt_enable();
+ }
++/*
++ * Some instructions like VIA's padlock instructions generate a spurious
++ * DNA fault but don't modify SSE registers. And these instructions
++ * get used from interrupt context aswell. To prevent these kernel instructions
++ * in interrupt context interact wrongly with other user/kernel fpu usage, we
++ * should use them only in the context of irq_ts_save/restore()
++ */
++static inline int irq_ts_save(void)
++{
++      /*
++       * If we are in process context, we are ok to take a spurious DNA fault.
++       * Otherwise, doing clts() in process context require pre-emption to
++       * be disabled or some heavy lifting like kernel_fpu_begin()
++       */
++      if (!in_interrupt())
++              return 0;
++
++      if (read_cr0() & X86_CR0_TS) {
++              clts();
++              return 1;
++      }
++
++      return 0;
++}
++
++static inline void irq_ts_restore(int TS_state)
++{
++      if (TS_state)
++              stts();
++}
++
+ #ifdef CONFIG_X86_64
+ static inline void save_init_fpu(struct task_struct *tsk)
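
The pattern the patch establishes is uniform across the via-rng, AES, and SHA paths: take irq_ts_save() before a PadLock instruction and irq_ts_restore() after it, so a spurious DNA fault cannot fire from interrupt context. Below is a rough user-space simulation of the save/restore decision logic; the cr0_ts and in_irq flags stand in for read_cr0() & X86_CR0_TS and in_interrupt(), which obviously aren't available outside the kernel.

/* User-space simulation of the irq_ts_save()/irq_ts_restore() logic added
 * above.  cr0_ts and in_irq are stand-ins for read_cr0() & X86_CR0_TS and
 * in_interrupt(); a real PadLock xcrypt/xsha/xstore would run in between. */
#include <stdbool.h>
#include <stdio.h>

static bool cr0_ts = true;   /* simulated CR0.TS */
static bool in_irq = true;   /* simulated in_interrupt() */

static int irq_ts_save(void)
{
        if (!in_irq)              /* process context: a spurious DNA fault is ok */
                return 0;
        if (cr0_ts) {             /* interrupt context: clear TS for the instruction */
                cr0_ts = false;   /* clts() */
                return 1;
        }
        return 0;
}

static void irq_ts_restore(int ts_state)
{
        if (ts_state)
                cr0_ts = true;    /* stts() */
}

int main(void)
{
        int ts_state = irq_ts_save();
        /* ... PadLock instruction would execute here with TS guaranteed clear ... */
        printf("TS cleared around instruction: %s\n", ts_state ? "yes" : "no");
        irq_ts_restore(ts_state);
        printf("TS after restore: %s\n", cr0_ts ? "set" : "clear");
        return 0;
}
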
diff --git a/queue-2.6.26/pci-limit-vpd-length-for-broadcom-5708s.patch b/queue-2.6.26/pci-limit-vpd-length-for-broadcom-5708s.patch
new file mode 100644 (file)
index 0000000..0d2b1c2
--- /dev/null
@@ -0,0 +1,38 @@
+From 35405f256de924be56ea5edaca4cdc627f1bb0f8 Mon Sep 17 00:00:00 2001
+From: Dean Hildebrand <seattleplus@gmail.com>
+Date: Thu, 7 Aug 2008 17:31:45 -0700
+Subject: PCI: Limit VPD length for Broadcom 5708S
+Message-Id: <200807311319.24549.jbarnes@virtuousgeek.org>
+
+From: Dean Hildebrand <seattleplus@gmail.com>
+
+commit 35405f256de924be56ea5edaca4cdc627f1bb0f8 upstream
+
+The BCM5706S won't work correctly unless the VPD length is truncated to 128.
+
+Signed-off-by: Dean Hildebrand <dhildeb@us.ibm.com>
+Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/pci/quirks.c |    7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -1683,9 +1683,14 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_VI
+  */
+ static void __devinit quirk_brcm_570x_limit_vpd(struct pci_dev *dev)
+ {
+-      /*  Only disable the VPD capability for 5706, 5708, and 5709 rev. A */
++      /*
++       * Only disable the VPD capability for 5706, 5706S, 5708,
++       * 5708S and 5709 rev. A
++       */
+       if ((dev->device == PCI_DEVICE_ID_NX2_5706) ||
++          (dev->device == PCI_DEVICE_ID_NX2_5706S) ||
+           (dev->device == PCI_DEVICE_ID_NX2_5708) ||
++          (dev->device == PCI_DEVICE_ID_NX2_5708S) ||
+           ((dev->device == PCI_DEVICE_ID_NX2_5709) &&
+            (dev->revision & 0xf0) == 0x0)) {
+               if (dev->vpd)
diff --git a/queue-2.6.26/qla2xxx-add-dev_loss_tmo_callbk-terminate_rport_io-callback-support.patch b/queue-2.6.26/qla2xxx-add-dev_loss_tmo_callbk-terminate_rport_io-callback-support.patch
new file mode 100644 (file)
index 0000000..39ec16a
--- /dev/null
@@ -0,0 +1,249 @@
+From andrew.vasquez@qlogic.com  Mon Aug 18 10:33:06 2008
+From: Seokmann Ju <seokmann.ju@qlogic.com>
+Date: Thu, 14 Aug 2008 09:37:34 -0700
+Subject: qla2xxx: Add dev_loss_tmo_callbk/terminate_rport_io callback support.
+To: stable@kernel.org
+Message-ID: <20080814163734.GC75223@plap4-2.qlogic.org>
+
+From: Seokmann Ju <seokmann.ju@qlogic.com>
+
+[ Upstream commit 5f3a9a207f1fccde476dd31b4c63ead2967d934f ]
+
+Signed-off-by: Seokmann Ju <seokmann.ju@qlogic.com>
+Signed-off-by: Andrew Vasquez <andrew.vasquez@qlogic.com>
+Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/scsi/qla2xxx/qla_attr.c |   31 +++++++++++++++++++++++
+ drivers/scsi/qla2xxx/qla_def.h  |    1 
+ drivers/scsi/qla2xxx/qla_gbl.h  |    2 +
+ drivers/scsi/qla2xxx/qla_init.c |   16 +++---------
+ drivers/scsi/qla2xxx/qla_os.c   |   53 ++++++++++++++++++++++++++++++----------
+ 5 files changed, 78 insertions(+), 25 deletions(-)
+
+--- a/drivers/scsi/qla2xxx/qla_attr.c
++++ b/drivers/scsi/qla2xxx/qla_attr.c
+@@ -994,6 +994,33 @@ qla2x00_set_rport_loss_tmo(struct fc_rpo
+       rport->dev_loss_tmo = ha->port_down_retry_count + 5;
+ }
++static void
++qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
++{
++      struct Scsi_Host *host = rport_to_shost(rport);
++      fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
++
++      qla2x00_abort_fcport_cmds(fcport);
++
++      /*
++       * Transport has effectively 'deleted' the rport, clear
++       * all local references.
++       */
++      spin_lock_irq(host->host_lock);
++      fcport->rport = NULL;
++      *((fc_port_t **)rport->dd_data) = NULL;
++      spin_unlock_irq(host->host_lock);
++}
++
++static void
++qla2x00_terminate_rport_io(struct fc_rport *rport)
++{
++      fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
++
++      qla2x00_abort_fcport_cmds(fcport);
++      scsi_target_unblock(&rport->dev);
++}
++
+ static int
+ qla2x00_issue_lip(struct Scsi_Host *shost)
+ {
+@@ -1253,6 +1280,8 @@ struct fc_function_template qla2xxx_tran
+       .show_rport_dev_loss_tmo = 1,
+       .issue_fc_host_lip = qla2x00_issue_lip,
++      .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
++      .terminate_rport_io = qla2x00_terminate_rport_io,
+       .get_fc_host_stats = qla2x00_get_fc_host_stats,
+       .vport_create = qla24xx_vport_create,
+@@ -1296,6 +1325,8 @@ struct fc_function_template qla2xxx_tran
+       .show_rport_dev_loss_tmo = 1,
+       .issue_fc_host_lip = qla2x00_issue_lip,
++      .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
++      .terminate_rport_io = qla2x00_terminate_rport_io,
+       .get_fc_host_stats = qla2x00_get_fc_host_stats,
+ };
+--- a/drivers/scsi/qla2xxx/qla_def.h
++++ b/drivers/scsi/qla2xxx/qla_def.h
+@@ -1544,7 +1544,6 @@ typedef struct fc_port {
+       int login_retry;
+       atomic_t port_down_timer;
+-      spinlock_t rport_lock;
+       struct fc_rport *rport, *drport;
+       u32 supported_classes;
+--- a/drivers/scsi/qla2xxx/qla_gbl.h
++++ b/drivers/scsi/qla2xxx/qla_gbl.h
+@@ -71,6 +71,8 @@ extern int qla2x00_post_aen_work(struct 
+ extern int qla2x00_post_hwe_work(struct scsi_qla_host *, uint16_t , uint16_t,
+     uint16_t, uint16_t);
++extern void qla2x00_abort_fcport_cmds(fc_port_t *);
++
+ /*
+  * Global Functions in qla_mid.c source file.
+  */
+--- a/drivers/scsi/qla2xxx/qla_init.c
++++ b/drivers/scsi/qla2xxx/qla_init.c
+@@ -1864,12 +1864,11 @@ qla2x00_rport_del(void *data)
+ {
+       fc_port_t *fcport = data;
+       struct fc_rport *rport;
+-      unsigned long flags;
+-      spin_lock_irqsave(&fcport->rport_lock, flags);
++      spin_lock_irq(fcport->ha->host->host_lock);
+       rport = fcport->drport;
+       fcport->drport = NULL;
+-      spin_unlock_irqrestore(&fcport->rport_lock, flags);
++      spin_unlock_irq(fcport->ha->host->host_lock);
+       if (rport)
+               fc_remote_port_delete(rport);
+ }
+@@ -1898,7 +1897,6 @@ qla2x00_alloc_fcport(scsi_qla_host_t *ha
+       atomic_set(&fcport->state, FCS_UNCONFIGURED);
+       fcport->flags = FCF_RLC_SUPPORT;
+       fcport->supported_classes = FC_COS_UNSPECIFIED;
+-      spin_lock_init(&fcport->rport_lock);
+       return fcport;
+ }
+@@ -2243,28 +2241,24 @@ qla2x00_reg_remote_port(scsi_qla_host_t 
+ {
+       struct fc_rport_identifiers rport_ids;
+       struct fc_rport *rport;
+-      unsigned long flags;
+       if (fcport->drport)
+               qla2x00_rport_del(fcport);
+-      if (fcport->rport)
+-              return;
+       rport_ids.node_name = wwn_to_u64(fcport->node_name);
+       rport_ids.port_name = wwn_to_u64(fcport->port_name);
+       rport_ids.port_id = fcport->d_id.b.domain << 16 |
+           fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
+       rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
+-      rport = fc_remote_port_add(ha->host, 0, &rport_ids);
++      fcport->rport = rport = fc_remote_port_add(ha->host, 0, &rport_ids);
+       if (!rport) {
+               qla_printk(KERN_WARNING, ha,
+                   "Unable to allocate fc remote port!\n");
+               return;
+       }
+-      spin_lock_irqsave(&fcport->rport_lock, flags);
+-      fcport->rport = rport;
++      spin_lock_irq(fcport->ha->host->host_lock);
+       *((fc_port_t **)rport->dd_data) = fcport;
+-      spin_unlock_irqrestore(&fcport->rport_lock, flags);
++      spin_unlock_irq(fcport->ha->host->host_lock);
+       rport->supported_classes = fcport->supported_classes;
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -388,7 +388,7 @@ qla2x00_queuecommand(struct scsi_cmnd *c
+       }
+       /* Close window on fcport/rport state-transitioning. */
+-      if (!*(fc_port_t **)rport->dd_data) {
++      if (fcport->drport) {
+               cmd->result = DID_IMM_RETRY << 16;
+               goto qc_fail_command;
+       }
+@@ -455,7 +455,7 @@ qla24xx_queuecommand(struct scsi_cmnd *c
+       }
+       /* Close window on fcport/rport state-transitioning. */
+-      if (!*(fc_port_t **)rport->dd_data) {
++      if (fcport->drport) {
+               cmd->result = DID_IMM_RETRY << 16;
+               goto qc24_fail_command;
+       }
+@@ -617,6 +617,40 @@ qla2x00_wait_for_loop_ready(scsi_qla_hos
+       return (return_status);
+ }
++void
++qla2x00_abort_fcport_cmds(fc_port_t *fcport)
++{
++      int cnt;
++      unsigned long flags;
++      srb_t *sp;
++      scsi_qla_host_t *ha = fcport->ha;
++      scsi_qla_host_t *pha = to_qla_parent(ha);
++
++      spin_lock_irqsave(&pha->hardware_lock, flags);
++      for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
++              sp = pha->outstanding_cmds[cnt];
++              if (!sp)
++                      continue;
++              if (sp->fcport != fcport)
++                      continue;
++
++              spin_unlock_irqrestore(&pha->hardware_lock, flags);
++              if (ha->isp_ops->abort_command(ha, sp)) {
++                      DEBUG2(qla_printk(KERN_WARNING, ha,
++                          "Abort failed --  %lx\n", sp->cmd->serial_number));
++              } else {
++                      if (qla2x00_eh_wait_on_command(ha, sp->cmd) !=
++                          QLA_SUCCESS)
++                              DEBUG2(qla_printk(KERN_WARNING, ha,
++                                  "Abort failed while waiting --  %lx\n",
++                                  sp->cmd->serial_number));
++
++              }
++              spin_lock_irqsave(&pha->hardware_lock, flags);
++      }
++      spin_unlock_irqrestore(&pha->hardware_lock, flags);
++}
++
+ static void
+ qla2x00_block_error_handler(struct scsi_cmnd *cmnd)
+ {
+@@ -1813,7 +1847,6 @@ static inline void
+ qla2x00_schedule_rport_del(struct scsi_qla_host *ha, fc_port_t *fcport,
+     int defer)
+ {
+-      unsigned long flags;
+       struct fc_rport *rport;
+       if (!fcport->rport)
+@@ -1821,19 +1854,13 @@ qla2x00_schedule_rport_del(struct scsi_q
+       rport = fcport->rport;
+       if (defer) {
+-              spin_lock_irqsave(&fcport->rport_lock, flags);
++              spin_lock_irq(ha->host->host_lock);
+               fcport->drport = rport;
+-              fcport->rport = NULL;
+-              *(fc_port_t **)rport->dd_data = NULL;
+-              spin_unlock_irqrestore(&fcport->rport_lock, flags);
++              spin_unlock_irq(ha->host->host_lock);
+               set_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags);
+-      } else {
+-              spin_lock_irqsave(&fcport->rport_lock, flags);
+-              fcport->rport = NULL;
+-              *(fc_port_t **)rport->dd_data = NULL;
+-              spin_unlock_irqrestore(&fcport->rport_lock, flags);
++              qla2xxx_wake_dpc(ha);
++      } else
+               fc_remote_port_delete(rport);
+-      }
+ }
+ /*
diff --git a/queue-2.6.26/qla2xxx-set-an-rport-s-dev_loss_tmo-value-in-a-consistent-manner.patch b/queue-2.6.26/qla2xxx-set-an-rport-s-dev_loss_tmo-value-in-a-consistent-manner.patch
new file mode 100644 (file)
index 0000000..b4e725a
--- /dev/null
@@ -0,0 +1,83 @@
+From andrew.vasquez@qlogic.com  Mon Aug 18 10:34:07 2008
+From: Andrew Vasquez <andrew.vasquez@qlogic.com>
+Date: Thu, 14 Aug 2008 09:39:35 -0700
+Subject: qla2xxx: Set an rport's dev_loss_tmo value in a consistent manner.
+To: stable@kernel.org
+Message-ID: <20080814163935.GD75223@plap4-2.qlogic.org>
+
+From: Andrew Vasquez <andrew.vasquez@qlogic.com>
+
+[ Upstream commit 85821c906cf3563a00a3d98fa380a2581a7a5ff1 ]
+
+As there's no point in adding a fixed-fudge value (originally 5
+seconds), honor the user settings only.  We also remove the
+driver's dead-callback get_rport_dev_loss_tmo function
+(qla2x00_get_rport_loss_tmo()).
+
+Signed-off-by: Andrew Vasquez <andrew.vasquez@qlogic.com>
+Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/scsi/qla2xxx/qla_attr.c |   20 ++------------------
+ drivers/scsi/qla2xxx/qla_os.c   |    2 +-
+ 2 files changed, 3 insertions(+), 19 deletions(-)
+
+--- a/drivers/scsi/qla2xxx/qla_attr.c
++++ b/drivers/scsi/qla2xxx/qla_attr.c
+@@ -972,26 +972,12 @@ qla2x00_get_starget_port_id(struct scsi_
+ }
+ static void
+-qla2x00_get_rport_loss_tmo(struct fc_rport *rport)
+-{
+-      struct Scsi_Host *host = rport_to_shost(rport);
+-      scsi_qla_host_t *ha = shost_priv(host);
+-
+-      rport->dev_loss_tmo = ha->port_down_retry_count + 5;
+-}
+-
+-static void
+ qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
+ {
+-      struct Scsi_Host *host = rport_to_shost(rport);
+-      scsi_qla_host_t *ha = shost_priv(host);
+-
+       if (timeout)
+-              ha->port_down_retry_count = timeout;
++              rport->dev_loss_tmo = timeout;
+       else
+-              ha->port_down_retry_count = 1;
+-
+-      rport->dev_loss_tmo = ha->port_down_retry_count + 5;
++              rport->dev_loss_tmo = 1;
+ }
+ static void
+@@ -1275,7 +1261,6 @@ struct fc_function_template qla2xxx_tran
+       .get_starget_port_id  = qla2x00_get_starget_port_id,
+       .show_starget_port_id = 1,
+-      .get_rport_dev_loss_tmo = qla2x00_get_rport_loss_tmo,
+       .set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
+       .show_rport_dev_loss_tmo = 1,
+@@ -1320,7 +1305,6 @@ struct fc_function_template qla2xxx_tran
+       .get_starget_port_id  = qla2x00_get_starget_port_id,
+       .show_starget_port_id = 1,
+-      .get_rport_dev_loss_tmo = qla2x00_get_rport_loss_tmo,
+       .set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
+       .show_rport_dev_loss_tmo = 1,
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -1107,7 +1107,7 @@ qla2xxx_slave_configure(struct scsi_devi
+       else
+               scsi_deactivate_tcq(sdev, ha->max_q_depth);
+-      rport->dev_loss_tmo = ha->port_down_retry_count + 5;
++      rport->dev_loss_tmo = ha->port_down_retry_count;
+       return 0;
+ }
diff --git a/queue-2.6.26/series b/queue-2.6.26/series
index bd2f53cacbd26ae992a46bb7598b5dfe8360c2c8..ad2834605be6b6a9ef4044ede5b583bcef01e1ad 100644 (file)
@@ -49,3 +49,12 @@ rtl8187-fix-lockups-due-to-concurrent-access-to-config-routine.patch
 0004-sparc64-Make-global-reg-dumping-even-more-useful.patch
 0005-sparc64-Implement-IRQ-stacks.patch
 0006-sparc64-Handle-stack-trace-attempts-before-irqstack.patch
+x86-fix-spin_is_contended.patch
+x86-fix-setup-code-crashes-on-my-old-486-box.patch
+qla2xxx-add-dev_loss_tmo_callbk-terminate_rport_io-callback-support.patch
+qla2xxx-set-an-rport-s-dev_loss_tmo-value-in-a-consistent-manner.patch
+usb-storage-revert-dma-alignment-change-for-wireless-usb.patch
+usb-storage-automatically-recognize-bad-residues.patch
+cifs-properly-account-for-new-user-field-in-spnego-upcall-string-allocation.patch
+pci-limit-vpd-length-for-broadcom-5708s.patch
+crypto-padlock-fix-via-padlock-instruction-usage-with-irq_ts_save-restore.patch
diff --git a/queue-2.6.26/usb-storage-automatically-recognize-bad-residues.patch b/queue-2.6.26/usb-storage-automatically-recognize-bad-residues.patch
new file mode 100644 (file)
index 0000000..0e7398b
--- /dev/null
@@ -0,0 +1,55 @@
+From stern@rowland.harvard.edu  Mon Aug 18 10:39:06 2008
+From: Alan Stern <stern@rowland.harvard.edu>
+Date: Tue, 5 Aug 2008 10:46:23 -0400 (EDT)
+Subject: usb-storage: automatically recognize bad residues
+To: stable@kernel.org
+Message-ID: <Pine.LNX.4.44L0.0808051042120.2518-100000@iolanthe.rowland.org>
+
+From: Alan Stern <stern@rowland.harvard.edu>
+
+commit 59f4ff2ecff4cef36378928cec891785b402e80c upstream
+
+This patch (as1119b) will help to reduce the clutter of usb-storage's
+unusual_devs file by automatically detecting some devices that need
+the IGNORE_RESIDUE flag.  The idea is that devices should never return
+a non-zero residue for an INQUIRY or a READ CAPACITY command unless
+they failed to transfer all the requested data.  So if one of these
+commands transfers a standard amount of data but there is a positive
+residue, we know that the residue is bogus and we can set the flag.
+
+This fixes the problems reported in Bugzilla #11125.
+
+Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
+Tested-by: Matthew Frost <artusemrys@sbcglobal.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/usb/storage/transport.c |   17 +++++++++++++++--
+ 1 file changed, 15 insertions(+), 2 deletions(-)
+
+--- a/drivers/usb/storage/transport.c
++++ b/drivers/usb/storage/transport.c
+@@ -1034,8 +1034,21 @@ int usb_stor_Bulk_transport(struct scsi_
+       /* try to compute the actual residue, based on how much data
+        * was really transferred and what the device tells us */
+-      if (residue) {
+-              if (!(us->flags & US_FL_IGNORE_RESIDUE)) {
++      if (residue && !(us->flags & US_FL_IGNORE_RESIDUE)) {
++
++              /* Heuristically detect devices that generate bogus residues
++               * by seeing what happens with INQUIRY and READ CAPACITY
++               * commands.
++               */
++              if (bcs->Status == US_BULK_STAT_OK &&
++                              scsi_get_resid(srb) == 0 &&
++                                      ((srb->cmnd[0] == INQUIRY &&
++                                              transfer_length == 36) ||
++                                      (srb->cmnd[0] == READ_CAPACITY &&
++                                              transfer_length == 8))) {
++                      us->flags |= US_FL_IGNORE_RESIDUE;
++
++              } else {
+                       residue = min(residue, transfer_length);
+                       scsi_set_resid(srb, max(scsi_get_resid(srb),
+                                                              (int) residue));
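
The new heuristic boils down to a single predicate: a residue reported alongside good status and a fully transferred, fixed-size reply to INQUIRY (36 bytes) or READ CAPACITY (8 bytes) must be bogus. Here is a stand-alone restatement of that check; the SCSI opcodes are written out numerically and the inputs are simplified from the driver.

/* Stand-alone restatement of the bogus-residue heuristic added above.
 * Opcodes 0x12 (INQUIRY) and 0x25 (READ CAPACITY(10)) and the
 * "status good, nothing short so far" inputs are simplified from the driver. */
#include <stdbool.h>
#include <stdio.h>

static bool residue_is_bogus(unsigned char opcode, unsigned int residue,
                             unsigned int transfer_length,
                             unsigned int resid_so_far, bool status_good)
{
        if (!residue || !status_good || resid_so_far != 0)
                return false;
        /* These commands never legitimately leave a residue when their full,
         * fixed-size reply was transferred. */
        return (opcode == 0x12 && transfer_length == 36) ||
               (opcode == 0x25 && transfer_length == 8);
}

int main(void)
{
        /* Device reported residue 36 for a fully transferred 36-byte INQUIRY:
         * flag it and ignore its residues from then on. */
        printf("set IGNORE_RESIDUE: %s\n",
               residue_is_bogus(0x12, 36, 36, 0, true) ? "yes" : "no");
        return 0;
}
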
diff --git a/queue-2.6.26/usb-storage-revert-dma-alignment-change-for-wireless-usb.patch b/queue-2.6.26/usb-storage-revert-dma-alignment-change-for-wireless-usb.patch
new file mode 100644 (file)
index 0000000..996bdec
--- /dev/null
@@ -0,0 +1,76 @@
+From stern@rowland.harvard.edu  Mon Aug 18 10:37:50 2008
+From: Alan Stern <stern@rowland.harvard.edu>
+Date: Mon, 11 Aug 2008 10:08:17 -0400 (EDT)
+Subject: usb-storage: revert DMA-alignment change for Wireless USB
+To: stable@kernel.org
+Message-ID: <Pine.LNX.4.44L0.0808111005210.2546-100000@iolanthe.rowland.org>
+
+From: Alan Stern <stern@rowland.harvard.edu>
+
+commit f756cbd458ab71c996a069cb3928fb1e2d7cd9cc upstream.
+
+This patch (as1110) reverts an earlier patch meant to help with
+Wireless USB host controllers.  These controllers can have bulk
+maxpacket values larger than 512, which puts unusual constraints on
+the sizes of scatter-gather list elements.  However it turns out that
+the block layer does not provide the support we need to enforce these
+constraints; merely changing the DMA alignment mask doesn't help.
+Hence there's no reason to keep the original patch.  The Wireless USB
+problem will have to be solved a different way.
+
+In addition, there is a reason to get rid of the earlier patch.  By
+dereferencing a pointer stored in the ep_in array of struct
+usb_device, the current code risks an invalid memory access when it
+runs concurrently with device removal.  The members of that array are
+cleared before the driver's disconnect method is called, so it should
+not try to use them.
+
+Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/usb/storage/scsiglue.c |   25 +++++++++++++++----------
+ 1 file changed, 15 insertions(+), 10 deletions(-)
+
+--- a/drivers/usb/storage/scsiglue.c
++++ b/drivers/usb/storage/scsiglue.c
+@@ -73,7 +73,6 @@ static const char* host_info(struct Scsi
+ static int slave_alloc (struct scsi_device *sdev)
+ {
+       struct us_data *us = host_to_us(sdev->host);
+-      struct usb_host_endpoint *bulk_in_ep;
+       /*
+        * Set the INQUIRY transfer length to 36.  We don't use any of
+@@ -82,16 +81,22 @@ static int slave_alloc (struct scsi_devi
+        */
+       sdev->inquiry_len = 36;
+-      /* Scatter-gather buffers (all but the last) must have a length
+-       * divisible by the bulk maxpacket size.  Otherwise a data packet
+-       * would end up being short, causing a premature end to the data
+-       * transfer.  We'll use the maxpacket value of the bulk-IN pipe
+-       * to set the SCSI device queue's DMA alignment mask.
++      /* USB has unusual DMA-alignment requirements: Although the
++       * starting address of each scatter-gather element doesn't matter,
++       * the length of each element except the last must be divisible
++       * by the Bulk maxpacket value.  There's currently no way to
++       * express this by block-layer constraints, so we'll cop out
++       * and simply require addresses to be aligned at 512-byte
++       * boundaries.  This is okay since most block I/O involves
++       * hardware sectors that are multiples of 512 bytes in length,
++       * and since host controllers up through USB 2.0 have maxpacket
++       * values no larger than 512.
++       *
++       * But it doesn't suffice for Wireless USB, where Bulk maxpacket
++       * values can be as large as 2048.  To make that work properly
++       * will require changes to the block layer.
+        */
+-      bulk_in_ep = us->pusb_dev->ep_in[usb_pipeendpoint(us->recv_bulk_pipe)];
+-      blk_queue_update_dma_alignment(sdev->request_queue,
+-                      le16_to_cpu(bulk_in_ep->desc.wMaxPacketSize) - 1);
+-                      /* wMaxPacketSize must be a power of 2 */
++      blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1));
+       /*
+        * The UFI spec treates the Peripheral Qualifier bits in an
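
The arithmetic behind the revert is worth spelling out: a 512-byte DMA alignment mask makes every non-final scatter-gather element a multiple of 512, which is automatically a multiple of any wired-USB bulk maxpacket (8, 16, 32, 64 or 512) but not necessarily of the larger Wireless USB maxpacket values the old code tried to handle. A small hedged illustration follows; the 2048-byte maxpacket is the upper bound mentioned in the commit message, not a specific device's value.

/* Illustrates why the 512-byte alignment restored above is sufficient for
 * wired USB but not for Wireless USB. */
#include <stdio.h>

static int sg_element_ok(unsigned int sg_len, unsigned int maxpacket)
{
        /* A non-final SG element must be a whole number of bulk packets,
         * otherwise the device sees a short packet mid-transfer. */
        return (sg_len % maxpacket) == 0;
}

int main(void)
{
        unsigned int sg_len = 1536;   /* a 512-aligned element length */
        unsigned int sizes[] = { 64, 512, 2048 };

        for (unsigned int i = 0; i < 3; i++)
                printf("maxpacket %4u: %s\n", sizes[i],
                       sg_element_ok(sg_len, sizes[i]) ?
                       "ok" : "would end the transfer early");
        return 0;
}
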
diff --git a/queue-2.6.26/x86-fix-setup-code-crashes-on-my-old-486-box.patch b/queue-2.6.26/x86-fix-setup-code-crashes-on-my-old-486-box.patch
new file mode 100644 (file)
index 0000000..4c1a11d
--- /dev/null
@@ -0,0 +1,101 @@
+From jejb@kernel.org  Mon Aug 18 10:27:40 2008
+From: Joerg Roedel <joro@8bytes.org>
+Date: Sun, 17 Aug 2008 00:25:07 GMT
+Subject: x86: fix setup code crashes on my old 486 box
+To: jejb@kernel.org, stable@kernel.org
+Message-ID: <200808170025.m7H0P7Ho026184@hera.kernel.org>
+
+From: Joerg Roedel <joro@8bytes.org>
+
+commit 7b27718bdb1b70166383dec91391df5534d449ee upstream
+
+Yesterday I tried to reactivate my old 486 box and wanted to install a
+current Linux with the latest kernel on it. But it turned out that the
+latest kernel does not boot because the machine crashes early in the
+setup code.
+
+After some debugging it turned out that the problem is the query_ist()
+function. If the BIOS interrupt behind that function is called, the machine
+simply locks up. It looks like a BIOS bug. Looking for a workaround for
+this problem, I wrote the attached patch. It checks for the CPUID
+instruction and, if it is not implemented, does not call the speedstep
+BIOS function. As far as I know, speedstep should have been available since
+some of the earliest Pentiums.
+
+Alan Cox observed that it's available since the Pentium II, so cpuid
+levels 4 and 5 can be excluded altogether.
+
+H. Peter Anvin cleaned up the code some more:
+
+> Right in concept, but I dislike the implementation (duplication of the
+> CPU detect code we already have).  Could you try this patch and see if
+> it works for you?
+
+which, with a small modification to fix a build error, results in a
+kernel that boots on my machine.
+
+Signed-off-by: Joerg Roedel <joro@8bytes.org>
+Signed-off-by: "H. Peter Anvin" <hpa@zytor.com>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/boot/boot.h     |    8 ++++++++
+ arch/x86/boot/cpucheck.c |    8 +-------
+ arch/x86/boot/main.c     |    4 ++++
+ 3 files changed, 13 insertions(+), 7 deletions(-)
+
+--- a/arch/x86/boot/boot.h
++++ b/arch/x86/boot/boot.h
+@@ -25,6 +25,8 @@
+ #include <asm/boot.h>
+ #include <asm/setup.h>
++#define NCAPINTS   8
++
+ /* Useful macros */
+ #define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
+@@ -242,6 +244,12 @@ int cmdline_find_option(const char *opti
+ int cmdline_find_option_bool(const char *option);
+ /* cpu.c, cpucheck.c */
++struct cpu_features {
++      int level;              /* Family, or 64 for x86-64 */
++      int model;
++      u32 flags[NCAPINTS];
++};
++extern struct cpu_features cpu;
+ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr);
+ int validate_cpu(void);
+--- a/arch/x86/boot/cpucheck.c
++++ b/arch/x86/boot/cpucheck.c
+@@ -30,13 +30,7 @@
+ #include <asm/required-features.h>
+ #include <asm/msr-index.h>
+-struct cpu_features {
+-      int level;              /* Family, or 64 for x86-64 */
+-      int model;
+-      u32 flags[NCAPINTS];
+-};
+-
+-static struct cpu_features cpu;
++struct cpu_features cpu;
+ static u32 cpu_vendor[3];
+ static u32 err_flags[NCAPINTS];
+--- a/arch/x86/boot/main.c
++++ b/arch/x86/boot/main.c
+@@ -73,6 +73,10 @@ static void keyboard_set_repeat(void)
+  */
+ static void query_ist(void)
+ {
++      /* Some 486 BIOSes apparently crash on this call */
++      if (cpu.level < 6)
++              return;
++
+       asm("int $0x15"
+           : "=a" (boot_params.ist_info.signature),
+             "=b" (boot_params.ist_info.command),
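
For reference, the classic way boot-time CPU detection decides whether CPUID exists at all is by testing whether the ID bit (bit 21) of EFLAGS can be toggled; the final patch simply reuses the already-computed cpu.level instead. Below is a hedged sketch of that EFLAGS test, modelled on the kernel's has_eflag()-style check, so treat the inline asm as illustrative rather than the exact boot-code implementation.

/* Sketch of the EFLAGS.ID toggle test behind "does this CPU have CPUID?". */
#include <stdio.h>

#define X86_EFLAGS_ID 0x00200000  /* ID flag, bit 21 */

static int has_cpuid(void)
{
        unsigned long f0, f1;

        asm volatile("pushf\n\t"        /* save original flags          */
                     "pushf\n\t"
                     "pop %0\n\t"       /* f0 = current EFLAGS          */
                     "mov %0,%1\n\t"
                     "xor %2,%1\n\t"    /* flip the ID bit              */
                     "push %1\n\t"
                     "popf\n\t"         /* try to write it back         */
                     "pushf\n\t"
                     "pop %1\n\t"       /* f1 = what actually stuck     */
                     "popf"             /* restore original flags       */
                     : "=&r" (f0), "=&r" (f1)
                     : "ri" ((unsigned long)X86_EFLAGS_ID)
                     : "cc");

        return !!((f0 ^ f1) & X86_EFLAGS_ID);   /* toggled => CPUID exists */
}

int main(void)
{
        printf("CPUID available: %s\n", has_cpuid() ? "yes" : "no");
        return 0;
}
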
diff --git a/queue-2.6.26/x86-fix-spin_is_contended.patch b/queue-2.6.26/x86-fix-spin_is_contended.patch
new file mode 100644 (file)
index 0000000..ca88675
--- /dev/null
@@ -0,0 +1,43 @@
+From jejb@kernel.org  Mon Aug 18 10:26:52 2008
+From: Jan Beulich <jbeulich@novell.com>
+Date: Sun, 17 Aug 2008 00:25:05 GMT
+Subject: x86: fix spin_is_contended()
+To: jejb@kernel.org, stable@kernel.org
+Message-ID: <200808170025.m7H0P5Gq026121@hera.kernel.org>
+
+From: Jan Beulich <jbeulich@novell.com>
+
+commit 7bc069c6bc4ede519a7116be1b9e149a1dbf787a upstream
+
+The masked difference is what needs to be compared against 1, rather
+than the difference of masked values (which can be negative).
+
+Signed-off-by: Jan Beulich <jbeulich@novell.com>
+Acked-by: Nick Piggin <npiggin@suse.de>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ include/asm-x86/spinlock.h |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/include/asm-x86/spinlock.h
++++ b/include/asm-x86/spinlock.h
+@@ -65,7 +65,7 @@ static inline int __raw_spin_is_contende
+ {
+       int tmp = ACCESS_ONCE(lock->slock);
+-      return (((tmp >> 8) & 0xff) - (tmp & 0xff)) > 1;
++      return (((tmp >> 8) - tmp) & 0xff) > 1;
+ }
+ static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
+@@ -129,7 +129,7 @@ static inline int __raw_spin_is_contende
+ {
+       int tmp = ACCESS_ONCE(lock->slock);
+-      return (((tmp >> 16) & 0xffff) - (tmp & 0xffff)) > 1;
++      return (((tmp >> 16) - tmp) & 0xffff) > 1;
+ }
+ static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
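
The difference between the two expressions only matters once the ticket counters wrap. Below is a small demonstration, assuming (as the xadd-based lock fast path suggests) that the high byte of slock holds the next-ticket counter and the low byte the ticket currently being served.

/* Demonstrates the wraparound case fixed above: the old expression subtracts
 * two already-masked bytes and can go negative, the new one masks the
 * difference.  The byte layout (next ticket in bits 8-15, owner in bits 0-7)
 * is an assumption taken from the xadd-based lock fast path. */
#include <stdio.h>

int main(void)
{
        /* Owner is serving ticket 0xfe and the next-ticket counter has wrapped
         * to 0x00: one holder plus one waiter, so the lock is contended. */
        int tmp = (0x00 << 8) | 0xfe;

        int old_way = (((tmp >> 8) & 0xff) - (tmp & 0xff)) > 1;  /* 0 - 254 -> not > 1 */
        int new_way = (((tmp >> 8) - tmp) & 0xff) > 1;           /* (-254) & 0xff = 2  */

        printf("old expression says contended: %d\n", old_way);  /* 0 (wrong)   */
        printf("new expression says contended: %d\n", new_way);  /* 1 (correct) */
        return 0;
}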