--- /dev/null
+From c441b8d2cb2194b05550a558d6d95d8944e56a84 Mon Sep 17 00:00:00 2001
+From: Michael Chan <mchan@broadcom.com>
+Date: Tue, 27 Apr 2010 11:28:09 +0000
+Subject: bnx2: Fix lost MSI-X problem on 5709 NICs.
+
+From: Michael Chan <mchan@broadcom.com>
+
+commit c441b8d2cb2194b05550a558d6d95d8944e56a84 upstream.
+
+It has been reported that under certain heavy traffic conditions in MSI-X
+mode, the driver can lose an MSI-X vector causing all packets in the
+associated rx/tx ring pair to be dropped. The problem is caused by
+the chip dropping the write to unmask the MSI-X vector by the kernel
+(when migrating the IRQ for example).
+
+This can be prevented by increasing the GRC timeout value for these
+register read and write operations.
+
+Thanks to Dell for helping us debug this problem.
+
+Signed-off-by: Michael Chan <mchan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/bnx2.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/bnx2.c
++++ b/drivers/net/bnx2.c
+@@ -4752,8 +4752,12 @@ bnx2_reset_chip(struct bnx2 *bp, u32 res
+ rc = bnx2_alloc_bad_rbuf(bp);
+ }
+
+- if (bp->flags & BNX2_FLAG_USING_MSIX)
++ if (bp->flags & BNX2_FLAG_USING_MSIX) {
+ bnx2_setup_msix_tbl(bp);
++ /* Prevent MSIX table reads and write from timing out */
++ REG_WR(bp, BNX2_MISC_ECO_HW_CTL,
++ BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
++ }
+
+ return rc;
+ }
--- /dev/null
+From 56151e753468e34aeb322af4b0309ab727c97d2e Mon Sep 17 00:00:00 2001
+From: Wufei <fei.wu@windriver.com>
+Date: Wed, 28 Apr 2010 17:42:32 -0400
+Subject: kgdb: don't needlessly skip PAGE_USER test for Fsl booke
+
+From: Wufei <fei.wu@windriver.com>
+
+commit 56151e753468e34aeb322af4b0309ab727c97d2e upstream.
+
+The bypassing of this test is a leftover from 2.4 vintage
+kernels, and is no longer appropriate, or even used by KGDB.
+Currently KGDB uses probe_kernel_write() for all access to
+memory via the KGDB core, so it can simply be deleted.
+
+This fixes CVE-2010-1446.
+
+CC: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+CC: Paul Mackerras <paulus@samba.org>
+CC: Kumar Gala <galak@kernel.crashing.org>
+Signed-off-by: Wufei <fei.wu@windriver.com>
+Signed-off-by: Jason Wessel <jason.wessel@windriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/powerpc/mm/fsl_booke_mmu.c | 5 -----
+ 1 file changed, 5 deletions(-)
+
+--- a/arch/powerpc/mm/fsl_booke_mmu.c
++++ b/arch/powerpc/mm/fsl_booke_mmu.c
+@@ -131,15 +131,10 @@ void settlbcam(int index, unsigned long
+ TLBCAM[index].MAS3 = (phys & PAGE_MASK) | MAS3_SX | MAS3_SR;
+ TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? MAS3_SW : 0);
+
+-#ifndef CONFIG_KGDB /* want user access for breakpoints */
+ if (flags & _PAGE_USER) {
+ TLBCAM[index].MAS3 |= MAS3_UX | MAS3_UR;
+ TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? MAS3_UW : 0);
+ }
+-#else
+- TLBCAM[index].MAS3 |= MAS3_UX | MAS3_UR;
+- TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? MAS3_UW : 0);
+-#endif
+
+ tlbcam_addrs[index].start = virt;
+ tlbcam_addrs[index].limit = virt + size - 1;
--- /dev/null
+From mtosatti@redhat.com Fri May 7 15:13:09 2010
+From: Marcelo Tosatti <mtosatti@redhat.com>
+Date: Tue, 27 Apr 2010 13:35:26 -0300
+Subject: KVM: remove unused load_segment_descriptor_to_kvm_desct
+To: Greg KH <gregkh@suse.de>
+Cc: kvm <kvm@vger.kernel.org>, Jan Kiszka <jan.kiszka@web.de>, stable@kernel.org, Gleb Natapov <gleb@redhat.com>, Avi Kivity <avi@redhat.com>
+Message-ID: <20100427163526.GA25766@amt.cnet>
+Content-Disposition: inline
+
+From: Marcelo Tosatti <mtosatti@redhat.com>
+
+Commit 78ce64a384 in v2.6.32.12 introduced a warning due to unused
+load_segment_descriptor_to_kvm_desct helper, which has been open-coded by
+this commit.
+
+On upstream, the helper was removed as part of a different commit.
+
+Remove the now unused function.
+
+Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kvm/x86.c | 12 ------------
+ 1 file changed, 12 deletions(-)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -4155,18 +4155,6 @@ static u16 get_segment_selector(struct k
+ return kvm_seg.selector;
+ }
+
+-static int load_segment_descriptor_to_kvm_desct(struct kvm_vcpu *vcpu,
+- u16 selector,
+- struct kvm_segment *kvm_seg)
+-{
+- struct desc_struct seg_desc;
+-
+- if (load_guest_segment_descriptor(vcpu, selector, &seg_desc))
+- return 1;
+- seg_desct_to_kvm_desct(&seg_desc, selector, kvm_seg);
+- return 0;
+-}
+-
+ static int kvm_load_realmode_segment(struct kvm_vcpu *vcpu, u16 selector, int seg)
+ {
+ struct kvm_segment segvar = {
--- /dev/null
+From 5fd4514bb351b5ecb0da3692fff70741e5ed200c Mon Sep 17 00:00:00 2001
+From: Carlos O'Donell <carlos@codesourcery.com>
+Date: Mon, 22 Feb 2010 23:25:59 +0000
+Subject: parisc: Set PCI CLS early in boot.
+
+From: Carlos O'Donell <carlos@codesourcery.com>
+
+commit 5fd4514bb351b5ecb0da3692fff70741e5ed200c upstream.
+
+Set the PCI CLS early in the boot process to prevent
+device failures. In pcibios_set_master use the new
+pci_cache_line_size instead of a hard-coded value.
+
+Signed-off-by: Carlos O'Donell <carlos@codesourcery.com>
+Reviewed-by: Grant Grundler <grundler@google.com>
+Signed-off-by: Kyle McMartin <kyle@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/parisc/kernel/pci.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/arch/parisc/kernel/pci.c
++++ b/arch/parisc/kernel/pci.c
+@@ -18,7 +18,6 @@
+
+ #include <asm/io.h>
+ #include <asm/system.h>
+-#include <asm/cache.h> /* for L1_CACHE_BYTES */
+ #include <asm/superio.h>
+
+ #define DEBUG_RESOURCES 0
+@@ -123,6 +122,10 @@ static int __init pcibios_init(void)
+ } else {
+ printk(KERN_WARNING "pci_bios != NULL but init() is!\n");
+ }
++
++ /* Set the CLS for PCI as early as possible. */
++ pci_cache_line_size = pci_dfl_cache_line_size;
++
+ return 0;
+ }
+
+@@ -171,7 +174,7 @@ void pcibios_set_master(struct pci_dev *
+ ** upper byte is PCI_LATENCY_TIMER.
+ */
+ pci_write_config_word(dev, PCI_CACHE_LINE_SIZE,
+- (0x80 << 8) | (L1_CACHE_BYTES / sizeof(u32)));
++ (0x80 << 8) | pci_cache_line_size);
+ }
+
+
--- /dev/null
+From 0f00a206ccb1dc644b6770ef25f185610fee6962 Mon Sep 17 00:00:00 2001
+From: Lalit Chandivade <lalit.chandivade@qlogic.com>
+Date: Tue, 13 Oct 2009 15:16:52 -0700
+Subject: [SCSI] qla2xxx: Properly handle UNDERRUN completion statuses.
+
+From: Lalit Chandivade <lalit.chandivade@qlogic.com>
+
+commit 0f00a206ccb1dc644b6770ef25f185610fee6962 upstream.
+
+Correct issues where the lower scsi-status would be improperly
+cleared, instead, allow the midlayer to process the status after
+the proper residual-count checks are performed. Finally,
+validate firmware status flags prior to assigning values from the
+FCP_RSP frame.
+
+Signed-off-by: Lalit Chandivade <lalit.chandivade@qlogic.com>
+Signed-off-by: Michael Hernandez <michael.hernandez@qlogic.com>
+Signed-off-by: Ravi Anand <ravi.anand@qlogic.com>
+Signed-off-by: Andrew Vasquez <andrew.vasquez@qlogic.com>
+Signed-off-by: Giridhar Malavali <giridhar.malavali@qlogic.com>
+Signed-off-by: James Bottomley <James.Bottomley@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/scsi/qla2xxx/qla_isr.c | 120 +++++++++++++++++++----------------------
+ 1 file changed, 57 insertions(+), 63 deletions(-)
+
+--- a/drivers/scsi/qla2xxx/qla_isr.c
++++ b/drivers/scsi/qla2xxx/qla_isr.c
+@@ -1347,16 +1347,22 @@ qla2x00_status_entry(scsi_qla_host_t *vh
+
+ sense_len = rsp_info_len = resid_len = fw_resid_len = 0;
+ if (IS_FWI2_CAPABLE(ha)) {
+- sense_len = le32_to_cpu(sts24->sense_len);
+- rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
+- resid_len = le32_to_cpu(sts24->rsp_residual_count);
+- fw_resid_len = le32_to_cpu(sts24->residual_len);
++ if (scsi_status & SS_SENSE_LEN_VALID)
++ sense_len = le32_to_cpu(sts24->sense_len);
++ if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
++ rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
++ if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
++ resid_len = le32_to_cpu(sts24->rsp_residual_count);
++ if (comp_status == CS_DATA_UNDERRUN)
++ fw_resid_len = le32_to_cpu(sts24->residual_len);
+ rsp_info = sts24->data;
+ sense_data = sts24->data;
+ host_to_fcp_swap(sts24->data, sizeof(sts24->data));
+ } else {
+- sense_len = le16_to_cpu(sts->req_sense_length);
+- rsp_info_len = le16_to_cpu(sts->rsp_info_len);
++ if (scsi_status & SS_SENSE_LEN_VALID)
++ sense_len = le16_to_cpu(sts->req_sense_length);
++ if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
++ rsp_info_len = le16_to_cpu(sts->rsp_info_len);
+ resid_len = le32_to_cpu(sts->residual_length);
+ rsp_info = sts->rsp_info;
+ sense_data = sts->req_sense_data;
+@@ -1443,38 +1449,62 @@ qla2x00_status_entry(scsi_qla_host_t *vh
+ break;
+
+ case CS_DATA_UNDERRUN:
+- resid = resid_len;
++ DEBUG2(printk(KERN_INFO
++ "scsi(%ld:%d:%d) UNDERRUN status detected 0x%x-0x%x. "
++ "resid=0x%x fw_resid=0x%x cdb=0x%x os_underflow=0x%x\n",
++ vha->host_no, cp->device->id, cp->device->lun, comp_status,
++ scsi_status, resid_len, fw_resid_len, cp->cmnd[0],
++ cp->underflow));
++
+ /* Use F/W calculated residual length. */
+- if (IS_FWI2_CAPABLE(ha)) {
+- if (!(scsi_status & SS_RESIDUAL_UNDER)) {
+- lscsi_status = 0;
+- } else if (resid != fw_resid_len) {
+- scsi_status &= ~SS_RESIDUAL_UNDER;
+- lscsi_status = 0;
++ resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
++ scsi_set_resid(cp, resid);
++ if (scsi_status & SS_RESIDUAL_UNDER) {
++ if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
++ DEBUG2(printk(
++ "scsi(%ld:%d:%d:%d) Dropped frame(s) "
++ "detected (%x of %x bytes)...residual "
++ "length mismatch...retrying command.\n",
++ vha->host_no, cp->device->channel,
++ cp->device->id, cp->device->lun, resid,
++ scsi_bufflen(cp)));
++
++ cp->result = DID_ERROR << 16 | lscsi_status;
++ break;
+ }
+- resid = fw_resid_len;
+- }
+
+- if (scsi_status & SS_RESIDUAL_UNDER) {
+- scsi_set_resid(cp, resid);
+- } else {
+- DEBUG2(printk(KERN_INFO
+- "scsi(%ld:%d:%d) UNDERRUN status detected "
+- "0x%x-0x%x. resid=0x%x fw_resid=0x%x cdb=0x%x "
+- "os_underflow=0x%x\n", vha->host_no,
+- cp->device->id, cp->device->lun, comp_status,
+- scsi_status, resid_len, resid, cp->cmnd[0],
+- cp->underflow));
++ if (!lscsi_status &&
++ ((unsigned)(scsi_bufflen(cp) - resid) <
++ cp->underflow)) {
++ qla_printk(KERN_INFO, ha,
++ "scsi(%ld:%d:%d:%d): Mid-layer underflow "
++ "detected (%x of %x bytes)...returning "
++ "error status.\n", vha->host_no,
++ cp->device->channel, cp->device->id,
++ cp->device->lun, resid, scsi_bufflen(cp));
+
++ cp->result = DID_ERROR << 16;
++ break;
++ }
++ } else if (!lscsi_status) {
++ DEBUG2(printk(
++ "scsi(%ld:%d:%d:%d) Dropped frame(s) detected "
++ "(%x of %x bytes)...firmware reported underrun..."
++ "retrying command.\n", vha->host_no,
++ cp->device->channel, cp->device->id,
++ cp->device->lun, resid, scsi_bufflen(cp)));
++
++ cp->result = DID_ERROR << 16;
++ break;
+ }
+
++ cp->result = DID_OK << 16 | lscsi_status;
++
+ /*
+ * Check to see if SCSI Status is non zero. If so report SCSI
+ * Status.
+ */
+ if (lscsi_status != 0) {
+- cp->result = DID_OK << 16 | lscsi_status;
+-
+ if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
+ DEBUG2(printk(KERN_INFO
+ "scsi(%ld): QUEUE FULL status detected "
+@@ -1501,42 +1531,6 @@ qla2x00_status_entry(scsi_qla_host_t *vh
+ break;
+
+ qla2x00_handle_sense(sp, sense_data, sense_len, rsp);
+- } else {
+- /*
+- * If RISC reports underrun and target does not report
+- * it then we must have a lost frame, so tell upper
+- * layer to retry it by reporting an error.
+- */
+- if (!(scsi_status & SS_RESIDUAL_UNDER)) {
+- DEBUG2(printk("scsi(%ld:%d:%d:%d) Dropped "
+- "frame(s) detected (%x of %x bytes)..."
+- "retrying command.\n",
+- vha->host_no, cp->device->channel,
+- cp->device->id, cp->device->lun, resid,
+- scsi_bufflen(cp)));
+-
+- scsi_set_resid(cp, resid);
+- cp->result = DID_ERROR << 16;
+- break;
+- }
+-
+- /* Handle mid-layer underflow */
+- if ((unsigned)(scsi_bufflen(cp) - resid) <
+- cp->underflow) {
+- qla_printk(KERN_INFO, ha,
+- "scsi(%ld:%d:%d:%d): Mid-layer underflow "
+- "detected (%x of %x bytes)...returning "
+- "error status.\n", vha->host_no,
+- cp->device->channel, cp->device->id,
+- cp->device->lun, resid,
+- scsi_bufflen(cp));
+-
+- cp->result = DID_ERROR << 16;
+- break;
+- }
+-
+- /* Everybody online, looking good... */
+- cp->result = DID_OK << 16;
+ }
+ break;
+
--- /dev/null
+From 78f1cd02457252e1ffbc6caa44a17424a45286b8 Mon Sep 17 00:00:00 2001
+From: Francois Romieu <romieu@fr.zoreil.com>
+Date: Sat, 27 Mar 2010 19:35:46 -0700
+Subject: r8169: fix broken register writes
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Francois Romieu <romieu@fr.zoreil.com>
+
+commit 78f1cd02457252e1ffbc6caa44a17424a45286b8 upstream.
+
+This is quite similar to b39fe41f481d20c201012e4483e76c203802dda7
+though said registers are not even documented as 64-bit registers
+- as opposed to the initial TxDescStartAddress ones - but as single
+bytes which must be combined into 32 bits at the MMIO read/write
+level before being merged into a 64 bit logical entity.
+
+Credits go to Ben Hutchings <ben@decadent.org.uk> for the MAR
+registers (aka "multicast is broken for ages on ARM") and to
+Timo Teräs <timo.teras@iki.fi> for the MAC registers.
+
+Signed-off-by: Francois Romieu <romieu@fr.zoreil.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/r8169.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/r8169.c
++++ b/drivers/net/r8169.c
+@@ -2832,8 +2832,8 @@ static void rtl_rar_set(struct rtl8169_p
+ spin_lock_irq(&tp->lock);
+
+ RTL_W8(Cfg9346, Cfg9346_Unlock);
+- RTL_W32(MAC0, low);
+ RTL_W32(MAC4, high);
++ RTL_W32(MAC0, low);
+ RTL_W8(Cfg9346, Cfg9346_Lock);
+
+ spin_unlock_irq(&tp->lock);
+@@ -4814,8 +4814,8 @@ static void rtl_set_rx_mode(struct net_d
+ mc_filter[1] = swab32(data);
+ }
+
+- RTL_W32(MAR0 + 0, mc_filter[0]);
+ RTL_W32(MAR0 + 4, mc_filter[1]);
++ RTL_W32(MAR0 + 0, mc_filter[0]);
+
+ RTL_W32(RxConfig, tmp);
+
--- /dev/null
+From 908ba2bfd22253f26fa910cd855e4ccffb1467d0 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?fran=C3=A7ois=20romieu?= <romieu@fr.zoreil.com>
+Date: Mon, 26 Apr 2010 11:42:58 +0000
+Subject: r8169: more broken register writes workaround
+
+From: =?UTF-8?q?fran=C3=A7ois=20romieu?= <romieu@fr.zoreil.com>
+
+commit 908ba2bfd22253f26fa910cd855e4ccffb1467d0 upstream.
+
+78f1cd02457252e1ffbc6caa44a17424a45286b8 ("fix broken register writes")
+does not work for Al Viro's r8169 (XID 18000000).
+
+Signed-off-by: Francois Romieu <romieu@fr.zoreil.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/r8169.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/drivers/net/r8169.c
++++ b/drivers/net/r8169.c
+@@ -2832,8 +2832,13 @@ static void rtl_rar_set(struct rtl8169_p
+ spin_lock_irq(&tp->lock);
+
+ RTL_W8(Cfg9346, Cfg9346_Unlock);
++
+ RTL_W32(MAC4, high);
++ RTL_R32(MAC4);
++
+ RTL_W32(MAC0, low);
++ RTL_R32(MAC0);
++
+ RTL_W8(Cfg9346, Cfg9346_Lock);
+
+ spin_unlock_irq(&tp->lock);
--- /dev/null
+From 4c020a961a812ffae9846b917304cea504c3a733 Mon Sep 17 00:00:00 2001
+From: David Dillow <dave@thedillows.org>
+Date: Wed, 3 Mar 2010 16:33:10 +0000
+Subject: r8169: use correct barrier between cacheable and non-cacheable memory
+
+From: David Dillow <dave@thedillows.org>
+
+commit 4c020a961a812ffae9846b917304cea504c3a733 upstream.
+
+r8169 needs certain writes to be visible to other CPUs or the NIC before
+touching the hardware, but was using smp_wmb() which is only required to
+order cacheable memory access. Switch to wmb() which is required to
+order both cacheable and non-cacheable memory.
+
+Noticed by Catalin Marinas and Paul Mackerras.
+
+Signed-off-by: David Dillow <dave@thedillows.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/r8169.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/r8169.c
++++ b/drivers/net/r8169.c
+@@ -4316,7 +4316,7 @@ static netdev_tx_t rtl8169_start_xmit(st
+
+ tp->cur_tx += frags + 1;
+
+- smp_wmb();
++ wmb();
+
+ RTL_W8(TxPoll, NPQ); /* set polling bit */
+
+@@ -4676,7 +4676,7 @@ static int rtl8169_poll(struct napi_stru
+ * until it does.
+ */
+ tp->intr_mask = 0xffff;
+- smp_wmb();
++ wmb();
+ RTL_W16(IntrMask, tp->intr_event);
+ }
+
--- /dev/null
+From e7efe5932b1d3916c79326a4221693ea90a900e2 Mon Sep 17 00:00:00 2001
+From: Douglas Gilbert <dgilbert@interlog.com>
+Date: Sun, 3 Jan 2010 13:51:15 -0500
+Subject: [SCSI] skip sense logging for some ATA PASS-THROUGH cdbs
+
+From: Douglas Gilbert <dgilbert@interlog.com>
+
+commit e7efe5932b1d3916c79326a4221693ea90a900e2 upstream.
+
+Further to the lsml thread titled:
+"does scsi_io_completion need to dump sense data for ata pass through (ck_cond =
+1) ?"
+
+This is a patch to skip logging when the sense data is
+associated with a SENSE_KEY of "RECOVERED_ERROR" and the
+additional sense code is "ATA PASS-THROUGH INFORMATION
+AVAILABLE". This only occurs with the SAT ATA PASS-THROUGH
+commands when CK_COND=1 (in the cdb). It indicates that
+the sense data contains ATA registers.
+
+Smartmontools uses such commands on ATA disks connected via
+SAT. Periodic checks such as those done by smartd cause
+nuisance entries into logs that are:
+ - neither errors nor warnings
+ - pointless unless the cdb that caused them are also logged
+
+Signed-off-by: Douglas Gilbert <dgilbert@interlog.com>
+Signed-off-by: James Bottomley <James.Bottomley@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/scsi/scsi_lib.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+--- a/drivers/scsi/scsi_lib.c
++++ b/drivers/scsi/scsi_lib.c
+@@ -773,8 +773,14 @@ void scsi_io_completion(struct scsi_cmnd
+ * we already took a copy of the original into rq->errors which
+ * is what gets returned to the user
+ */
+- if (sense_valid && sshdr.sense_key == RECOVERED_ERROR) {
+- if (!(req->cmd_flags & REQ_QUIET))
++ if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) {
++ /* if ATA PASS-THROUGH INFORMATION AVAILABLE skip
++ * print since caller wants ATA registers. Only occurs on
++ * SCSI ATA PASS_THROUGH commands when CK_COND=1
++ */
++ if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
++ ;
++ else if (!(req->cmd_flags & REQ_QUIET))
+ scsi_print_sense("", cmd);
+ result = 0;
+ /* BLOCK_PC may have set error */
--- /dev/null
+From dc8bf1b1a6edfc92465526de19772061302f0929 Mon Sep 17 00:00:00 2001
+From: Andre Detsch <adetsch@br.ibm.com>
+Date: Mon, 26 Apr 2010 07:27:07 +0000
+Subject: tg3: Fix INTx fallback when MSI fails
+
+From: Andre Detsch <adetsch@br.ibm.com>
+
+commit dc8bf1b1a6edfc92465526de19772061302f0929 upstream.
+
+tg3: Fix INTx fallback when MSI fails
+
+MSI setup changes the value of irq_vec in struct tg3 *tp.
+This attribute must be taken into account and restored before
+we try to do a new request_irq for INTx fallback.
+
+In powerpc, the original code was leading to an EINVAL return within
+request_irq, because the driver was trying to use the disabled MSI
+virtual irq number instead of tp->pdev->irq.
+
+Signed-off-by: Andre Detsch <adetsch@br.ibm.com>
+Acked-by: Michael Chan <mchan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Cc: Brandon Philips <bphilips@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/tg3.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/net/tg3.c
++++ b/drivers/net/tg3.c
+@@ -8168,6 +8168,7 @@ static int tg3_test_msi(struct tg3 *tp)
+ pci_disable_msi(tp->pdev);
+
+ tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
++ tp->napi[0].irq_vec = tp->pdev->irq;
+
+ err = tg3_request_irq(tp, 0);
+ if (err)
--- /dev/null
+From david@fromorbit.com Fri May 7 15:27:13 2010
+From: Dave Chinner <david@fromorbit.com>
+Date: Tue, 4 May 2010 12:58:20 +1000
+Subject: xfs: add a shrinker to background inode reclaim
+To: stable@kernel.org
+Cc: xfs@oss.sgi.com
+Message-ID: <20100504025820.GI2591@dastard>
+
+From: Dave Chinner <dchinner@redhat.com>
+
+commit 9bf729c0af67897ea8498ce17c29b0683f7f2028 upstream
+
+On low memory boxes or those with highmem, kernel can OOM before the
+background reclaims inodes via xfssyncd. Add a shrinker to run inode
+reclaim so that inode reclaim is expedited when memory is low.
+
+This is more complex than it needs to be because the VM folk don't
+want a context added to the shrinker infrastructure. Hence we need
+to add a global list of XFS mount structures so the shrinker can
+traverse them.
+
+Signed-off-by: Dave Chinner <dchinner@redhat.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+
+---
+ fs/xfs/linux-2.6/xfs_super.c | 5 +
+ fs/xfs/linux-2.6/xfs_sync.c | 107 ++++++++++++++++++++++++++++++++++++++---
+ fs/xfs/linux-2.6/xfs_sync.h | 7 ++
+ fs/xfs/quota/xfs_qm_syscalls.c | 3 -
+ fs/xfs/xfs_ag.h | 1
+ fs/xfs/xfs_mount.h | 1
+ 6 files changed, 115 insertions(+), 9 deletions(-)
+
+--- a/fs/xfs/linux-2.6/xfs_super.c
++++ b/fs/xfs/linux-2.6/xfs_super.c
+@@ -1164,6 +1164,7 @@ xfs_fs_put_super(
+
+ xfs_unmountfs(mp);
+ xfs_freesb(mp);
++ xfs_inode_shrinker_unregister(mp);
+ xfs_icsb_destroy_counters(mp);
+ xfs_close_devices(mp);
+ xfs_dmops_put(mp);
+@@ -1555,6 +1556,8 @@ xfs_fs_fill_super(
+ if (error)
+ goto fail_vnrele;
+
++ xfs_inode_shrinker_register(mp);
++
+ kfree(mtpt);
+
+ xfs_itrace_exit(XFS_I(sb->s_root->d_inode));
+@@ -1894,6 +1897,7 @@ init_xfs_fs(void)
+ goto out_cleanup_procfs;
+
+ vfs_initquota();
++ xfs_inode_shrinker_init();
+
+ error = register_filesystem(&xfs_fs_type);
+ if (error)
+@@ -1923,6 +1927,7 @@ exit_xfs_fs(void)
+ {
+ vfs_exitquota();
+ unregister_filesystem(&xfs_fs_type);
++ xfs_inode_shrinker_destroy();
+ xfs_sysctl_unregister();
+ xfs_cleanup_procfs();
+ xfs_buf_terminate();
+--- a/fs/xfs/linux-2.6/xfs_sync.c
++++ b/fs/xfs/linux-2.6/xfs_sync.c
+@@ -94,7 +94,8 @@ xfs_inode_ag_walk(
+ struct xfs_perag *pag, int flags),
+ int flags,
+ int tag,
+- int exclusive)
++ int exclusive,
++ int *nr_to_scan)
+ {
+ struct xfs_perag *pag = &mp->m_perag[ag];
+ uint32_t first_index;
+@@ -134,7 +135,7 @@ restart:
+ if (error == EFSCORRUPTED)
+ break;
+
+- } while (1);
++ } while ((*nr_to_scan)--);
+
+ if (skipped) {
+ delay(1);
+@@ -152,23 +153,30 @@ xfs_inode_ag_iterator(
+ struct xfs_perag *pag, int flags),
+ int flags,
+ int tag,
+- int exclusive)
++ int exclusive,
++ int *nr_to_scan)
+ {
+ int error = 0;
+ int last_error = 0;
+ xfs_agnumber_t ag;
++ int nr;
+
++ nr = nr_to_scan ? *nr_to_scan : INT_MAX;
+ for (ag = 0; ag < mp->m_sb.sb_agcount; ag++) {
+ if (!mp->m_perag[ag].pag_ici_init)
+ continue;
+ error = xfs_inode_ag_walk(mp, ag, execute, flags, tag,
+- exclusive);
++ exclusive, &nr);
+ if (error) {
+ last_error = error;
+ if (error == EFSCORRUPTED)
+ break;
+ }
++ if (nr <= 0)
++ break;
+ }
++ if (nr_to_scan)
++ *nr_to_scan = nr;
+ return XFS_ERROR(last_error);
+ }
+
+@@ -288,7 +296,7 @@ xfs_sync_data(
+ ASSERT((flags & ~(SYNC_TRYLOCK|SYNC_WAIT)) == 0);
+
+ error = xfs_inode_ag_iterator(mp, xfs_sync_inode_data, flags,
+- XFS_ICI_NO_TAG, 0);
++ XFS_ICI_NO_TAG, 0, NULL);
+ if (error)
+ return XFS_ERROR(error);
+
+@@ -310,7 +318,7 @@ xfs_sync_attr(
+ ASSERT((flags & ~SYNC_WAIT) == 0);
+
+ return xfs_inode_ag_iterator(mp, xfs_sync_inode_attr, flags,
+- XFS_ICI_NO_TAG, 0);
++ XFS_ICI_NO_TAG, 0, NULL);
+ }
+
+ STATIC int
+@@ -678,6 +686,7 @@ __xfs_inode_set_reclaim_tag(
+ radix_tree_tag_set(&pag->pag_ici_root,
+ XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
+ XFS_ICI_RECLAIM_TAG);
++ pag->pag_ici_reclaimable++;
+ }
+
+ /*
+@@ -709,6 +718,7 @@ __xfs_inode_clear_reclaim_tag(
+ {
+ radix_tree_tag_clear(&pag->pag_ici_root,
+ XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
++ pag->pag_ici_reclaimable--;
+ }
+
+ STATIC int
+@@ -769,5 +779,88 @@ xfs_reclaim_inodes(
+ int mode)
+ {
+ return xfs_inode_ag_iterator(mp, xfs_reclaim_inode, mode,
+- XFS_ICI_RECLAIM_TAG, 1);
++ XFS_ICI_RECLAIM_TAG, 1, NULL);
++}
++
++/*
++ * Shrinker infrastructure.
++ *
++ * This is all far more complex than it needs to be. It adds a global list of
++ * mounts because the shrinkers can only call a global context. We need to make
++ * the shrinkers pass a context to avoid the need for global state.
++ */
++static LIST_HEAD(xfs_mount_list);
++static struct rw_semaphore xfs_mount_list_lock;
++
++static int
++xfs_reclaim_inode_shrink(
++ int nr_to_scan,
++ gfp_t gfp_mask)
++{
++ struct xfs_mount *mp;
++ xfs_agnumber_t ag;
++ int reclaimable = 0;
++
++ if (nr_to_scan) {
++ if (!(gfp_mask & __GFP_FS))
++ return -1;
++
++ down_read(&xfs_mount_list_lock);
++ list_for_each_entry(mp, &xfs_mount_list, m_mplist) {
++ xfs_inode_ag_iterator(mp, xfs_reclaim_inode, 0,
++ XFS_ICI_RECLAIM_TAG, 1, &nr_to_scan);
++ if (nr_to_scan <= 0)
++ break;
++ }
++ up_read(&xfs_mount_list_lock);
++ }
++
++ down_read(&xfs_mount_list_lock);
++ list_for_each_entry(mp, &xfs_mount_list, m_mplist) {
++ for (ag = 0; ag < mp->m_sb.sb_agcount; ag++) {
++
++ if (!mp->m_perag[ag].pag_ici_init)
++ continue;
++ reclaimable += mp->m_perag[ag].pag_ici_reclaimable;
++ }
++ }
++ up_read(&xfs_mount_list_lock);
++ return reclaimable;
++}
++
++static struct shrinker xfs_inode_shrinker = {
++ .shrink = xfs_reclaim_inode_shrink,
++ .seeks = DEFAULT_SEEKS,
++};
++
++void __init
++xfs_inode_shrinker_init(void)
++{
++ init_rwsem(&xfs_mount_list_lock);
++ register_shrinker(&xfs_inode_shrinker);
++}
++
++void
++xfs_inode_shrinker_destroy(void)
++{
++ ASSERT(list_empty(&xfs_mount_list));
++ unregister_shrinker(&xfs_inode_shrinker);
++}
++
++void
++xfs_inode_shrinker_register(
++ struct xfs_mount *mp)
++{
++ down_write(&xfs_mount_list_lock);
++ list_add_tail(&mp->m_mplist, &xfs_mount_list);
++ up_write(&xfs_mount_list_lock);
++}
++
++void
++xfs_inode_shrinker_unregister(
++ struct xfs_mount *mp)
++{
++ down_write(&xfs_mount_list_lock);
++ list_del(&mp->m_mplist);
++ up_write(&xfs_mount_list_lock);
+ }
+--- a/fs/xfs/linux-2.6/xfs_sync.h
++++ b/fs/xfs/linux-2.6/xfs_sync.h
+@@ -54,6 +54,11 @@ void __xfs_inode_clear_reclaim_tag(struc
+ int xfs_sync_inode_valid(struct xfs_inode *ip, struct xfs_perag *pag);
+ int xfs_inode_ag_iterator(struct xfs_mount *mp,
+ int (*execute)(struct xfs_inode *ip, struct xfs_perag *pag, int flags),
+- int flags, int tag, int write_lock);
++ int flags, int tag, int write_lock, int *nr_to_scan);
++
++void xfs_inode_shrinker_init(void);
++void xfs_inode_shrinker_destroy(void);
++void xfs_inode_shrinker_register(struct xfs_mount *mp);
++void xfs_inode_shrinker_unregister(struct xfs_mount *mp);
+
+ #endif
+--- a/fs/xfs/quota/xfs_qm_syscalls.c
++++ b/fs/xfs/quota/xfs_qm_syscalls.c
+@@ -893,7 +893,8 @@ xfs_qm_dqrele_all_inodes(
+ uint flags)
+ {
+ ASSERT(mp->m_quotainfo);
+- xfs_inode_ag_iterator(mp, xfs_dqrele_inode, flags, XFS_ICI_NO_TAG, 0);
++ xfs_inode_ag_iterator(mp, xfs_dqrele_inode, flags,
++ XFS_ICI_NO_TAG, 0, NULL);
+ }
+
+ /*------------------------------------------------------------------------*/
+--- a/fs/xfs/xfs_ag.h
++++ b/fs/xfs/xfs_ag.h
+@@ -215,6 +215,7 @@ typedef struct xfs_perag
+ int pag_ici_init; /* incore inode cache initialised */
+ rwlock_t pag_ici_lock; /* incore inode lock */
+ struct radix_tree_root pag_ici_root; /* incore inode cache root */
++ int pag_ici_reclaimable; /* reclaimable inodes */
+ #endif
+ } xfs_perag_t;
+
+--- a/fs/xfs/xfs_mount.h
++++ b/fs/xfs/xfs_mount.h
+@@ -243,6 +243,7 @@ typedef struct xfs_mount {
+ wait_queue_head_t m_wait_single_sync_task;
+ __int64_t m_update_flags; /* sb flags we need to update
+ on the next remount,rw */
++ struct list_head m_mplist; /* inode shrinker mount list */
+ } xfs_mount_t;
+
+ /*