--- /dev/null
+From 2a908002c7b1b666616103e9df2419b38d7c6f1f Mon Sep 17 00:00:00 2001
+From: Frans Pop <elendil@planet.nl>
+Date: Wed, 26 Aug 2009 14:29:29 -0700
+Subject: ACPI processor: force throttling state when BIOS returns incorrect value
+
+From: Frans Pop <elendil@planet.nl>
+
+commit 2a908002c7b1b666616103e9df2419b38d7c6f1f upstream.
+
+If the BIOS reports an invalid throttling state (which seems to be
+fairly common after system boot), a reset is done to state T0.
+Because of a check in acpi_processor_get_throttling_ptc(), the reset
+never actually gets executed, which results in the error reoccurring
+on every access of for example /proc/acpi/processor/CPU0/throttling.
+
+Add a 'force' option to acpi_processor_set_throttling() to ensure
+the reset really takes effect.
+
+Addresses http://bugzilla.kernel.org/show_bug.cgi?id=13389
+
+This patch, together with the next one, fixes a regression introduced in
+2.6.30, listed on the regression list. They have been available for 2.5
+months now in bugzilla, but have not been picked up, despite various
+reminders and without any reason given.
+
+Google shows that numerous people are hitting this issue. The issue is in
+itself relatively minor, but the bug in the code is clear.
+
+The patches have been in all my kernels and today testing has shown that
+throttling works correctly with the patches applied when the system
+overheats (http://bugzilla.kernel.org/show_bug.cgi?id=13918#c14).
+
+Signed-off-by: Frans Pop <elendil@planet.nl>
+Acked-by: Zhang Rui <rui.zhang@intel.com>
+Cc: Len Brown <lenb@kernel.org>
+Cc: "Rafael J. Wysocki" <rjw@sisk.pl>
+Cc: Rusty Russell <rusty@rustcorp.com.au>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/acpi/processor_thermal.c | 6 +++---
+ drivers/acpi/processor_throttling.c | 26 ++++++++++++++------------
+ include/acpi/processor.h | 5 +++--
+ 3 files changed, 20 insertions(+), 17 deletions(-)
+
+--- a/drivers/acpi/processor_thermal.c
++++ b/drivers/acpi/processor_thermal.c
+@@ -66,7 +66,7 @@ static int acpi_processor_apply_limit(st
+ if (pr->limit.thermal.tx > tx)
+ tx = pr->limit.thermal.tx;
+
+- result = acpi_processor_set_throttling(pr, tx);
++ result = acpi_processor_set_throttling(pr, tx, false);
+ if (result)
+ goto end;
+ }
+@@ -421,12 +421,12 @@ processor_set_cur_state(struct thermal_c
+
+ if (state <= max_pstate) {
+ if (pr->flags.throttling && pr->throttling.state)
+- result = acpi_processor_set_throttling(pr, 0);
++ result = acpi_processor_set_throttling(pr, 0, false);
+ cpufreq_set_cur_state(pr->id, state);
+ } else {
+ cpufreq_set_cur_state(pr->id, max_pstate);
+ result = acpi_processor_set_throttling(pr,
+- state - max_pstate);
++ state - max_pstate, false);
+ }
+ return result;
+ }
+--- a/drivers/acpi/processor_throttling.c
++++ b/drivers/acpi/processor_throttling.c
+@@ -62,7 +62,8 @@ struct throttling_tstate {
+ #define THROTTLING_POSTCHANGE (2)
+
+ static int acpi_processor_get_throttling(struct acpi_processor *pr);
+-int acpi_processor_set_throttling(struct acpi_processor *pr, int state);
++int acpi_processor_set_throttling(struct acpi_processor *pr,
++ int state, bool force);
+
+ static int acpi_processor_update_tsd_coord(void)
+ {
+@@ -361,7 +362,7 @@ int acpi_processor_tstate_has_changed(st
+ */
+ target_state = throttling_limit;
+ }
+- return acpi_processor_set_throttling(pr, target_state);
++ return acpi_processor_set_throttling(pr, target_state, false);
+ }
+
+ /*
+@@ -842,7 +843,7 @@ static int acpi_processor_get_throttling
+ ACPI_WARNING((AE_INFO,
+ "Invalid throttling state, reset"));
+ state = 0;
+- ret = acpi_processor_set_throttling(pr, state);
++ ret = acpi_processor_set_throttling(pr, state, true);
+ if (ret)
+ return ret;
+ }
+@@ -915,7 +916,7 @@ static int acpi_processor_get_fadt_info(
+ }
+
+ static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr,
+- int state)
++ int state, bool force)
+ {
+ u32 value = 0;
+ u32 duty_mask = 0;
+@@ -930,7 +931,7 @@ static int acpi_processor_set_throttling
+ if (!pr->flags.throttling)
+ return -ENODEV;
+
+- if (state == pr->throttling.state)
++ if (!force && (state == pr->throttling.state))
+ return 0;
+
+ if (state < pr->throttling_platform_limit)
+@@ -988,7 +989,7 @@ static int acpi_processor_set_throttling
+ }
+
+ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
+- int state)
++ int state, bool force)
+ {
+ int ret;
+ acpi_integer value;
+@@ -1002,7 +1003,7 @@ static int acpi_processor_set_throttling
+ if (!pr->flags.throttling)
+ return -ENODEV;
+
+- if (state == pr->throttling.state)
++ if (!force && (state == pr->throttling.state))
+ return 0;
+
+ if (state < pr->throttling_platform_limit)
+@@ -1018,7 +1019,8 @@ static int acpi_processor_set_throttling
+ return 0;
+ }
+
+-int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
++int acpi_processor_set_throttling(struct acpi_processor *pr,
++ int state, bool force)
+ {
+ cpumask_var_t saved_mask;
+ int ret = 0;
+@@ -1070,7 +1072,7 @@ int acpi_processor_set_throttling(struct
+ /* FIXME: use work_on_cpu() */
+ set_cpus_allowed_ptr(current, cpumask_of(pr->id));
+ ret = p_throttling->acpi_processor_set_throttling(pr,
+- t_state.target_state);
++ t_state.target_state, force);
+ } else {
+ /*
+ * When the T-state coordination is SW_ALL or HW_ALL,
+@@ -1103,7 +1105,7 @@ int acpi_processor_set_throttling(struct
+ set_cpus_allowed_ptr(current, cpumask_of(i));
+ ret = match_pr->throttling.
+ acpi_processor_set_throttling(
+- match_pr, t_state.target_state);
++ match_pr, t_state.target_state, force);
+ }
+ }
+ /*
+@@ -1201,7 +1203,7 @@ int acpi_processor_get_throttling_info(s
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Disabling throttling (was T%d)\n",
+ pr->throttling.state));
+- result = acpi_processor_set_throttling(pr, 0);
++ result = acpi_processor_set_throttling(pr, 0, false);
+ if (result)
+ goto end;
+ }
+@@ -1307,7 +1309,7 @@ static ssize_t acpi_processor_write_thro
+ if (strcmp(tmpbuf, charp) != 0)
+ return -EINVAL;
+
+- result = acpi_processor_set_throttling(pr, state_val);
++ result = acpi_processor_set_throttling(pr, state_val, false);
+ if (result)
+ return result;
+
+--- a/include/acpi/processor.h
++++ b/include/acpi/processor.h
+@@ -174,7 +174,7 @@ struct acpi_processor_throttling {
+ cpumask_var_t shared_cpu_map;
+ int (*acpi_processor_get_throttling) (struct acpi_processor * pr);
+ int (*acpi_processor_set_throttling) (struct acpi_processor * pr,
+- int state);
++ int state, bool force);
+
+ u32 address;
+ u8 duty_offset;
+@@ -320,7 +320,8 @@ static inline int acpi_processor_ppc_has
+ /* in processor_throttling.c */
+ int acpi_processor_tstate_has_changed(struct acpi_processor *pr);
+ int acpi_processor_get_throttling_info(struct acpi_processor *pr);
+-extern int acpi_processor_set_throttling(struct acpi_processor *pr, int state);
++extern int acpi_processor_set_throttling(struct acpi_processor *pr,
++ int state, bool force);
+ extern const struct file_operations acpi_processor_throttling_fops;
+ extern void acpi_processor_throttling_init(void);
+ /* in processor_idle.c */
--- /dev/null
+From 51b89f7a6615eca184aa0b85db5781d931e9c8d1 Mon Sep 17 00:00:00 2001
+From: Fenghua Yu <fenghua.yu@intel.com>
+Date: Tue, 11 Aug 2009 14:52:10 -0700
+Subject: Bug Fix arch/ia64/kernel/pci-dma.c: fix recursive dma_supported() call in iommu_dma_supported()
+
+From: Fenghua Yu <fenghua.yu@intel.com>
+
+commit 51b89f7a6615eca184aa0b85db5781d931e9c8d1 upstream.
+
+In commit 160c1d8e40866edfeae7d68816b7005d70acf391,
+dma_ops->dma_supported = iommu_dma_supported;
+
+This dma_ops->dma_supported is first called in platform_dma_init() during kernel
+boot. Then dma_ops->dma_supported will be called recursively in
+iommu_dma_supported.
+
+Kernel can not boot because kernel can not get out of iommu_dma_supported until
+it runs out of stack memory.
+
+Signed-off-by: Fenghua Yu <fenghua.yu@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/ia64/kernel/pci-dma.c | 5 -----
+ 1 file changed, 5 deletions(-)
+
+--- a/arch/ia64/kernel/pci-dma.c
++++ b/arch/ia64/kernel/pci-dma.c
+@@ -67,11 +67,6 @@ iommu_dma_init(void)
+
+ int iommu_dma_supported(struct device *dev, u64 mask)
+ {
+- struct dma_map_ops *ops = platform_dma_get_ops(dev);
+-
+- if (ops->dma_supported)
+- return ops->dma_supported(dev, mask);
+-
+ /* Copied from i386. Doesn't make much sense, because it will
+ only work for pci_alloc_coherent.
+ The caller just has to use GFP_DMA in this case. */
--- /dev/null
+From 0083fc2c50e6c5127c2802ad323adf8143ab7856 Mon Sep 17 00:00:00 2001
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Sat, 1 Aug 2009 10:34:56 -0700
+Subject: do_sigaltstack: avoid copying 'stack_t' as a structure to user space
+
+From: Linus Torvalds <torvalds@linux-foundation.org>
+
+commit 0083fc2c50e6c5127c2802ad323adf8143ab7856 upstream.
+
+Ulrich Drepper correctly points out that there is generally padding in
+the structure on 64-bit hosts, and that copying the structure from
+kernel to user space can leak information from the kernel stack in those
+padding bytes.
+
+Avoid the whole issue by just copying the three members one by one
+instead, which also means that the function also can avoid the need for
+a stack frame. This also happens to match how we copy the new structure
+from user space, so it all even makes sense.
+
+[ The obvious solution of adding a memset() generates horrid code, gcc
+ does really stupid things. ]
+
+Reported-by: Ulrich Drepper <drepper@redhat.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -2454,11 +2454,9 @@ do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long s
+ stack_t oss;
+ int error;
+
+- if (uoss) {
+- oss.ss_sp = (void __user *) current->sas_ss_sp;
+- oss.ss_size = current->sas_ss_size;
+- oss.ss_flags = sas_ss_flags(sp);
+- }
++ oss.ss_sp = (void __user *) current->sas_ss_sp;
++ oss.ss_size = current->sas_ss_size;
++ oss.ss_flags = sas_ss_flags(sp);
+
+ if (uss) {
+ void __user *ss_sp;
+@@ -2501,13 +2499,16 @@ do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long s
+ current->sas_ss_size = ss_size;
+ }
+
++ error = 0;
+ if (uoss) {
+ error = -EFAULT;
+- if (copy_to_user(uoss, &oss, sizeof(oss)))
++ if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
+ goto out;
++ error = __put_user(oss.ss_sp, &uoss->ss_sp) |
++ __put_user(oss.ss_size, &uoss->ss_size) |
++ __put_user(oss.ss_flags, &uoss->ss_flags);
+ }
+
+- error = 0;
+ out:
+ return error;
+ }
--- /dev/null
+From jim@jtan.com Thu Sep 3 15:20:36 2009
+From: Jean-Francois Moine <moinejf@free.fr>
+Date: Wed, 19 Aug 2009 17:46:18 -0400
+Subject: gspca - ov534: Fix ov772x
+To: stable@kernel.org
+Cc: Jim Paris <jim@jtan.com>, linux-media@vger.kernel.org
+Message-ID: <1250718378-25759-1-git-send-email-jim@jtan.com>
+
+From: Jean-Francois Moine <moinejf@free.fr>
+
+The scan of the image packets of the sensor ov772x was broken when
+the sensor ov965x was added.
+
+[ Based on upstream c874f3aa, modified slightly for v2.6.30.5 ]
+
+Signed-off-by: Jim Paris <jim@jtan.com>
+Acked-by: Jean-Francois Moine <moinejf@free.fr>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/media/video/gspca/ov534.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/media/video/gspca/ov534.c
++++ b/drivers/media/video/gspca/ov534.c
+@@ -832,9 +832,11 @@ static void sd_pkt_scan(struct gspca_dev
+ __u32 this_pts;
+ u16 this_fid;
+ int remaining_len = len;
++ int payload_len;
+
++ payload_len = (sd->sensor == SENSOR_OV772X) ? 2048 : 2040;
+ do {
+- len = min(remaining_len, 2040); /*fixme: was 2048*/
++ len = min(remaining_len, payload_len);
+
+ /* Payloads are prefixed with a UVC-style header. We
+ consider a frame to start when the FID toggles, or the PTS
--- /dev/null
+From ca6982b858e1d08010c1d29d8e8255b2ac2ad70a Mon Sep 17 00:00:00 2001
+From: Bruno Prémont <bonbons@linux-vserver.org>
+Date: Sun, 23 Aug 2009 19:06:28 -0700
+Subject: ipv6: Fix commit 63d9950b08184e6531adceb65f64b429909cc101 (ipv6: Make v4-mapped bindings consistent with IPv4)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Bruno Prémont <bonbons@linux-vserver.org>
+
+commit ca6982b858e1d08010c1d29d8e8255b2ac2ad70a upstream.
+
+Commit 63d9950b08184e6531adceb65f64b429909cc101
+ (ipv6: Make v4-mapped bindings consistent with IPv4)
+changes behavior of inet6_bind() for v4-mapped addresses so it should
+behave the same way as inet_bind().
+
+During this change setting of err to -EADDRNOTAVAIL got lost:
+
+af_inet.c:469 inet_bind()
+ err = -EADDRNOTAVAIL;
+ if (!sysctl_ip_nonlocal_bind &&
+ !(inet->freebind || inet->transparent) &&
+ addr->sin_addr.s_addr != htonl(INADDR_ANY) &&
+ chk_addr_ret != RTN_LOCAL &&
+ chk_addr_ret != RTN_MULTICAST &&
+ chk_addr_ret != RTN_BROADCAST)
+ goto out;
+
+
+af_inet6.c:463 inet6_bind()
+ if (addr_type == IPV6_ADDR_MAPPED) {
+ int chk_addr_ret;
+
+ /* Binding to v4-mapped address on a v6-only socket
+ * makes no sense
+ */
+ if (np->ipv6only) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ /* Reproduce AF_INET checks to make the bindings consitant */
+ v4addr = addr->sin6_addr.s6_addr32[3];
+ chk_addr_ret = inet_addr_type(net, v4addr);
+ if (!sysctl_ip_nonlocal_bind &&
+ !(inet->freebind || inet->transparent) &&
+ v4addr != htonl(INADDR_ANY) &&
+ chk_addr_ret != RTN_LOCAL &&
+ chk_addr_ret != RTN_MULTICAST &&
+ chk_addr_ret != RTN_BROADCAST)
+ goto out;
+ } else {
+
+
+Signed-off-by: Bruno Prémont <bonbons@linux-vserver.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+
+---
+ net/ipv6/af_inet6.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/net/ipv6/af_inet6.c
++++ b/net/ipv6/af_inet6.c
+@@ -294,8 +294,10 @@ int inet6_bind(struct socket *sock, stru
+ v4addr != htonl(INADDR_ANY) &&
+ chk_addr_ret != RTN_LOCAL &&
+ chk_addr_ret != RTN_MULTICAST &&
+- chk_addr_ret != RTN_BROADCAST)
++ chk_addr_ret != RTN_BROADCAST) {
++ err = -EADDRNOTAVAIL;
+ goto out;
++ }
+ } else {
+ if (addr_type != IPV6_ADDR_ANY) {
+ struct net_device *dev = NULL;
--- /dev/null
+From sgruszka@redhat.com Thu Sep 3 14:54:39 2009
+From: Stanislaw Gruszka <sgruszka@redhat.com>
+Date: Thu, 13 Aug 2009 14:29:08 +0200
+Subject: iwl3945: fix rfkill switch
+To: stable@kernel.org
+Cc: Stanislaw Gruszka <sgruszka@redhat.com>, Zhu Yi <yi.zhu@intel.com>, Reinette Chatre <reinette.chatre@intel.com>, linux-wireless@vger.kernel.org, "John W. Linville" <linville@tuxdriver.com>
+Message-ID: <1250166548-2548-1-git-send-email-sgruszka@redhat.com>
+
+From: Stanislaw Gruszka <sgruszka@redhat.com>
+
+(Not needed upstream, due to the major rewrite in 2.6.31)
+
+Due to rfkill and iwlwifi mishmash of SW / HW killswitch representation,
+we have race conditions which make unable turn wifi radio on, after enable
+and disable again killswitch. I can observe this problem on my laptop
+with iwl3945 device.
+
+In rfkill core HW switch and SW switch are separate 'states'. Device can
+be only in one of 3 states: RFKILL_STATE_SOFT_BLOCKED, RFKILL_STATE_UNBLOCKED,
+RFKILL_STATE_HARD_BLOCKED. Whereas in iwlwifi driver we have separate bits
+STATUS_RF_KILL_HW and STATUS_RF_KILL_SW for HW and SW switches - radio can be
+turned on, only if both bits are cleared.
+
+In this particular race conditions, radio can not be turned on if in driver
+STATUS_RF_KILL_SW bit is set, and rfkill core is in state
+RFKILL_STATE_HARD_BLOCKED, because rfkill core is unable to call
+rfkill->toggle_radio(). This situation can be entered in case:
+
+- killswitch is turned on
+- rfkill core 'see' button change first and move to RFKILL_STATE_SOFT_BLOCKED
+ also call ->toggle_radio() and STATE_RF_KILL_SW in driver is set
+- iwl3945 get info about button from hardware to set STATUS_RF_KILL_HW bit and
+ force rfkill to move to RFKILL_STATE_HARD_BLOCKED
+- killswitch is turned off
+- driver clear STATUS_RF_KILL_HW
+- rfkill core is unable to clear STATUS_RF_KILL_SW in driver
+
+Additionally, a call to rfkill_epo() when STATUS_RF_KILL_HW in driver is set
+causes a move to the same situation.
+
+In 2.6.31 this problem is fixed due to _total_ rewrite of rfkill subsystem.
+This is a quite small fix for 2.6.30.x in iwlwifi driver. We are changing
+internal rfkill state to always have below relations true:
+
+STATUS_RF_KILL_HW=1 STATUS_RF_KILL_SW=1 <-> RFKILL_STATUS_SOFT_BLOCKED
+STATUS_RF_KILL_HW=0 STATUS_RF_KILL_SW=1 <-> RFKILL_STATUS_SOFT_BLOCKED
+STATUS_RF_KILL_HW=1 STATUS_RF_KILL_SW=0 <-> RFKILL_STATUS_HARD_BLOCKED
+STATUS_RF_KILL_HW=0 STATUS_RF_KILL_SW=0 <-> RFKILL_STATUS_UNBLOCKED
+
+Signed-off-by: Stanislaw Gruszka <sgruszka@redhat.com>
+Acked-by: Reinette Chatre <reinette.chatre@intel.com>
+Acked-by: John W. Linville <linville@tuxdriver.com>
+
+
+---
+ drivers/net/wireless/iwlwifi/iwl-rfkill.c | 26 ++++++++++++++++----------
+ 1 file changed, 16 insertions(+), 10 deletions(-)
+
+--- a/drivers/net/wireless/iwlwifi/iwl-rfkill.c
++++ b/drivers/net/wireless/iwlwifi/iwl-rfkill.c
+@@ -53,22 +53,31 @@ static int iwl_rfkill_soft_rf_kill(void
+ switch (state) {
+ case RFKILL_STATE_UNBLOCKED:
+ if (iwl_is_rfkill_hw(priv)) {
++ /* pass error to rfkill core, make it state HARD
++ * BLOCKED (rfkill->mutex taken) and disable
++ * software kill switch */
+ err = -EBUSY;
+- goto out_unlock;
++ priv->rfkill->state = RFKILL_STATE_HARD_BLOCKED;
+ }
+ iwl_radio_kill_sw_enable_radio(priv);
+ break;
+ case RFKILL_STATE_SOFT_BLOCKED:
+ iwl_radio_kill_sw_disable_radio(priv);
++ /* rfkill->mutex is taken */
++ if (priv->rfkill->state == RFKILL_STATE_HARD_BLOCKED) {
++ /* force rfkill core state to be SOFT BLOCKED,
++ * otherwise core will be unable to disable software
++ * kill switch */
++ priv->rfkill->state = RFKILL_STATE_SOFT_BLOCKED;
++ }
+ break;
+ default:
+ IWL_WARN(priv, "we received unexpected RFKILL state %d\n",
+ state);
+ break;
+ }
+-out_unlock:
+- mutex_unlock(&priv->mutex);
+
++ mutex_unlock(&priv->mutex);
+ return err;
+ }
+
+@@ -132,14 +141,11 @@ void iwl_rfkill_set_hw_state(struct iwl_
+ if (!priv->rfkill)
+ return;
+
+- if (iwl_is_rfkill_hw(priv)) {
++ if (iwl_is_rfkill_sw(priv))
++ rfkill_force_state(priv->rfkill, RFKILL_STATE_SOFT_BLOCKED);
++ else if (iwl_is_rfkill_hw(priv))
+ rfkill_force_state(priv->rfkill, RFKILL_STATE_HARD_BLOCKED);
+- return;
+- }
+-
+- if (!iwl_is_rfkill_sw(priv))
+- rfkill_force_state(priv->rfkill, RFKILL_STATE_UNBLOCKED);
+ else
+- rfkill_force_state(priv->rfkill, RFKILL_STATE_SOFT_BLOCKED);
++ rfkill_force_state(priv->rfkill, RFKILL_STATE_UNBLOCKED);
+ }
+ EXPORT_SYMBOL(iwl_rfkill_set_hw_state);
--- /dev/null
+From 99f1b01562b7dcae75b043114f76163fbf84fcab Mon Sep 17 00:00:00 2001
+From: Reinette Chatre <reinette.chatre@intel.com>
+Date: Mon, 3 Aug 2009 12:10:16 -0700
+Subject: iwlagn: do not send key clear commands when rfkill enabled
+
+From: Reinette Chatre <reinette.chatre@intel.com>
+
+commit 99f1b01562b7dcae75b043114f76163fbf84fcab upstream.
+
+Do all key clearing except sending commands to device when rfkill
+enabled. When rfkill enabled the interface is brought down and will
+be brought back up correctly after rfkill is enabled again.
+
+Same change is not needed for iwl3945 as it ignores return code when
+sending key clearing command to device.
+
+This fixes http://bugzilla.kernel.org/show_bug.cgi?id=13742
+
+Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
+Tested-by: Frans Pop <elendil@planet.nl>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/wireless/iwlwifi/iwl-sta.c | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
++++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
+@@ -560,6 +560,8 @@ int iwl_remove_default_wep_key(struct iw
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->sta_lock, flags);
++ IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n",
++ keyconf->keyidx);
+
+ if (!test_and_clear_bit(keyconf->keyidx, &priv->ucode_key_table))
+ IWL_ERR(priv, "index %d not used in uCode key table.\n",
+@@ -567,6 +569,11 @@ int iwl_remove_default_wep_key(struct iw
+
+ priv->default_wep_key--;
+ memset(&priv->wep_keys[keyconf->keyidx], 0, sizeof(priv->wep_keys[0]));
++ if (iwl_is_rfkill(priv)) {
++ IWL_DEBUG_WEP(priv, "Not sending REPLY_WEPKEY command due to RFKILL.\n");
++ spin_unlock_irqrestore(&priv->sta_lock, flags);
++ return 0;
++ }
+ ret = iwl_send_static_wepkey_cmd(priv, 1);
+ IWL_DEBUG_WEP(priv, "Remove default WEP key: idx=%d ret=%d\n",
+ keyconf->keyidx, ret);
+@@ -847,6 +854,11 @@ int iwl_remove_dynamic_key(struct iwl_pr
+ priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+ priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+
++ if (iwl_is_rfkill(priv)) {
++ IWL_DEBUG_WEP(priv, "Not sending REPLY_ADD_STA command because RFKILL enabled. \n");
++ spin_unlock_irqrestore(&priv->sta_lock, flags);
++ return 0;
++ }
+ ret = iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+ return ret;
--- /dev/null
+From oleg@redhat.com Thu Sep 3 15:22:09 2009
+From: Oleg Nesterov <oleg@redhat.com>
+Date: Mon, 24 Aug 2009 12:45:29 +0200
+Subject: kthreads: fix kthread_create() vs kthread_stop() race
+To: Greg Kroah-Hartman <gregkh@suse.de>, Rusty Russell <rusty@rustcorp.com.au>
+Cc: Robert Peterson <rpeterso@redhat.com>, stable@kernel.org, linux-kernel@vger.kernel.org
+Message-ID: <20090824104529.GA6899@redhat.com>
+Content-Disposition: inline
+
+From: Oleg Nesterov <oleg@redhat.com>
+
+The bug should be "accidently" fixed by recent changes in 2.6.31,
+all kernels <= 2.6.30 need the fix. The problem was never noticed before,
+it was found because it causes mysterious failures with GFS mount/umount.
+
+Credits to Robert Peterson. He blamed kthread.c from the very beginning.
+But, despite my promise, I forgot to inspect the old implementation until
+he did a lot of testing and reminded me. This led to huge delay in fixing
+this bug.
+
+kthread_stop() does put_task_struct(k) before it clears kthread_stop_info.k.
+This means another kthread_create() can re-use this task_struct, but the
+new kthread can still see kthread_should_stop() == T and exit even without
+calling threadfn().
+
+Reported-by: Robert Peterson <rpeterso@redhat.com>
+Tested-by: Robert Peterson <rpeterso@redhat.com>
+Signed-off-by: Oleg Nesterov <oleg@redhat.com>
+Acked-by: Rusty Russell <rusty@rustcorp.com.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ kernel/kthread.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/kthread.c
++++ b/kernel/kthread.c
+@@ -216,12 +216,12 @@ int kthread_stop(struct task_struct *k)
+ /* Now set kthread_should_stop() to true, and wake it up. */
+ kthread_stop_info.k = k;
+ wake_up_process(k);
+- put_task_struct(k);
+
+ /* Once it dies, reset stop ptr, gather result and we're done. */
+ wait_for_completion(&kthread_stop_info.done);
+ kthread_stop_info.k = NULL;
+ ret = kthread_stop_info.err;
++ put_task_struct(k);
+ mutex_unlock(&kthread_stop_lock);
+
+ trace_sched_kthread_stop_ret(ret);
--- /dev/null
+From 7831387bda72af3059be48d39846d3eb6d8ce2f6 Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Fri, 7 Aug 2009 01:59:15 +0900
+Subject: libata: OCZ Vertex can't do HPA
+
+From: Tejun Heo <tj@kernel.org>
+
+commit 7831387bda72af3059be48d39846d3eb6d8ce2f6 upstream.
+
+OCZ Vertex SSD can't do HPA and not in a usual way. It reports HPA,
+allows unlocking but then fails all IOs which fall in the unlocked
+area. Quirk it so that HPA unlocking is not used for the device.
+
+Reported by Daniel Perup in bnc#522414.
+
+ https://bugzilla.novell.com/show_bug.cgi?id=522414
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Reported-by: Daniel Perup <probe@spray.se>
+Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/ata/libata-core.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4271,6 +4271,9 @@ static const struct ata_blacklist_entry
+ { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
+ { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },
+
++ /* this one allows HPA unlocking but fails IOs on the area */
++ { "OCZ-VERTEX", "1.30", ATA_HORKAGE_BROKEN_HPA },
++
+ /* Devices which report 1 sector over size HPA */
+ { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, },
+ { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, },
--- /dev/null
+From 388ce4beb7135722c584b0af18f215e3ec657adf Mon Sep 17 00:00:00 2001
+From: Kashyap, Desai <kashyap.desai@lsi.com>
+Date: Fri, 14 Aug 2009 15:01:35 +0530
+Subject: [SCSI] mpt2sas: fix config request and diag reset deadlock
+
+From: Kashyap, Desai <kashyap.desai@lsi.com>
+
+commit 388ce4beb7135722c584b0af18f215e3ec657adf upstream.
+
+Moving the setting and clearing of the mutex's to
+_config_request. There was a mutex deadlock when diag reset is called from
+inside _config_request, so diag reset was moved to outside the mutexs.
+
+Signed-off-by: James Bottomley <James.Bottomley@suse.de>
+Signed-off-by: Kashyap Desai <kashyap.desai@lsi.com>
+Signed-off-by: Eric Moore <Eric.moore@lsi.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/scsi/mpt2sas/mpt2sas_config.c | 85 ++++++++--------------------------
+ 1 file changed, 20 insertions(+), 65 deletions(-)
+
+--- a/drivers/scsi/mpt2sas/mpt2sas_config.c
++++ b/drivers/scsi/mpt2sas/mpt2sas_config.c
+@@ -236,12 +236,14 @@ _config_request(struct MPT2SAS_ADAPTER *
+ Mpi2ConfigRequest_t *config_request;
+ int r;
+ u8 retry_count;
+- u8 issue_reset;
++ u8 issue_host_reset = 0;
+ u16 wait_state_count;
+
++ mutex_lock(&ioc->config_cmds.mutex);
+ if (ioc->config_cmds.status != MPT2_CMD_NOT_USED) {
+ printk(MPT2SAS_ERR_FMT "%s: config_cmd in use\n",
+ ioc->name, __func__);
++ mutex_unlock(&ioc->config_cmds.mutex);
+ return -EAGAIN;
+ }
+ retry_count = 0;
+@@ -260,8 +262,8 @@ _config_request(struct MPT2SAS_ADAPTER *
+ printk(MPT2SAS_ERR_FMT
+ "%s: failed due to ioc not operational\n",
+ ioc->name, __func__);
+- ioc->config_cmds.status = MPT2_CMD_NOT_USED;
+- return -EFAULT;
++ r = -EFAULT;
++ goto out;
+ }
+ ssleep(1);
+ ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
+@@ -277,8 +279,8 @@ _config_request(struct MPT2SAS_ADAPTER *
+ if (!smid) {
+ printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+- ioc->config_cmds.status = MPT2_CMD_NOT_USED;
+- return -EAGAIN;
++ r = -EAGAIN;
++ goto out;
+ }
+
+ r = 0;
+@@ -298,9 +300,15 @@ _config_request(struct MPT2SAS_ADAPTER *
+ ioc->name, __func__);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2ConfigRequest_t)/4);
+- if (!(ioc->config_cmds.status & MPT2_CMD_RESET))
+- issue_reset = 1;
+- goto issue_host_reset;
++ retry_count++;
++ if (ioc->config_cmds.smid == smid)
++ mpt2sas_base_free_smid(ioc, smid);
++ if ((ioc->shost_recovery) ||
++ (ioc->config_cmds.status & MPT2_CMD_RESET))
++ goto retry_config;
++ issue_host_reset = 1;
++ r = -EFAULT;
++ goto out;
+ }
+ if (ioc->config_cmds.status & MPT2_CMD_REPLY_VALID)
+ memcpy(mpi_reply, ioc->config_cmds.reply,
+@@ -308,21 +316,13 @@ _config_request(struct MPT2SAS_ADAPTER *
+ if (retry_count)
+ printk(MPT2SAS_INFO_FMT "%s: retry completed!!\n",
+ ioc->name, __func__);
++out:
+ ioc->config_cmds.status = MPT2_CMD_NOT_USED;
+- return r;
+-
+- issue_host_reset:
+- if (issue_reset)
++ mutex_unlock(&ioc->config_cmds.mutex);
++ if (issue_host_reset)
+ mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+- ioc->config_cmds.status = MPT2_CMD_NOT_USED;
+- if (!retry_count) {
+- printk(MPT2SAS_INFO_FMT "%s: attempting retry\n",
+- ioc->name, __func__);
+- retry_count++;
+- goto retry_config;
+- }
+- return -EFAULT;
++ return r;
+ }
+
+ /**
+@@ -381,7 +381,6 @@ mpt2sas_config_get_manufacturing_pg0(str
+ int r;
+ struct config_request mem;
+
+- mutex_lock(&ioc->config_cmds.mutex);
+ memset(config_page, 0, sizeof(Mpi2ManufacturingPage0_t));
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+@@ -423,7 +422,6 @@ mpt2sas_config_get_manufacturing_pg0(str
+ _config_free_config_dma_memory(ioc, &mem);
+
+ out:
+- mutex_unlock(&ioc->config_cmds.mutex);
+ return r;
+ }
+
+@@ -444,7 +442,6 @@ mpt2sas_config_get_bios_pg2(struct MPT2S
+ int r;
+ struct config_request mem;
+
+- mutex_lock(&ioc->config_cmds.mutex);
+ memset(config_page, 0, sizeof(Mpi2BiosPage2_t));
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+@@ -486,7 +483,6 @@ mpt2sas_config_get_bios_pg2(struct MPT2S
+ _config_free_config_dma_memory(ioc, &mem);
+
+ out:
+- mutex_unlock(&ioc->config_cmds.mutex);
+ return r;
+ }
+
+@@ -507,7 +503,6 @@ mpt2sas_config_get_bios_pg3(struct MPT2S
+ int r;
+ struct config_request mem;
+
+- mutex_lock(&ioc->config_cmds.mutex);
+ memset(config_page, 0, sizeof(Mpi2BiosPage3_t));
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+@@ -549,7 +544,6 @@ mpt2sas_config_get_bios_pg3(struct MPT2S
+ _config_free_config_dma_memory(ioc, &mem);
+
+ out:
+- mutex_unlock(&ioc->config_cmds.mutex);
+ return r;
+ }
+
+@@ -570,7 +564,6 @@ mpt2sas_config_get_iounit_pg0(struct MPT
+ int r;
+ struct config_request mem;
+
+- mutex_lock(&ioc->config_cmds.mutex);
+ memset(config_page, 0, sizeof(Mpi2IOUnitPage0_t));
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+@@ -612,7 +605,6 @@ mpt2sas_config_get_iounit_pg0(struct MPT
+ _config_free_config_dma_memory(ioc, &mem);
+
+ out:
+- mutex_unlock(&ioc->config_cmds.mutex);
+ return r;
+ }
+
+@@ -633,7 +625,6 @@ mpt2sas_config_get_iounit_pg1(struct MPT
+ int r;
+ struct config_request mem;
+
+- mutex_lock(&ioc->config_cmds.mutex);
+ memset(config_page, 0, sizeof(Mpi2IOUnitPage1_t));
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+@@ -675,7 +666,6 @@ mpt2sas_config_get_iounit_pg1(struct MPT
+ _config_free_config_dma_memory(ioc, &mem);
+
+ out:
+- mutex_unlock(&ioc->config_cmds.mutex);
+ return r;
+ }
+
+@@ -696,7 +686,6 @@ mpt2sas_config_set_iounit_pg1(struct MPT
+ int r;
+ struct config_request mem;
+
+- mutex_lock(&ioc->config_cmds.mutex);
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+@@ -738,7 +727,6 @@ mpt2sas_config_set_iounit_pg1(struct MPT
+ _config_free_config_dma_memory(ioc, &mem);
+
+ out:
+- mutex_unlock(&ioc->config_cmds.mutex);
+ return r;
+ }
+
+@@ -759,7 +747,6 @@ mpt2sas_config_get_ioc_pg8(struct MPT2SA
+ int r;
+ struct config_request mem;
+
+- mutex_lock(&ioc->config_cmds.mutex);
+ memset(config_page, 0, sizeof(Mpi2IOCPage8_t));
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+@@ -801,7 +788,6 @@ mpt2sas_config_get_ioc_pg8(struct MPT2SA
+ _config_free_config_dma_memory(ioc, &mem);
+
+ out:
+- mutex_unlock(&ioc->config_cmds.mutex);
+ return r;
+ }
+
+@@ -824,7 +810,6 @@ mpt2sas_config_get_sas_device_pg0(struct
+ int r;
+ struct config_request mem;
+
+- mutex_lock(&ioc->config_cmds.mutex);
+ memset(config_page, 0, sizeof(Mpi2SasDevicePage0_t));
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+@@ -869,7 +854,6 @@ mpt2sas_config_get_sas_device_pg0(struct
+ _config_free_config_dma_memory(ioc, &mem);
+
+ out:
+- mutex_unlock(&ioc->config_cmds.mutex);
+ return r;
+ }
+
+@@ -892,7 +876,6 @@ mpt2sas_config_get_sas_device_pg1(struct
+ int r;
+ struct config_request mem;
+
+- mutex_lock(&ioc->config_cmds.mutex);
+ memset(config_page, 0, sizeof(Mpi2SasDevicePage1_t));
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+@@ -937,7 +920,6 @@ mpt2sas_config_get_sas_device_pg1(struct
+ _config_free_config_dma_memory(ioc, &mem);
+
+ out:
+- mutex_unlock(&ioc->config_cmds.mutex);
+ return r;
+ }
+
+@@ -959,7 +941,6 @@ mpt2sas_config_get_number_hba_phys(struc
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasIOUnitPage0_t config_page;
+
+- mutex_lock(&ioc->config_cmds.mutex);
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+@@ -1008,7 +989,6 @@ mpt2sas_config_get_number_hba_phys(struc
+ _config_free_config_dma_memory(ioc, &mem);
+
+ out:
+- mutex_unlock(&ioc->config_cmds.mutex);
+ return r;
+ }
+
+@@ -1032,8 +1012,6 @@ mpt2sas_config_get_sas_iounit_pg0(struct
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+ struct config_request mem;
+-
+- mutex_lock(&ioc->config_cmds.mutex);
+ memset(config_page, 0, sz);
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+@@ -1076,7 +1054,6 @@ mpt2sas_config_get_sas_iounit_pg0(struct
+ _config_free_config_dma_memory(ioc, &mem);
+
+ out:
+- mutex_unlock(&ioc->config_cmds.mutex);
+ return r;
+ }
+
+@@ -1101,7 +1078,6 @@ mpt2sas_config_get_sas_iounit_pg1(struct
+ int r;
+ struct config_request mem;
+
+- mutex_lock(&ioc->config_cmds.mutex);
+ memset(config_page, 0, sz);
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+@@ -1144,7 +1120,6 @@ mpt2sas_config_get_sas_iounit_pg1(struct
+ _config_free_config_dma_memory(ioc, &mem);
+
+ out:
+- mutex_unlock(&ioc->config_cmds.mutex);
+ return r;
+ }
+
+@@ -1167,7 +1142,6 @@ mpt2sas_config_get_expander_pg0(struct M
+ int r;
+ struct config_request mem;
+
+- mutex_lock(&ioc->config_cmds.mutex);
+ memset(config_page, 0, sizeof(Mpi2ExpanderPage0_t));
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+@@ -1212,7 +1186,6 @@ mpt2sas_config_get_expander_pg0(struct M
+ _config_free_config_dma_memory(ioc, &mem);
+
+ out:
+- mutex_unlock(&ioc->config_cmds.mutex);
+ return r;
+ }
+
+@@ -1236,7 +1209,6 @@ mpt2sas_config_get_expander_pg1(struct M
+ int r;
+ struct config_request mem;
+
+- mutex_lock(&ioc->config_cmds.mutex);
+ memset(config_page, 0, sizeof(Mpi2ExpanderPage1_t));
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+@@ -1283,7 +1255,6 @@ mpt2sas_config_get_expander_pg1(struct M
+ _config_free_config_dma_memory(ioc, &mem);
+
+ out:
+- mutex_unlock(&ioc->config_cmds.mutex);
+ return r;
+ }
+
+@@ -1306,7 +1277,6 @@ mpt2sas_config_get_enclosure_pg0(struct
+ int r;
+ struct config_request mem;
+
+- mutex_lock(&ioc->config_cmds.mutex);
+ memset(config_page, 0, sizeof(Mpi2SasEnclosurePage0_t));
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+@@ -1351,7 +1321,6 @@ mpt2sas_config_get_enclosure_pg0(struct
+ _config_free_config_dma_memory(ioc, &mem);
+
+ out:
+- mutex_unlock(&ioc->config_cmds.mutex);
+ return r;
+ }
+
+@@ -1373,7 +1342,6 @@ mpt2sas_config_get_phy_pg0(struct MPT2SA
+ int r;
+ struct config_request mem;
+
+- mutex_lock(&ioc->config_cmds.mutex);
+ memset(config_page, 0, sizeof(Mpi2SasPhyPage0_t));
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+@@ -1419,7 +1387,6 @@ mpt2sas_config_get_phy_pg0(struct MPT2SA
+ _config_free_config_dma_memory(ioc, &mem);
+
+ out:
+- mutex_unlock(&ioc->config_cmds.mutex);
+ return r;
+ }
+
+@@ -1441,7 +1408,6 @@ mpt2sas_config_get_phy_pg1(struct MPT2SA
+ int r;
+ struct config_request mem;
+
+- mutex_lock(&ioc->config_cmds.mutex);
+ memset(config_page, 0, sizeof(Mpi2SasPhyPage1_t));
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+@@ -1487,7 +1453,6 @@ mpt2sas_config_get_phy_pg1(struct MPT2SA
+ _config_free_config_dma_memory(ioc, &mem);
+
+ out:
+- mutex_unlock(&ioc->config_cmds.mutex);
+ return r;
+ }
+
+@@ -1511,7 +1476,6 @@ mpt2sas_config_get_raid_volume_pg1(struc
+ int r;
+ struct config_request mem;
+
+- mutex_lock(&ioc->config_cmds.mutex);
+ memset(config_page, 0, sizeof(Mpi2RaidVolPage1_t));
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+@@ -1554,7 +1518,6 @@ mpt2sas_config_get_raid_volume_pg1(struc
+ _config_free_config_dma_memory(ioc, &mem);
+
+ out:
+- mutex_unlock(&ioc->config_cmds.mutex);
+ return r;
+ }
+
+@@ -1578,7 +1541,6 @@ mpt2sas_config_get_number_pds(struct MPT
+ struct config_request mem;
+ u16 ioc_status;
+
+- mutex_lock(&ioc->config_cmds.mutex);
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ *num_pds = 0;
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+@@ -1626,7 +1588,6 @@ mpt2sas_config_get_number_pds(struct MPT
+ _config_free_config_dma_memory(ioc, &mem);
+
+ out:
+- mutex_unlock(&ioc->config_cmds.mutex);
+ return r;
+ }
+
+@@ -1651,7 +1612,6 @@ mpt2sas_config_get_raid_volume_pg0(struc
+ int r;
+ struct config_request mem;
+
+- mutex_lock(&ioc->config_cmds.mutex);
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ memset(config_page, 0, sz);
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+@@ -1693,7 +1653,6 @@ mpt2sas_config_get_raid_volume_pg0(struc
+ _config_free_config_dma_memory(ioc, &mem);
+
+ out:
+- mutex_unlock(&ioc->config_cmds.mutex);
+ return r;
+ }
+
+@@ -1717,7 +1676,6 @@ mpt2sas_config_get_phys_disk_pg0(struct
+ int r;
+ struct config_request mem;
+
+- mutex_lock(&ioc->config_cmds.mutex);
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ memset(config_page, 0, sizeof(Mpi2RaidPhysDiskPage0_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+@@ -1760,7 +1718,6 @@ mpt2sas_config_get_phys_disk_pg0(struct
+ _config_free_config_dma_memory(ioc, &mem);
+
+ out:
+- mutex_unlock(&ioc->config_cmds.mutex);
+ return r;
+ }
+
+@@ -1784,7 +1741,6 @@ mpt2sas_config_get_volume_handle(struct
+ struct config_request mem;
+ u16 ioc_status;
+
+- mutex_lock(&ioc->config_cmds.mutex);
+ *volume_handle = 0;
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+@@ -1848,7 +1804,6 @@ mpt2sas_config_get_volume_handle(struct
+ _config_free_config_dma_memory(ioc, &mem);
+
+ out:
+- mutex_unlock(&ioc->config_cmds.mutex);
+ return r;
+ }
+
--- /dev/null
+From e7432675f8ca868a4af365759a8d4c3779a3d922 Mon Sep 17 00:00:00 2001
+From: Sunil Mushran <sunil.mushran@oracle.com>
+Date: Thu, 6 Aug 2009 16:12:58 -0700
+Subject: ocfs2: Initialize the cluster we're writing to in a non-sparse extend
+
+From: Sunil Mushran <sunil.mushran@oracle.com>
+
+commit e7432675f8ca868a4af365759a8d4c3779a3d922 upstream.
+
+In a non-sparse extend, we correctly allocate (and zero) the clusters between
+the old_i_size and pos, but we don't zero the portions of the cluster we're
+writing to outside of pos<->len.
+
+It handles clustersize > pagesize and blocksize < pagesize.
+
+[Cleaned up by Joel Becker.]
+
+Signed-off-by: Sunil Mushran <sunil.mushran@oracle.com>
+Signed-off-by: Joel Becker <joel.becker@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/ocfs2/aops.c | 66 +++++++++++++++++++++++++++++++++++++++-----------------
+ 1 file changed, 47 insertions(+), 19 deletions(-)
+
+--- a/fs/ocfs2/aops.c
++++ b/fs/ocfs2/aops.c
+@@ -894,18 +894,17 @@ struct ocfs2_write_cluster_desc {
+ */
+ unsigned c_new;
+ unsigned c_unwritten;
++ unsigned c_needs_zero;
+ };
+
+-static inline int ocfs2_should_zero_cluster(struct ocfs2_write_cluster_desc *d)
+-{
+- return d->c_new || d->c_unwritten;
+-}
+-
+ struct ocfs2_write_ctxt {
+ /* Logical cluster position / len of write */
+ u32 w_cpos;
+ u32 w_clen;
+
++ /* First cluster allocated in a nonsparse extend */
++ u32 w_first_new_cpos;
++
+ struct ocfs2_write_cluster_desc w_desc[OCFS2_MAX_CLUSTERS_PER_PAGE];
+
+ /*
+@@ -983,6 +982,7 @@ static int ocfs2_alloc_write_ctxt(struct
+ return -ENOMEM;
+
+ wc->w_cpos = pos >> osb->s_clustersize_bits;
++ wc->w_first_new_cpos = UINT_MAX;
+ cend = (pos + len - 1) >> osb->s_clustersize_bits;
+ wc->w_clen = cend - wc->w_cpos + 1;
+ get_bh(di_bh);
+@@ -1217,20 +1217,18 @@ out:
+ */
+ static int ocfs2_write_cluster(struct address_space *mapping,
+ u32 phys, unsigned int unwritten,
++ unsigned int should_zero,
+ struct ocfs2_alloc_context *data_ac,
+ struct ocfs2_alloc_context *meta_ac,
+ struct ocfs2_write_ctxt *wc, u32 cpos,
+ loff_t user_pos, unsigned user_len)
+ {
+- int ret, i, new, should_zero = 0;
++ int ret, i, new;
+ u64 v_blkno, p_blkno;
+ struct inode *inode = mapping->host;
+ struct ocfs2_extent_tree et;
+
+ new = phys == 0 ? 1 : 0;
+- if (new || unwritten)
+- should_zero = 1;
+-
+ if (new) {
+ u32 tmp_pos;
+
+@@ -1341,7 +1339,9 @@ static int ocfs2_write_cluster_by_desc(s
+ local_len = osb->s_clustersize - cluster_off;
+
+ ret = ocfs2_write_cluster(mapping, desc->c_phys,
+- desc->c_unwritten, data_ac, meta_ac,
++ desc->c_unwritten,
++ desc->c_needs_zero,
++ data_ac, meta_ac,
+ wc, desc->c_cpos, pos, local_len);
+ if (ret) {
+ mlog_errno(ret);
+@@ -1391,14 +1391,14 @@ static void ocfs2_set_target_boundaries(
+ * newly allocated cluster.
+ */
+ desc = &wc->w_desc[0];
+- if (ocfs2_should_zero_cluster(desc))
++ if (desc->c_needs_zero)
+ ocfs2_figure_cluster_boundaries(osb,
+ desc->c_cpos,
+ &wc->w_target_from,
+ NULL);
+
+ desc = &wc->w_desc[wc->w_clen - 1];
+- if (ocfs2_should_zero_cluster(desc))
++ if (desc->c_needs_zero)
+ ocfs2_figure_cluster_boundaries(osb,
+ desc->c_cpos,
+ NULL,
+@@ -1466,13 +1466,28 @@ static int ocfs2_populate_write_desc(str
+ phys++;
+ }
+
++ /*
++ * If w_first_new_cpos is < UINT_MAX, we have a non-sparse
++ * file that got extended. w_first_new_cpos tells us
++ * where the newly allocated clusters are so we can
++ * zero them.
++ */
++ if (desc->c_cpos >= wc->w_first_new_cpos) {
++ BUG_ON(phys == 0);
++ desc->c_needs_zero = 1;
++ }
++
+ desc->c_phys = phys;
+ if (phys == 0) {
+ desc->c_new = 1;
++ desc->c_needs_zero = 1;
+ *clusters_to_alloc = *clusters_to_alloc + 1;
+ }
+- if (ext_flags & OCFS2_EXT_UNWRITTEN)
++
++ if (ext_flags & OCFS2_EXT_UNWRITTEN) {
+ desc->c_unwritten = 1;
++ desc->c_needs_zero = 1;
++ }
+
+ num_clusters--;
+ }
+@@ -1632,10 +1647,13 @@ static int ocfs2_expand_nonsparse_inode(
+ if (newsize <= i_size_read(inode))
+ return 0;
+
+- ret = ocfs2_extend_no_holes(inode, newsize, newsize - len);
++ ret = ocfs2_extend_no_holes(inode, newsize, pos);
+ if (ret)
+ mlog_errno(ret);
+
++ wc->w_first_new_cpos =
++ ocfs2_clusters_for_bytes(inode->i_sb, i_size_read(inode));
++
+ return ret;
+ }
+
+@@ -1644,7 +1662,7 @@ int ocfs2_write_begin_nolock(struct addr
+ struct page **pagep, void **fsdata,
+ struct buffer_head *di_bh, struct page *mmap_page)
+ {
+- int ret, credits = OCFS2_INODE_UPDATE_CREDITS;
++ int ret, cluster_of_pages, credits = OCFS2_INODE_UPDATE_CREDITS;
+ unsigned int clusters_to_alloc, extents_to_split;
+ struct ocfs2_write_ctxt *wc;
+ struct inode *inode = mapping->host;
+@@ -1722,8 +1740,19 @@ int ocfs2_write_begin_nolock(struct addr
+
+ }
+
+- ocfs2_set_target_boundaries(osb, wc, pos, len,
+- clusters_to_alloc + extents_to_split);
++ /*
++ * We have to zero sparse allocated clusters, unwritten extent clusters,
++ * and non-sparse clusters we just extended. For non-sparse writes,
++ * we know zeros will only be needed in the first and/or last cluster.
++ */
++ if (clusters_to_alloc || extents_to_split ||
++ wc->w_desc[0].c_needs_zero ||
++ wc->w_desc[wc->w_clen - 1].c_needs_zero)
++ cluster_of_pages = 1;
++ else
++ cluster_of_pages = 0;
++
++ ocfs2_set_target_boundaries(osb, wc, pos, len, cluster_of_pages);
+
+ handle = ocfs2_start_trans(osb, credits);
+ if (IS_ERR(handle)) {
+@@ -1756,8 +1785,7 @@ int ocfs2_write_begin_nolock(struct addr
+ * extent.
+ */
+ ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos,
+- clusters_to_alloc + extents_to_split,
+- mmap_page);
++ cluster_of_pages, mmap_page);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_quota;
--- /dev/null
+From be9e8cd75ce8d94ae4aab721fdcc337fa78a9090 Mon Sep 17 00:00:00 2001
+From: Kashyap, Desai <kashyap.desai@lsi.com>
+Date: Fri, 7 Aug 2009 19:36:43 +0530
+Subject: SCSI: mpt2sas: Excessive log info causes sas iounit page time out
+
+From: Kashyap, Desai <kashyap.desai@lsi.com>
+
+commit be9e8cd75ce8d94ae4aab721fdcc337fa78a9090 upstream.
+
+Inhibit 0x3117 loginfos - during cable pull, there are too many printks going
+to the syslog; this has an impact on how fast the interrupt routine can keep
+up with command completions, and this was the root cause of the config
+page timeouts.
+
+Signed-off-by: Kashyap Desai <kashyap.desai@lsi.com>
+Signed-off-by: James Bottomley <James.Bottomley@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/scsi/mpt2sas/mpt2sas_base.c | 4 ++++
+ drivers/scsi/mpt2sas/mpt2sas_scsih.c | 4 ++++
+ 2 files changed, 8 insertions(+)
+
+--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
++++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
+@@ -440,6 +440,10 @@ _base_sas_log_info(struct MPT2SAS_ADAPTE
+ if (sas_loginfo.dw.bus_type != 3 /*SAS*/)
+ return;
+
++ /* each nexus loss loginfo */
++ if (log_info == 0x31170000)
++ return;
++
+ /* eat the loginfos associated with task aborts */
+ if (ioc->ignore_loginfos && (log_info == 30050000 || log_info ==
+ 0x31140000 || log_info == 0x31130000))
+--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
++++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+@@ -2560,6 +2560,10 @@ _scsih_scsi_ioc_info(struct MPT2SAS_ADAP
+ char *desc_ioc_state = NULL;
+ char *desc_scsi_status = NULL;
+ char *desc_scsi_state = ioc->tmp_string;
++ u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
++
++ if (log_info == 0x31170000)
++ return;
+
+ switch (ioc_status) {
+ case MPI2_IOCSTATUS_SUCCESS:
--- /dev/null
+From 20f5895d55d9281830bfb7819c5c5b70b05297a6 Mon Sep 17 00:00:00 2001
+From: Kashyap, Desai <kashyap.desai@lsi.com>
+Date: Fri, 7 Aug 2009 19:34:26 +0530
+Subject: SCSI: mpt2sas: Expander fix oops saying "Already part of another port"
+
+From: Kashyap, Desai <kashyap.desai@lsi.com>
+
+commit 20f5895d55d9281830bfb7819c5c5b70b05297a6 upstream.
+
+Kernel panic is seen because the driver did not tear down the port, which
+should be done using mpt2sas_transport_port_remove(). Without this fix, when
+the expander is added back we would oops inside sas_port_add_phy.
+
+Signed-off-by: Kashyap Desai <kashyap.desai@lsi.com>
+Signed-off-by: James Bottomley <James.Bottomley@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/scsi/mpt2sas/mpt2sas_scsih.c | 21 +++++++++++++++------
+ 1 file changed, 15 insertions(+), 6 deletions(-)
+
+--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
++++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+@@ -3205,7 +3205,7 @@ _scsih_expander_add(struct MPT2SAS_ADAPT
+ __le64 sas_address;
+ int i;
+ unsigned long flags;
+- struct _sas_port *mpt2sas_port;
++ struct _sas_port *mpt2sas_port = NULL;
+ int rc = 0;
+
+ if (!handle)
+@@ -3297,12 +3297,20 @@ _scsih_expander_add(struct MPT2SAS_ADAPT
+ &expander_pg1, i, handle))) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+- continue;
++ rc = -1;
++ goto out_fail;
+ }
+ sas_expander->phy[i].handle = handle;
+ sas_expander->phy[i].phy_id = i;
+- mpt2sas_transport_add_expander_phy(ioc, &sas_expander->phy[i],
+- expander_pg1, sas_expander->parent_dev);
++
++ if ((mpt2sas_transport_add_expander_phy(ioc,
++ &sas_expander->phy[i], expander_pg1,
++ sas_expander->parent_dev))) {
++ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
++ ioc->name, __FILE__, __LINE__, __func__);
++ rc = -1;
++ goto out_fail;
++ }
+ }
+
+ if (sas_expander->enclosure_handle) {
+@@ -3319,8 +3327,9 @@ _scsih_expander_add(struct MPT2SAS_ADAPT
+
+ out_fail:
+
+- if (sas_expander)
+- kfree(sas_expander->phy);
++ if (mpt2sas_port)
++ mpt2sas_transport_port_remove(ioc, sas_expander->sas_address,
++ sas_expander->parent_handle);
+ kfree(sas_expander);
+ return rc;
+ }
--- /dev/null
+From e4750c989f732555fca86dd73d488c79972362db Mon Sep 17 00:00:00 2001
+From: Kashyap, Desai <kashyap.desai@lsi.com>
+Date: Fri, 7 Aug 2009 19:37:59 +0530
+Subject: SCSI: mpt2sas: fix crash due to Watchdog is active while OS in standby mode
+
+From: Kashyap, Desai <kashyap.desai@lsi.com>
+
+commit e4750c989f732555fca86dd73d488c79972362db upstream.
+
+Fix oops occurring at hibernation time. This oops was due to the firmware fault
+watchdog timer still running after we freed resources. To fix the issue we need
+to terminate the watchdog timer at hibernation time.
+
+Signed-off-by: Kashyap Desai <kashyap.desai@lsi.com>
+Signed-off-by: James Bottomley <James.Bottomley@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/scsi/mpt2sas/mpt2sas_base.c | 88 +++++++++++++++++++++++------------
+ drivers/scsi/mpt2sas/mpt2sas_base.h | 2
+ drivers/scsi/mpt2sas/mpt2sas_scsih.c | 2
+ 3 files changed, 64 insertions(+), 28 deletions(-)
+
+--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
++++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
+@@ -119,6 +119,64 @@ _base_fault_reset_work(struct work_struc
+ spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
+ }
+
++/**
++ * mpt2sas_base_start_watchdog - start the fault_reset_work_q
++ * @ioc: pointer to scsi command object
++ * Context: sleep.
++ *
++ * Return nothing.
++ */
++void
++mpt2sas_base_start_watchdog(struct MPT2SAS_ADAPTER *ioc)
++{
++ unsigned long flags;
++
++ if (ioc->fault_reset_work_q)
++ return;
++
++ /* initialize fault polling */
++ INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work);
++ snprintf(ioc->fault_reset_work_q_name,
++ sizeof(ioc->fault_reset_work_q_name), "poll_%d_status", ioc->id);
++ ioc->fault_reset_work_q =
++ create_singlethread_workqueue(ioc->fault_reset_work_q_name);
++ if (!ioc->fault_reset_work_q) {
++ printk(MPT2SAS_ERR_FMT "%s: failed (line=%d)\n",
++ ioc->name, __func__, __LINE__);
++ return;
++ }
++ spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
++ if (ioc->fault_reset_work_q)
++ queue_delayed_work(ioc->fault_reset_work_q,
++ &ioc->fault_reset_work,
++ msecs_to_jiffies(FAULT_POLLING_INTERVAL));
++ spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
++}
++
++/**
++ * mpt2sas_base_stop_watchdog - stop the fault_reset_work_q
++ * @ioc: pointer to scsi command object
++ * Context: sleep.
++ *
++ * Return nothing.
++ */
++void
++mpt2sas_base_stop_watchdog(struct MPT2SAS_ADAPTER *ioc)
++{
++ unsigned long flags;
++ struct workqueue_struct *wq;
++
++ spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
++ wq = ioc->fault_reset_work_q;
++ ioc->fault_reset_work_q = NULL;
++ spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
++ if (wq) {
++ if (!cancel_delayed_work(&ioc->fault_reset_work))
++ flush_workqueue(wq);
++ destroy_workqueue(wq);
++ }
++}
++
+ #ifdef CONFIG_SCSI_MPT2SAS_LOGGING
+ /**
+ * _base_sas_ioc_info - verbose translation of the ioc status
+@@ -3209,7 +3267,6 @@ int
+ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
+ {
+ int r, i;
+- unsigned long flags;
+
+ dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name,
+ __func__));
+@@ -3292,23 +3349,7 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPT
+ if (r)
+ goto out_free_resources;
+
+- /* initialize fault polling */
+- INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work);
+- snprintf(ioc->fault_reset_work_q_name,
+- sizeof(ioc->fault_reset_work_q_name), "poll_%d_status", ioc->id);
+- ioc->fault_reset_work_q =
+- create_singlethread_workqueue(ioc->fault_reset_work_q_name);
+- if (!ioc->fault_reset_work_q) {
+- printk(MPT2SAS_ERR_FMT "%s: failed (line=%d)\n",
+- ioc->name, __func__, __LINE__);
+- goto out_free_resources;
+- }
+- spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
+- if (ioc->fault_reset_work_q)
+- queue_delayed_work(ioc->fault_reset_work_q,
+- &ioc->fault_reset_work,
+- msecs_to_jiffies(FAULT_POLLING_INTERVAL));
+- spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
++ mpt2sas_base_start_watchdog(ioc);
+ return 0;
+
+ out_free_resources:
+@@ -3341,20 +3382,11 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPT
+ void
+ mpt2sas_base_detach(struct MPT2SAS_ADAPTER *ioc)
+ {
+- unsigned long flags;
+- struct workqueue_struct *wq;
+
+ dexitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name,
+ __func__));
+
+- spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
+- wq = ioc->fault_reset_work_q;
+- ioc->fault_reset_work_q = NULL;
+- spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
+- if (!cancel_delayed_work(&ioc->fault_reset_work))
+- flush_workqueue(wq);
+- destroy_workqueue(wq);
+-
++ mpt2sas_base_stop_watchdog(ioc);
+ mpt2sas_base_free_resources(ioc);
+ _base_release_memory_pools(ioc);
+ kfree(ioc->pfacts);
+--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
++++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
+@@ -672,6 +672,8 @@ typedef void (*MPT_CALLBACK)(struct MPT2
+
+ /* base shared API */
+ extern struct list_head mpt2sas_ioc_list;
++void mpt2sas_base_start_watchdog(struct MPT2SAS_ADAPTER *ioc);
++void mpt2sas_base_stop_watchdog(struct MPT2SAS_ADAPTER *ioc);
+
+ int mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc);
+ void mpt2sas_base_detach(struct MPT2SAS_ADAPTER *ioc);
+--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
++++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+@@ -5599,6 +5599,7 @@ scsih_suspend(struct pci_dev *pdev, pm_m
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+ u32 device_state;
+
++ mpt2sas_base_stop_watchdog(ioc);
+ flush_scheduled_work();
+ scsi_block_requests(shost);
+ device_state = pci_choose_state(pdev, state);
+@@ -5641,6 +5642,7 @@ scsih_resume(struct pci_dev *pdev)
+
+ mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP, SOFT_RESET);
+ scsi_unblock_requests(shost);
++ mpt2sas_base_start_watchdog(ioc);
+ return 0;
+ }
+ #endif /* CONFIG_PM */
--- /dev/null
+From 6bd4e1e4d6023f4da069fd68729c502cc4e6dfd0 Mon Sep 17 00:00:00 2001
+From: Kashyap, Desai <kashyap.desai@lsi.com>
+Date: Fri, 7 Aug 2009 19:37:14 +0530
+Subject: SCSI: mpt2sas: fix infinite loop inside config request
+
+From: Kashyap, Desai <kashyap.desai@lsi.com>
+
+commit 6bd4e1e4d6023f4da069fd68729c502cc4e6dfd0 upstream.
+
+This restriction is introduced just to avoid an infinite loop of
+config_request retries. Retries must be limited, so we have restricted
+config requests to a maximum of 2 retries.
+
+Signed-off-by: Kashyap Desai <kashyap.desai@lsi.com>
+Signed-off-by: James Bottomley <James.Bottomley@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+diff --git a/drivers/scsi/mpt2sas/mpt2sas_config.c b/drivers/scsi/mpt2sas/mpt2sas_config.c
+index 58cfb97..1c6658c 100644
+--- a/drivers/scsi/mpt2sas/mpt2sas_config.c
++++ b/drivers/scsi/mpt2sas/mpt2sas_config.c
+@@ -247,6 +247,12 @@ _config_request(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
+ retry_count = 0;
+
+ retry_config:
++ if (retry_count) {
++ if (retry_count > 2) /* attempt only 2 retries */
++ return -EFAULT;
++ printk(MPT2SAS_INFO_FMT "%s: attempting retry (%d)\n",
++ ioc->name, __func__, retry_count);
++ }
+ wait_state_count = 0;
+ ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
+ while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
--- /dev/null
+From fcfe6392d18283df3c561b5ef59c330d485ff8ca Mon Sep 17 00:00:00 2001
+From: Kashyap, Desai <kashyap.desai@lsi.com>
+Date: Fri, 7 Aug 2009 19:38:48 +0530
+Subject: SCSI: mpt2sas: fix oops because drv data points to NULL on resume from hibernate
+
+From: Kashyap, Desai <kashyap.desai@lsi.com>
+
+commit fcfe6392d18283df3c561b5ef59c330d485ff8ca upstream.
+
+Fix another oops occurring when the system resumes. This oops was due to the
+driver setting the pci drvdata to NULL on the prior hibernation. Because it was
+set to NULL, upon resume we assume the pci drvdata is non-zero, and we oops.
+To fix the oops, we don't set pci drvdata to NULL at hibernation time.
+
+Signed-off-by: Kashyap Desai <kashyap.desai@lsi.com>
+Signed-off-by: James Bottomley <James.Bottomley@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/scsi/mpt2sas/mpt2sas_base.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
++++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
+@@ -1171,7 +1171,6 @@ mpt2sas_base_map_resources(struct MPT2SA
+ }
+ }
+
+- pci_set_drvdata(pdev, ioc->shost);
+ _base_mask_interrupts(ioc);
+ r = _base_enable_msix(ioc);
+ if (r)
+@@ -1194,7 +1193,6 @@ mpt2sas_base_map_resources(struct MPT2SA
+ ioc->pci_irq = -1;
+ pci_release_selected_regions(ioc->pdev, ioc->bars);
+ pci_disable_device(pdev);
+- pci_set_drvdata(pdev, NULL);
+ return r;
+ }
+
+@@ -3253,7 +3251,6 @@ mpt2sas_base_free_resources(struct MPT2S
+ ioc->chip_phys = 0;
+ pci_release_selected_regions(ioc->pdev, ioc->bars);
+ pci_disable_device(pdev);
+- pci_set_drvdata(pdev, NULL);
+ return;
+ }
+
+@@ -3275,6 +3272,7 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPT
+ if (r)
+ return r;
+
++ pci_set_drvdata(ioc->pdev, ioc->shost);
+ r = _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
+ if (r)
+ goto out_free_resources;
+@@ -3357,6 +3355,7 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPT
+ ioc->remove_host = 1;
+ mpt2sas_base_free_resources(ioc);
+ _base_release_memory_pools(ioc);
++ pci_set_drvdata(ioc->pdev, NULL);
+ kfree(ioc->tm_cmds.reply);
+ kfree(ioc->transport_cmds.reply);
+ kfree(ioc->config_cmds.reply);
+@@ -3389,6 +3388,7 @@ mpt2sas_base_detach(struct MPT2SAS_ADAPT
+ mpt2sas_base_stop_watchdog(ioc);
+ mpt2sas_base_free_resources(ioc);
+ _base_release_memory_pools(ioc);
++ pci_set_drvdata(ioc->pdev, NULL);
+ kfree(ioc->pfacts);
+ kfree(ioc->ctl_cmds.reply);
+ kfree(ioc->base_cmds.reply);
--- /dev/null
+From 15052c9e85bf0cdadcb69eb89623bf12bad8b4f8 Mon Sep 17 00:00:00 2001
+From: Kashyap, Desai <kashyap.desai@lsi.com>
+Date: Fri, 7 Aug 2009 19:33:17 +0530
+Subject: SCSI: mpt2sas: Introduced check for enclosure_handle to avoid crash
+
+From: Kashyap, Desai <kashyap.desai@lsi.com>
+
+commit 15052c9e85bf0cdadcb69eb89623bf12bad8b4f8 upstream.
+
+Kernel panic is seen because the enclosure_handle received from FW is zero.
+A check is introduced before calling mpt2sas_config_get_enclosure_pg0.
+
+Signed-off-by: Kashyap Desai <kashyap.desai@lsi.com>
+Signed-off-by: James Bottomley <James.Bottomley@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/scsi/mpt2sas/mpt2sas_scsih.c | 7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
++++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+@@ -3442,12 +3442,11 @@ _scsih_add_device(struct MPT2SAS_ADAPTER
+ sas_device->hidden_raid_component = is_pd;
+
+ /* get enclosure_logical_id */
+- if (!(mpt2sas_config_get_enclosure_pg0(ioc, &mpi_reply, &enclosure_pg0,
+- MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
+- sas_device->enclosure_handle))) {
++ if (sas_device->enclosure_handle && !(mpt2sas_config_get_enclosure_pg0(
++ ioc, &mpi_reply, &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
++ sas_device->enclosure_handle)))
+ sas_device->enclosure_logical_id =
+ le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
+- }
+
+ /* get device name */
+ sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName);
--- /dev/null
+From 62727a7ba43c0abf2673e3877079c136a9721792 Mon Sep 17 00:00:00 2001
+From: Kashyap, Desai <kashyap.desai@lsi.com>
+Date: Fri, 7 Aug 2009 19:35:18 +0530
+Subject: SCSI: mpt2sas: Raid 10 Value is showing as Raid 1E in /va/log/messages
+
+From: Kashyap, Desai <kashyap.desai@lsi.com>
+
+commit 62727a7ba43c0abf2673e3877079c136a9721792 upstream.
+
+When a volume is activated, the driver will receive a pair of ir config change
+events to remove the foreign volume, then add the native.
+In the process of the removal event, the hidden raid component is removed from
+the parent. When the disk is added back, the adding of the port fails because
+there is no instance of the device in its parent.
+To fix this issue, the driver needs to call mpt2sas_transport_update_links()
+prior to calling _scsih_add_device. In addition, we added sanity checks on
+volume add and removal to ignore events for foreign volumes.
+
+Signed-off-by: Kashyap Desai <kashyap.desai@lsi.com>
+Signed-off-by: James Bottomley <James.Bottomley@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/scsi/mpt2sas/mpt2sas_scsih.c | 90 ++++++++++++++++++++++++-----------
+ 1 file changed, 64 insertions(+), 26 deletions(-)
+
+--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
++++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+@@ -4037,12 +4037,6 @@ _scsih_sas_volume_add(struct MPT2SAS_ADA
+ u16 handle = le16_to_cpu(element->VolDevHandle);
+ int rc;
+
+-#if 0 /* RAID_HACKS */
+- if (le32_to_cpu(event_data->Flags) &
+- MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
+- return;
+-#endif
+-
+ mpt2sas_config_get_volume_wwid(ioc, handle, &wwid);
+ if (!wwid) {
+ printk(MPT2SAS_ERR_FMT
+@@ -4097,12 +4091,6 @@ _scsih_sas_volume_delete(struct MPT2SAS_
+ unsigned long flags;
+ struct MPT2SAS_TARGET *sas_target_priv_data;
+
+-#if 0 /* RAID_HACKS */
+- if (le32_to_cpu(event_data->Flags) &
+- MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
+- return;
+-#endif
+-
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ raid_device = _scsih_raid_device_find_by_handle(ioc, handle);
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+@@ -4215,14 +4203,38 @@ _scsih_sas_pd_add(struct MPT2SAS_ADAPTER
+ struct _sas_device *sas_device;
+ unsigned long flags;
+ u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
++ Mpi2ConfigReply_t mpi_reply;
++ Mpi2SasDevicePage0_t sas_device_pg0;
++ u32 ioc_status;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+- if (sas_device)
++ if (sas_device) {
+ sas_device->hidden_raid_component = 1;
+- else
+- _scsih_add_device(ioc, handle, 0, 1);
++ return;
++ }
++
++ if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
++ MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
++ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
++ ioc->name, __FILE__, __LINE__, __func__);
++ return;
++ }
++
++ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
++ MPI2_IOCSTATUS_MASK;
++ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
++ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
++ ioc->name, __FILE__, __LINE__, __func__);
++ return;
++ }
++
++ _scsih_link_change(ioc,
++ le16_to_cpu(sas_device_pg0.ParentDevHandle),
++ handle, sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
++
++ _scsih_add_device(ioc, handle, 0, 1);
+ }
+
+ #ifdef CONFIG_SCSI_MPT2SAS_LOGGING
+@@ -4322,12 +4334,15 @@ _scsih_sas_ir_config_change_event(struct
+ {
+ Mpi2EventIrConfigElement_t *element;
+ int i;
++ u8 foreign_config;
+
+ #ifdef CONFIG_SCSI_MPT2SAS_LOGGING
+ if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
+ _scsih_sas_ir_config_change_event_debug(ioc, event_data);
+
+ #endif
++ foreign_config = (le32_to_cpu(event_data->Flags) &
++ MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 1 : 0;
+
+ element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
+ for (i = 0; i < event_data->NumElements; i++, element++) {
+@@ -4335,11 +4350,13 @@ _scsih_sas_ir_config_change_event(struct
+ switch (element->ReasonCode) {
+ case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
+ case MPI2_EVENT_IR_CHANGE_RC_ADDED:
+- _scsih_sas_volume_add(ioc, element);
++ if (!foreign_config)
++ _scsih_sas_volume_add(ioc, element);
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
+ case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
+- _scsih_sas_volume_delete(ioc, element);
++ if (!foreign_config)
++ _scsih_sas_volume_delete(ioc, element);
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
+ _scsih_sas_pd_hide(ioc, element);
+@@ -4458,6 +4475,9 @@ _scsih_sas_ir_physical_disk_event(struct
+ u32 state;
+ struct _sas_device *sas_device;
+ unsigned long flags;
++ Mpi2ConfigReply_t mpi_reply;
++ Mpi2SasDevicePage0_t sas_device_pg0;
++ u32 ioc_status;
+
+ if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED)
+ return;
+@@ -4474,22 +4494,40 @@ _scsih_sas_ir_physical_disk_event(struct
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+ switch (state) {
+-#if 0
+- case MPI2_RAID_PD_STATE_OFFLINE:
+- if (sas_device)
+- _scsih_remove_device(ioc, handle);
+- break;
+-#endif
+ case MPI2_RAID_PD_STATE_ONLINE:
+ case MPI2_RAID_PD_STATE_DEGRADED:
+ case MPI2_RAID_PD_STATE_REBUILDING:
+ case MPI2_RAID_PD_STATE_OPTIMAL:
+- if (sas_device)
++ if (sas_device) {
+ sas_device->hidden_raid_component = 1;
+- else
+- _scsih_add_device(ioc, handle, 0, 1);
++ return;
++ }
++
++ if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply,
++ &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
++ handle))) {
++ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
++ ioc->name, __FILE__, __LINE__, __func__);
++ return;
++ }
++
++ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
++ MPI2_IOCSTATUS_MASK;
++ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
++ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
++ ioc->name, __FILE__, __LINE__, __func__);
++ return;
++ }
++
++ _scsih_link_change(ioc,
++ le16_to_cpu(sas_device_pg0.ParentDevHandle),
++ handle, sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
++
++ _scsih_add_device(ioc, handle, 0, 1);
++
+ break;
+
++ case MPI2_RAID_PD_STATE_OFFLINE:
+ case MPI2_RAID_PD_STATE_NOT_CONFIGURED:
+ case MPI2_RAID_PD_STATE_NOT_COMPATIBLE:
+ case MPI2_RAID_PD_STATE_HOT_SPARE:
kvm-fix-ack-not-being-delivered-when-msi-present.patch
kvm-avoid-redelivery-of-edge-interrupt-before-next-edge.patch
kvm-fix-kvm_get_msr_index_list.patch
+iwl3945-fix-rfkill-switch.patch
+iwlagn-do-not-send-key-clear-commands-when-rfkill-enabled.patch
+libata-ocz-vertex-can-t-do-hpa.patch
+scsi-mpt2sas-introduced-check-for-enclosure_handle-to-avoid-crash.patch
+scsi-mpt2sas-expander-fix-oops-saying-already-part-of-another-port.patch
+scsi-mpt2sas-raid-10-value-is-showing-as-raid-1e-in-va-log-messages.patch
+scsi-mpt2sas-excessive-log-info-causes-sas-iounit-page-time-out.patch
+scsi-mpt2sas-fix-infinite-loop-inside-config-request.patch
+scsi-mpt2sas-fix-crash-due-to-watchdog-is-active-while-os-in-standby-mode.patch
+scsi-mpt2sas-fix-oops-because-drv-data-points-to-null-on-resume-from-hibernate.patch
+mpt2sas-fix-config-request-and-diag-reset-deadlock.patch
+do_sigaltstack-avoid-copying-stack_t-as-a-structure-to-user-space.patch
+bug-fix-arch-ia64-kernel-pci-dma.c-fix-recursive-dma_supported-call-in-iommu_dma_supported.patch
+x86-amd-don-t-probe-for-extended-apic-id-if-apics-are-disabled.patch
+ocfs2-initialize-the-cluster-we-re-writing-to-in-a-non-sparse-extend.patch
+acpi-processor-force-throttling-state-when-bios-returns-incorrect-value.patch
+vfs-fix-inode_init_always-calling-convention.patch
+vfs-add-__destroy_inode.patch
+xfs-fix-freeing-of-inodes-not-yet-added-to-the-inode-cache.patch
+xfs-fix-spin_is_locked-assert-on-uni-processor-builds.patch
+gspca-ov534-fix-ov772x.patch
+kthreads-fix-kthread_create-vs-kthread_stop-race.patch
+ipv6-fix-commit-63d9950b08184e6531adceb65f64b429909cc101.patch
--- /dev/null
+From hch@infradead.org Thu Sep 3 15:16:38 2009
+From: Christoph Hellwig <hch@infradead.org>
+Date: Wed, 19 Aug 2009 14:43:00 -0400
+Subject: vfs: add __destroy_inode
+To: stable@kernel.org
+Cc: xfs@oss.sgi.com
+Message-ID: <20090819184512.134691786@bombadil.infradead.org>
+
+From: Christoph Hellwig <hch@infradead.org>
+
+backport of upstream commit 2e00c97e2c1d2ffc9e26252ca26b237678b0b772
+
+When we want to tear down an inode that lost the add to the cache race
+in XFS we must not call into ->destroy_inode because that would delete
+the inode that won the race from the inode cache radix tree.
+
+This patch provides the __destroy_inode helper needed to fix this,
+the actual fix will be in the next patch. As XFS was the only reason
+destroy_inode was exported we shift the export to the new __destroy_inode.
+
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Eric Sandeen <sandeen@sandeen.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/inode.c | 10 +++++++---
+ include/linux/fs.h | 1 +
+ 2 files changed, 8 insertions(+), 3 deletions(-)
+
+--- a/fs/inode.c
++++ b/fs/inode.c
+@@ -220,18 +220,22 @@ static struct inode *alloc_inode(struct
+ return inode;
+ }
+
+-void destroy_inode(struct inode *inode)
++void __destroy_inode(struct inode *inode)
+ {
+ BUG_ON(inode_has_buffers(inode));
+ ima_inode_free(inode);
+ security_inode_free(inode);
++}
++EXPORT_SYMBOL(__destroy_inode);
++
++void destroy_inode(struct inode *inode)
++{
++ __destroy_inode(inode);
+ if (inode->i_sb->s_op->destroy_inode)
+ inode->i_sb->s_op->destroy_inode(inode);
+ else
+ kmem_cache_free(inode_cachep, (inode));
+ }
+-EXPORT_SYMBOL(destroy_inode);
+-
+
+ /*
+ * These are initializations that only need to be done
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -2162,6 +2162,7 @@ extern void __iget(struct inode * inode)
+ extern void iget_failed(struct inode *);
+ extern void clear_inode(struct inode *);
+ extern void destroy_inode(struct inode *);
++extern void __destroy_inode(struct inode *);
+ extern struct inode *new_inode(struct super_block *);
+ extern int should_remove_suid(struct dentry *);
+ extern int file_remove_suid(struct file *);
--- /dev/null
+From hch@infradead.org Thu Sep 3 15:15:55 2009
+From: Christoph Hellwig <hch@infradead.org>
+Date: Wed, 19 Aug 2009 14:42:59 -0400
+Subject: vfs: fix inode_init_always calling convention
+To: stable@kernel.org
+Cc: xfs@oss.sgi.com
+Message-ID: <20090819184511.968291288@bombadil.infradead.org>
+
+From: Christoph Hellwig <hch@infradead.org>
+
+backport of upstream commit 54e346215e4fe2ca8c94c54e546cc61902060510
+
+Currently inode_init_always calls into ->destroy_inode if the additional
+initialization fails. That's not only counter-intuitive because
+inode_init_always did not allocate the inode structure, but in case of
+XFS it's actively harmful as ->destroy_inode might delete the inode from
+a radix-tree that has never been added. This in turn might end up
+deleting the inode for the same inum that has been instantiated by
+another process and cause lots of subtle problems.
+
+Also in the case of re-initializing a reclaimable inode in XFS it would
+free an inode we still want to keep alive.
+
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Eric Sandeen <sandeen@sandeen.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/inode.c | 30 +++++++++++++++++-------------
+ fs/xfs/xfs_iget.c | 17 +++++------------
+ include/linux/fs.h | 2 +-
+ 3 files changed, 23 insertions(+), 26 deletions(-)
+
+--- a/fs/inode.c
++++ b/fs/inode.c
+@@ -118,12 +118,11 @@ static void wake_up_inode(struct inode *
+ * These are initializations that need to be done on every inode
+ * allocation as the fields are not initialised by slab allocation.
+ */
+-struct inode *inode_init_always(struct super_block *sb, struct inode *inode)
++int inode_init_always(struct super_block *sb, struct inode *inode)
+ {
+ static const struct address_space_operations empty_aops;
+ static struct inode_operations empty_iops;
+ static const struct file_operations empty_fops;
+-
+ struct address_space *const mapping = &inode->i_data;
+
+ inode->i_sb = sb;
+@@ -150,7 +149,7 @@ struct inode *inode_init_always(struct s
+ inode->dirtied_when = 0;
+
+ if (security_inode_alloc(inode))
+- goto out_free_inode;
++ goto out;
+
+ /* allocate and initialize an i_integrity */
+ if (ima_inode_alloc(inode))
+@@ -189,16 +188,12 @@ struct inode *inode_init_always(struct s
+ inode->i_private = NULL;
+ inode->i_mapping = mapping;
+
+- return inode;
++ return 0;
+
+ out_free_security:
+ security_inode_free(inode);
+-out_free_inode:
+- if (inode->i_sb->s_op->destroy_inode)
+- inode->i_sb->s_op->destroy_inode(inode);
+- else
+- kmem_cache_free(inode_cachep, (inode));
+- return NULL;
++out:
++ return -ENOMEM;
+ }
+ EXPORT_SYMBOL(inode_init_always);
+
+@@ -211,9 +206,18 @@ static struct inode *alloc_inode(struct
+ else
+ inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL);
+
+- if (inode)
+- return inode_init_always(sb, inode);
+- return NULL;
++ if (!inode)
++ return NULL;
++
++ if (unlikely(inode_init_always(sb, inode))) {
++ if (inode->i_sb->s_op->destroy_inode)
++ inode->i_sb->s_op->destroy_inode(inode);
++ else
++ kmem_cache_free(inode_cachep, inode);
++ return NULL;
++ }
++
++ return inode;
+ }
+
+ void destroy_inode(struct inode *inode)
+--- a/fs/xfs/xfs_iget.c
++++ b/fs/xfs/xfs_iget.c
+@@ -63,6 +63,10 @@ xfs_inode_alloc(
+ ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
+ if (!ip)
+ return NULL;
++ if (inode_init_always(mp->m_super, VFS_I(ip))) {
++ kmem_zone_free(xfs_inode_zone, ip);
++ return NULL;
++ }
+
+ ASSERT(atomic_read(&ip->i_iocount) == 0);
+ ASSERT(atomic_read(&ip->i_pincount) == 0);
+@@ -104,17 +108,6 @@ xfs_inode_alloc(
+ #ifdef XFS_DIR2_TRACE
+ ip->i_dir_trace = ktrace_alloc(XFS_DIR2_KTRACE_SIZE, KM_NOFS);
+ #endif
+- /*
+- * Now initialise the VFS inode. We do this after the xfs_inode
+- * initialisation as internal failures will result in ->destroy_inode
+- * being called and that will pass down through the reclaim path and
+- * free the XFS inode. This path requires the XFS inode to already be
+- * initialised. Hence if this call fails, the xfs_inode has already
+- * been freed and we should not reference it at all in the error
+- * handling.
+- */
+- if (!inode_init_always(mp->m_super, VFS_I(ip)))
+- return NULL;
+
+ /* prevent anyone from using this yet */
+ VFS_I(ip)->i_state = I_NEW|I_LOCK;
+@@ -166,7 +159,7 @@ xfs_iget_cache_hit(
+ * errors cleanly, then tag it so it can be set up correctly
+ * later.
+ */
+- if (!inode_init_always(mp->m_super, VFS_I(ip))) {
++ if (inode_init_always(mp->m_super, VFS_I(ip))) {
+ error = ENOMEM;
+ goto out_error;
+ }
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -2135,7 +2135,7 @@ extern loff_t default_llseek(struct file
+
+ extern loff_t vfs_llseek(struct file *file, loff_t offset, int origin);
+
+-extern struct inode * inode_init_always(struct super_block *, struct inode *);
++extern int inode_init_always(struct super_block *, struct inode *);
+ extern void inode_init_once(struct inode *);
+ extern void inode_add_to_lists(struct super_block *, struct inode *);
+ extern void iput(struct inode *);
--- /dev/null
+From 2cb078603abb612e3bcd428fb8122c3d39e08832 Mon Sep 17 00:00:00 2001
+From: Jeremy Fitzhardinge <jeremy@goop.org>
+Date: Wed, 22 Jul 2009 09:59:35 -0700
+Subject: x86, amd: Don't probe for extended APIC ID if APICs are disabled
+
+From: Jeremy Fitzhardinge <jeremy@goop.org>
+
+commit 2cb078603abb612e3bcd428fb8122c3d39e08832 upstream.
+
+If we've logically disabled apics, don't probe the PCI space for the
+AMD extended APIC ID.
+
+[ Impact: prevent boot crash under Xen. ]
+
+Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
+Reported-by: Bastian Blank <bastian@waldi.eu.org>
+Signed-off-by: H. Peter Anvin <hpa@zytor.com>
+Cc: Andreas Herrmann <andreas.herrmann3@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kernel/cpu/amd.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -356,7 +356,7 @@ static void __cpuinit early_init_amd(str
+ #endif
+ #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
+ /* check CPU config space for extended APIC ID */
+- if (c->x86 >= 0xf) {
++ if (cpu_has_apic && c->x86 >= 0xf) {
+ unsigned int val;
+ val = read_pci_config(0, 24, 0, 0x68);
+ if ((val & ((1 << 17) | (1 << 18))) == ((1 << 17) | (1 << 18)))
--- /dev/null
+From hch@infradead.org Thu Sep 3 15:17:19 2009
+From: Christoph Hellwig <hch@infradead.org>
+Date: Wed, 19 Aug 2009 14:43:01 -0400
+Subject: xfs: fix freeing of inodes not yet added to the inode cache
+To: stable@kernel.org
+Cc: xfs@oss.sgi.com
+Message-ID: <20090819184512.292693252@bombadil.infradead.org>
+
+From: Christoph Hellwig <hch@infradead.org>
+
+upstream commit b36ec0428a06fcbdb67d61e9e664154e5dd9a8c7
+
+When freeing an inode that lost race getting added to the inode cache we
+must not call into ->destroy_inode, because that would delete the inode
+that won the race from the inode cache radix tree.
+
+This patch splits a new xfs_inode_free helper out of xfs_ireclaim
+and uses that plus __destroy_inode to make sure we really only free
+the memory allocated for the inode that lost the race, and not mess with
+the inode cache state.
+
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Eric Sandeen <sandeen@sandeen.net>
+Reported-by: Alex Samad <alex@samad.com.au>
+Reported-by: Andrew Randrianasulu <randrik@mail.ru>
+Reported-by: Stephane <sharnois@max-t.com>
+Reported-by: Tommy <tommy@news-service.com>
+Reported-by: Miah Gregory <mace@darksilence.net>
+Reported-by: Gabriel Barazer <gabriel@oxeva.fr>
+Reported-by: Leandro Lucarella <llucax@gmail.com>
+Reported-by: Daniel Burr <dburr@fami.com.au>
+Reported-by: Nickolay <newmail@spaces.ru>
+Reported-by: Michael Guntsche <mike@it-loops.com>
+Reported-by: Dan Carley <dan.carley+linuxkern-bugs@gmail.com>
+Reported-by: Michael Ole Olsen <gnu@gmx.net>
+Reported-by: Michael Weissenbacher <mw@dermichi.com>
+Reported-by: Martin Spott <Martin.Spott@mgras.net>
+Reported-by: Christian Kujau <lists@nerdbynature.de>
+Tested-by: Michael Guntsche <mike@it-loops.com>
+Tested-by: Dan Carley <dan.carley+linuxkern-bugs@gmail.com>
+Tested-by: Christian Kujau <lists@nerdbynature.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/xfs/xfs_iget.c | 125 ++++++++++++++++++++++++++++-------------------------
+ fs/xfs/xfs_inode.h | 17 -------
+ 2 files changed, 68 insertions(+), 74 deletions(-)
+
+--- a/fs/xfs/xfs_iget.c
++++ b/fs/xfs/xfs_iget.c
+@@ -115,6 +115,71 @@ xfs_inode_alloc(
+ return ip;
+ }
+
++STATIC void
++xfs_inode_free(
++ struct xfs_inode *ip)
++{
++ switch (ip->i_d.di_mode & S_IFMT) {
++ case S_IFREG:
++ case S_IFDIR:
++ case S_IFLNK:
++ xfs_idestroy_fork(ip, XFS_DATA_FORK);
++ break;
++ }
++
++ if (ip->i_afp)
++ xfs_idestroy_fork(ip, XFS_ATTR_FORK);
++
++#ifdef XFS_INODE_TRACE
++ ktrace_free(ip->i_trace);
++#endif
++#ifdef XFS_BMAP_TRACE
++ ktrace_free(ip->i_xtrace);
++#endif
++#ifdef XFS_BTREE_TRACE
++ ktrace_free(ip->i_btrace);
++#endif
++#ifdef XFS_RW_TRACE
++ ktrace_free(ip->i_rwtrace);
++#endif
++#ifdef XFS_ILOCK_TRACE
++ ktrace_free(ip->i_lock_trace);
++#endif
++#ifdef XFS_DIR2_TRACE
++ ktrace_free(ip->i_dir_trace);
++#endif
++
++ if (ip->i_itemp) {
++ /*
++ * Only if we are shutting down the fs will we see an
++ * inode still in the AIL. If it is there, we should remove
++ * it to prevent a use-after-free from occurring.
++ */
++ xfs_log_item_t *lip = &ip->i_itemp->ili_item;
++ struct xfs_ail *ailp = lip->li_ailp;
++
++ ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) ||
++ XFS_FORCED_SHUTDOWN(ip->i_mount));
++ if (lip->li_flags & XFS_LI_IN_AIL) {
++ spin_lock(&ailp->xa_lock);
++ if (lip->li_flags & XFS_LI_IN_AIL)
++ xfs_trans_ail_delete(ailp, lip);
++ else
++ spin_unlock(&ailp->xa_lock);
++ }
++ xfs_inode_item_destroy(ip);
++ ip->i_itemp = NULL;
++ }
++
++ /* asserts to verify all state is correct here */
++ ASSERT(atomic_read(&ip->i_iocount) == 0);
++ ASSERT(atomic_read(&ip->i_pincount) == 0);
++ ASSERT(!spin_is_locked(&ip->i_flags_lock));
++ ASSERT(completion_done(&ip->i_flush));
++
++ kmem_zone_free(xfs_inode_zone, ip);
++}
++
+ /*
+ * Check the validity of the inode we just found it the cache
+ */
+@@ -291,7 +356,8 @@ out_preload_end:
+ if (lock_flags)
+ xfs_iunlock(ip, lock_flags);
+ out_destroy:
+- xfs_destroy_inode(ip);
++ __destroy_inode(VFS_I(ip));
++ xfs_inode_free(ip);
+ return error;
+ }
+
+@@ -499,62 +565,7 @@ xfs_ireclaim(
+ XFS_QM_DQDETACH(ip->i_mount, ip);
+ xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
+
+- switch (ip->i_d.di_mode & S_IFMT) {
+- case S_IFREG:
+- case S_IFDIR:
+- case S_IFLNK:
+- xfs_idestroy_fork(ip, XFS_DATA_FORK);
+- break;
+- }
+-
+- if (ip->i_afp)
+- xfs_idestroy_fork(ip, XFS_ATTR_FORK);
+-
+-#ifdef XFS_INODE_TRACE
+- ktrace_free(ip->i_trace);
+-#endif
+-#ifdef XFS_BMAP_TRACE
+- ktrace_free(ip->i_xtrace);
+-#endif
+-#ifdef XFS_BTREE_TRACE
+- ktrace_free(ip->i_btrace);
+-#endif
+-#ifdef XFS_RW_TRACE
+- ktrace_free(ip->i_rwtrace);
+-#endif
+-#ifdef XFS_ILOCK_TRACE
+- ktrace_free(ip->i_lock_trace);
+-#endif
+-#ifdef XFS_DIR2_TRACE
+- ktrace_free(ip->i_dir_trace);
+-#endif
+- if (ip->i_itemp) {
+- /*
+- * Only if we are shutting down the fs will we see an
+- * inode still in the AIL. If it is there, we should remove
+- * it to prevent a use-after-free from occurring.
+- */
+- xfs_log_item_t *lip = &ip->i_itemp->ili_item;
+- struct xfs_ail *ailp = lip->li_ailp;
+-
+- ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) ||
+- XFS_FORCED_SHUTDOWN(ip->i_mount));
+- if (lip->li_flags & XFS_LI_IN_AIL) {
+- spin_lock(&ailp->xa_lock);
+- if (lip->li_flags & XFS_LI_IN_AIL)
+- xfs_trans_ail_delete(ailp, lip);
+- else
+- spin_unlock(&ailp->xa_lock);
+- }
+- xfs_inode_item_destroy(ip);
+- ip->i_itemp = NULL;
+- }
+- /* asserts to verify all state is correct here */
+- ASSERT(atomic_read(&ip->i_iocount) == 0);
+- ASSERT(atomic_read(&ip->i_pincount) == 0);
+- ASSERT(!spin_is_locked(&ip->i_flags_lock));
+- ASSERT(completion_done(&ip->i_flush));
+- kmem_zone_free(xfs_inode_zone, ip);
++ xfs_inode_free(ip);
+ }
+
+ /*
+--- a/fs/xfs/xfs_inode.h
++++ b/fs/xfs/xfs_inode.h
+@@ -309,23 +309,6 @@ static inline struct inode *VFS_I(struct
+ }
+
+ /*
+- * Get rid of a partially initialized inode.
+- *
+- * We have to go through destroy_inode to make sure allocations
+- * from init_inode_always like the security data are undone.
+- *
+- * We mark the inode bad so that it takes the short cut in
+- * the reclaim path instead of going through the flush path
+- * which doesn't make sense for an inode that has never seen the
+- * light of day.
+- */
+-static inline void xfs_destroy_inode(struct xfs_inode *ip)
+-{
+- make_bad_inode(VFS_I(ip));
+- return destroy_inode(VFS_I(ip));
+-}
+-
+-/*
+ * i_flags helper functions
+ */
+ static inline void
--- /dev/null
+From hch@infradead.org Thu Sep 3 15:18:02 2009
+From: Christoph Hellwig <hch@infradead.org>
+Date: Wed, 19 Aug 2009 14:43:02 -0400
+Subject: xfs: fix spin_is_locked assert on uni-processor builds
+To: stable@kernel.org
+Cc: Felix Blyakher <felixb@sgi.com>, xfs@oss.sgi.com
+Message-ID: <20090819184512.452411477@bombadil.infradead.org>
+
+From: Christoph Hellwig <hch@infradead.org>
+
+upstream commit a8914f3a6d72c97328597a556a99daaf5cc288ae
+
+Without SMP or preemption spin_is_locked always returns false,
+so we can't do an assert with it. Instead use assert_spin_locked,
+which does the right thing on all builds.
+
+
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Eric Sandeen <sandeen@sandeen.net>
+Reported-by: Johannes Engel <jcnengel@googlemail.com>
+Tested-by: Johannes Engel <jcnengel@googlemail.com>
+Signed-off-by: Felix Blyakher <felixb@sgi.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/xfs/xfs_log.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/xfs/xfs_log.c
++++ b/fs/xfs/xfs_log.c
+@@ -3180,7 +3180,7 @@ try_again:
+ STATIC void
+ xlog_state_want_sync(xlog_t *log, xlog_in_core_t *iclog)
+ {
+- ASSERT(spin_is_locked(&log->l_icloglock));
++ assert_spin_locked(&log->l_icloglock);
+
+ if (iclog->ic_state == XLOG_STATE_ACTIVE) {
+ xlog_state_switch_iclogs(log, iclog, 0);