--- /dev/null
+From bdc731bc5fcd1794e9ac8ac80c389d302381c123 Mon Sep 17 00:00:00 2001
+From: Stefan Bader <stefan.bader@canonical.com>
+Date: Mon, 21 Dec 2009 16:20:04 -0800
+Subject: acerhdf: limit modalias matching to supported
+
+From: Stefan Bader <stefan.bader@canonical.com>
+
+commit bdc731bc5fcd1794e9ac8ac80c389d302381c123 upstream.
+
+BugLink: https://bugs.launchpad.net/ubuntu/+bug/435958
+
+The module alias currently matches any Acer computer, but when loaded the
+BIOS checks will only succeed on Aspire One models. This causes an invalid
+BIOS warning for all other models (seen on Aspire 4810T). This is not
+fatal but worries users that see this message. Limit the module alias
+to models starting with AOA, or DOA for Packard Bell.
+
+Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
+Acked-by: Borislav Petkov <petkovbb@gmail.com>
+Acked-by: Peter Feuerer <peter@piie.net>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Len Brown <len.brown@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/platform/x86/acerhdf.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+--- a/drivers/platform/x86/acerhdf.c
++++ b/drivers/platform/x86/acerhdf.c
+@@ -640,9 +640,10 @@ static void __exit acerhdf_exit(void)
+ MODULE_LICENSE("GPL");
+ MODULE_AUTHOR("Peter Feuerer");
+ MODULE_DESCRIPTION("Aspire One temperature and fan driver");
+-MODULE_ALIAS("dmi:*:*Acer*:*:");
+-MODULE_ALIAS("dmi:*:*Gateway*:*:");
+-MODULE_ALIAS("dmi:*:*Packard Bell*:*:");
++MODULE_ALIAS("dmi:*:*Acer*:pnAOA*:");
++MODULE_ALIAS("dmi:*:*Gateway*:pnAOA*:");
++MODULE_ALIAS("dmi:*:*Packard Bell*:pnAOA*:");
++MODULE_ALIAS("dmi:*:*Packard Bell*:pnDOA*:");
+
+ module_init(acerhdf_init);
+ module_exit(acerhdf_exit);
--- /dev/null
+From 55b313f249e11b815fd0be51869f166aaf368f44 Mon Sep 17 00:00:00 2001
+From: Alexey Starikovskiy <astarikovskiy@suse.de>
+Date: Tue, 22 Dec 2009 02:42:52 -0500
+Subject: ACPI: EC: Fix MSI DMI detection
+
+From: Alexey Starikovskiy <astarikovskiy@suse.de>
+
+commit 55b313f249e11b815fd0be51869f166aaf368f44 upstream.
+
+MSI strings should be ORed, not ANDed.
+
+Reference: http://bugzilla.kernel.org/show_bug.cgi?id=14446
+
+Signed-off-by: Alexey Starikovskiy <astarikovskiy@suse.de>
+Signed-off-by: Len Brown <len.brown@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/acpi/ec.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -916,6 +916,7 @@ static int ec_validate_ecdt(const struct
+ /* MSI EC needs special treatment, enable it */
+ static int ec_flag_msi(const struct dmi_system_id *id)
+ {
++ printk(KERN_DEBUG PREFIX "Detected MSI hardware, enabling workarounds.\n");
+ EC_FLAGS_MSI = 1;
+ EC_FLAGS_VALIDATE_ECDT = 1;
+ return 0;
+@@ -928,8 +929,13 @@ static struct dmi_system_id __initdata e
+ DMI_MATCH(DMI_BOARD_NAME, "JFL92") }, NULL},
+ {
+ ec_flag_msi, "MSI hardware", {
+- DMI_MATCH(DMI_BIOS_VENDOR, "Micro-Star"),
+- DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-Star") }, NULL},
++ DMI_MATCH(DMI_BIOS_VENDOR, "Micro-Star")}, NULL},
++ {
++ ec_flag_msi, "MSI hardware", {
++ DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star")}, NULL},
++ {
++ ec_flag_msi, "MSI hardware", {
++ DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-Star")}, NULL},
+ {
+ ec_validate_ecdt, "ASUS hardware", {
+ DMI_MATCH(DMI_BIOS_VENDOR, "ASUS") }, NULL},
--- /dev/null
+From 13c199c0d0cf78b27592991129fb8cbcfc5164de Mon Sep 17 00:00:00 2001
+From: Zhao Yakui <yakui.zhao@intel.com>
+Date: Tue, 15 Dec 2009 22:01:57 +0800
+Subject: ACPI: Use the return result of ACPI lid notifier chain correctly
+
+From: Zhao Yakui <yakui.zhao@intel.com>
+
+commit 13c199c0d0cf78b27592991129fb8cbcfc5164de upstream.
+
+On some laptops the ACPI LID notifier call chain returns NOTIFY_OK
+(non-zero). That value is then used as the result of the ACPI LID resume
+function, which makes it print the following warning message in the
+course of suspend/resume:
+
+ >PM: Device PNP0C0D:00 failed to resume: error 1
+
+This patch is to eliminate the above warning message.
+
+http://bugzilla.kernel.org/show_bug.cgi?id=14782
+
+Signed-off-by: Zhao Yakui <yakui.zhao@intel.com>
+Signed-off-by: Len Brown <len.brown@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/acpi/button.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/drivers/acpi/button.c
++++ b/drivers/acpi/button.c
+@@ -282,6 +282,13 @@ static int acpi_lid_send_state(struct ac
+ if (ret == NOTIFY_DONE)
+ ret = blocking_notifier_call_chain(&acpi_lid_notifier, state,
+ device);
++ if (ret == NOTIFY_DONE || ret == NOTIFY_OK) {
++ /*
++ * It is also regarded as success if the notifier_chain
++ * returns NOTIFY_OK or NOTIFY_DONE.
++ */
++ ret = 0;
++ }
+ return ret;
+ }
+
--- /dev/null
+From b462707e7ccad058ae151e5c5b06eb5cadcb737f Mon Sep 17 00:00:00 2001
+From: Dmitry Monakhov <dmonakhov@openvz.org>
+Date: Mon, 14 Dec 2009 15:21:12 +0300
+Subject: Add unlocked version of inode_add_bytes() function
+
+From: Dmitry Monakhov <dmonakhov@openvz.org>
+
+commit b462707e7ccad058ae151e5c5b06eb5cadcb737f upstream.
+
+Quota code requires an unlocked version of this function. Of course
+we could just copy-paste the code, but copy-pasting is always evil.
+
+Signed-off-by: Dmitry Monakhov <dmonakhov@openvz.org>
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/stat.c | 10 ++++++++--
+ include/linux/fs.h | 1 +
+ 2 files changed, 9 insertions(+), 2 deletions(-)
+
+--- a/fs/stat.c
++++ b/fs/stat.c
+@@ -401,9 +401,9 @@ SYSCALL_DEFINE4(fstatat64, int, dfd, cha
+ }
+ #endif /* __ARCH_WANT_STAT64 */
+
+-void inode_add_bytes(struct inode *inode, loff_t bytes)
++/* Caller is here responsible for sufficient locking (ie. inode->i_lock) */
++void __inode_add_bytes(struct inode *inode, loff_t bytes)
+ {
+- spin_lock(&inode->i_lock);
+ inode->i_blocks += bytes >> 9;
+ bytes &= 511;
+ inode->i_bytes += bytes;
+@@ -411,6 +411,12 @@ void inode_add_bytes(struct inode *inode
+ inode->i_blocks++;
+ inode->i_bytes -= 512;
+ }
++}
++
++void inode_add_bytes(struct inode *inode, loff_t bytes)
++{
++ spin_lock(&inode->i_lock);
++ __inode_add_bytes(inode, bytes);
+ spin_unlock(&inode->i_lock);
+ }
+
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -2314,6 +2314,7 @@ extern const struct inode_operations pag
+ extern int generic_readlink(struct dentry *, char __user *, int);
+ extern void generic_fillattr(struct inode *, struct kstat *);
+ extern int vfs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
++void __inode_add_bytes(struct inode *inode, loff_t bytes);
+ void inode_add_bytes(struct inode *inode, loff_t bytes);
+ void inode_sub_bytes(struct inode *inode, loff_t bytes);
+ loff_t inode_get_bytes(struct inode *inode);
--- /dev/null
+From 035eb0cff0671ada49ba9f3e5c9e7b0cb950efea Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Thu, 17 Dec 2009 15:00:26 +0100
+Subject: ALSA: hda - Fix missing capsrc_nids for ALC88x
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit 035eb0cff0671ada49ba9f3e5c9e7b0cb950efea upstream.
+
+Some model quirks missed the corresponding capsrc_nids. This resulted in
+non-working capture source selection.
+
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ sound/pci/hda/patch_realtek.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -9141,6 +9141,8 @@ static struct alc_config_preset alc882_p
+ .dac_nids = alc883_dac_nids,
+ .num_adc_nids = ARRAY_SIZE(alc889_adc_nids),
+ .adc_nids = alc889_adc_nids,
++ .capsrc_nids = alc889_capsrc_nids,
++ .capsrc_nids = alc889_capsrc_nids,
+ .dig_out_nid = ALC883_DIGOUT_NID,
+ .dig_in_nid = ALC883_DIGIN_NID,
+ .slave_dig_outs = alc883_slave_dig_outs,
+@@ -9187,6 +9189,7 @@ static struct alc_config_preset alc882_p
+ .dac_nids = alc883_dac_nids,
+ .adc_nids = alc883_adc_nids_alt,
+ .num_adc_nids = ARRAY_SIZE(alc883_adc_nids_alt),
++ .capsrc_nids = alc883_capsrc_nids,
+ .dig_out_nid = ALC883_DIGOUT_NID,
+ .num_channel_mode = ARRAY_SIZE(alc883_3ST_2ch_modes),
+ .channel_mode = alc883_3ST_2ch_modes,
+@@ -9333,6 +9336,7 @@ static struct alc_config_preset alc882_p
+ .dac_nids = alc883_dac_nids,
+ .adc_nids = alc883_adc_nids_alt,
+ .num_adc_nids = ARRAY_SIZE(alc883_adc_nids_alt),
++ .capsrc_nids = alc883_capsrc_nids,
+ .num_channel_mode = ARRAY_SIZE(alc883_sixstack_modes),
+ .channel_mode = alc883_sixstack_modes,
+ .input_mux = &alc883_capture_source,
+@@ -9394,6 +9398,7 @@ static struct alc_config_preset alc882_p
+ .dac_nids = alc883_dac_nids,
+ .adc_nids = alc883_adc_nids_alt,
+ .num_adc_nids = ARRAY_SIZE(alc883_adc_nids_alt),
++ .capsrc_nids = alc883_capsrc_nids,
+ .num_channel_mode = ARRAY_SIZE(alc883_3ST_2ch_modes),
+ .channel_mode = alc883_3ST_2ch_modes,
+ .input_mux = &alc883_lenovo_101e_capture_source,
+@@ -9573,6 +9578,7 @@ static struct alc_config_preset alc882_p
+ alc880_gpio1_init_verbs },
+ .adc_nids = alc883_adc_nids,
+ .num_adc_nids = ARRAY_SIZE(alc883_adc_nids),
++ .capsrc_nids = alc883_capsrc_nids,
+ .dac_nids = alc883_dac_nids,
+ .num_dacs = ARRAY_SIZE(alc883_dac_nids),
+ .channel_mode = alc889A_mb31_6ch_modes,
--- /dev/null
+From 8f68ed9728193b1f2fb53ba06031b06bd8b3d1b4 Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <borislav.petkov@amd.com>
+Date: Mon, 21 Dec 2009 15:15:59 +0100
+Subject: amd64_edac: fix driver instance freeing
+
+From: Borislav Petkov <borislav.petkov@amd.com>
+
+commit 8f68ed9728193b1f2fb53ba06031b06bd8b3d1b4 upstream.
+
+Fix use-after-free errors by pushing all memory-freeing calls to the end
+of amd64_remove_one_instance().
+
+Reported-by: Darren Jenkins <darrenrjenkins@gmail.com>
+LKML-Reference: <1261370306.11354.52.camel@ICE-BOX>
+Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/edac/amd64_edac.c | 9 ++++-----
+ 1 file changed, 4 insertions(+), 5 deletions(-)
+
+--- a/drivers/edac/amd64_edac.c
++++ b/drivers/edac/amd64_edac.c
+@@ -3082,16 +3082,15 @@ static void __devexit amd64_remove_one_i
+
+ amd64_free_mc_sibling_devices(pvt);
+
+- kfree(pvt);
+- mci->pvt_info = NULL;
+-
+- mci_lookup[pvt->mc_node_id] = NULL;
+-
+ /* unregister from EDAC MCE */
+ amd_report_gart_errors(false);
+ amd_unregister_ecc_decoder(amd64_decode_bus_error);
+
+ /* Free the EDAC CORE resources */
++ mci->pvt_info = NULL;
++ mci_lookup[pvt->mc_node_id] = NULL;
++
++ kfree(pvt);
+ edac_mc_free(mci);
+ }
+
--- /dev/null
+From 43f5e68733cfe8bed3c30b5c14c4993dffb29766 Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <borislav.petkov@amd.com>
+Date: Mon, 21 Dec 2009 18:55:18 +0100
+Subject: amd64_edac: fix forcing module load/unload
+
+From: Borislav Petkov <borislav.petkov@amd.com>
+
+commit 43f5e68733cfe8bed3c30b5c14c4993dffb29766 upstream.
+
+Clear the override flag after force-loading the module.
+
+Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/edac/amd64_edac.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/drivers/edac/amd64_edac.c
++++ b/drivers/edac/amd64_edac.c
+@@ -2836,9 +2836,8 @@ static int amd64_check_ecc_enabled(struc
+ amd64_printk(KERN_WARNING, "%s", ecc_warning);
+ return -ENODEV;
+ }
+- } else
+- /* CLEAR the override, since BIOS controlled it */
+ ecc_enable_override = 0;
++ }
+
+ return 0;
+ }
--- /dev/null
+From 56b34b91e22313294154cee0c16e294cf8a45b61 Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <borislav.petkov@amd.com>
+Date: Mon, 21 Dec 2009 18:13:01 +0100
+Subject: amd64_edac: make driver loading more robust
+
+From: Borislav Petkov <borislav.petkov@amd.com>
+
+commit 56b34b91e22313294154cee0c16e294cf8a45b61 upstream.
+
+Currently, the module does not initialize fully when the DIMMs aren't
+ECC, but it still remains loaded. Propagate the error when no instance of
+the driver is properly initialized and prevent further loading.
+
+Reorganize and polish error handling in amd64_edac_init() while at it.
+
+Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/edac/amd64_edac.c | 23 ++++++++++++++++-------
+ 1 file changed, 16 insertions(+), 7 deletions(-)
+
+--- a/drivers/edac/amd64_edac.c
++++ b/drivers/edac/amd64_edac.c
+@@ -3167,25 +3167,29 @@ static void amd64_setup_pci_device(void)
+ static int __init amd64_edac_init(void)
+ {
+ int nb, err = -ENODEV;
++ bool load_ok = false;
+
+ edac_printk(KERN_INFO, EDAC_MOD_STR, EDAC_AMD64_VERSION "\n");
+
+ opstate_init();
+
+ if (cache_k8_northbridges() < 0)
+- return err;
++ goto err_ret;
+
+ msrs = msrs_alloc();
++ if (!msrs)
++ goto err_ret;
+
+ err = pci_register_driver(&amd64_pci_driver);
+ if (err)
+- return err;
++ goto err_pci;
+
+ /*
+ * At this point, the array 'pvt_lookup[]' contains pointers to alloc'd
+ * amd64_pvt structs. These will be used in the 2nd stage init function
+ * to finish initialization of the MC instances.
+ */
++ err = -ENODEV;
+ for (nb = 0; nb < num_k8_northbridges; nb++) {
+ if (!pvt_lookup[nb])
+ continue;
+@@ -3193,16 +3197,21 @@ static int __init amd64_edac_init(void)
+ err = amd64_init_2nd_stage(pvt_lookup[nb]);
+ if (err)
+ goto err_2nd_stage;
+- }
+
+- amd64_setup_pci_device();
++ load_ok = true;
++ }
+
+- return 0;
++ if (load_ok) {
++ amd64_setup_pci_device();
++ return 0;
++ }
+
+ err_2nd_stage:
+- debugf0("2nd stage failed\n");
+ pci_unregister_driver(&amd64_pci_driver);
+-
++err_pci:
++ msrs_free(msrs);
++ msrs = NULL;
++err_ret:
+ return err;
+ }
+
--- /dev/null
+From f6d6ae965760906d79ab29bc38507608c5971549 Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <borislav.petkov@amd.com>
+Date: Tue, 3 Nov 2009 15:29:26 +0100
+Subject: amd64_edac: unify MCGCTL ECC switching
+
+From: Borislav Petkov <borislav.petkov@amd.com>
+
+commit f6d6ae965760906d79ab29bc38507608c5971549 upstream.
+
+Unify almost identical code into one function and remove NUMA-specific
+usage (specifically cpumask_of_node()) in favor of generic topology
+methods.
+
+Remove unused defines, while at it.
+
+Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/edac/amd64_edac.c | 204 +++++++++++++++++++++++++---------------------
+ drivers/edac/amd64_edac.h | 9 --
+ 2 files changed, 117 insertions(+), 96 deletions(-)
+
+--- a/drivers/edac/amd64_edac.c
++++ b/drivers/edac/amd64_edac.c
+@@ -2618,6 +2618,109 @@ static int amd64_init_csrows(struct mem_
+ return empty;
+ }
+
++/* get all cores on this DCT */
++static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, int nid)
++{
++ int cpu;
++
++ for_each_online_cpu(cpu)
++ if (amd_get_nb_id(cpu) == nid)
++ cpumask_set_cpu(cpu, mask);
++}
++
++/* check MCG_CTL on all the cpus on this node */
++static bool amd64_nb_mce_bank_enabled_on_node(int nid)
++{
++ cpumask_var_t mask;
++ struct msr *msrs;
++ int cpu, nbe, idx = 0;
++ bool ret = false;
++
++ if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
++ amd64_printk(KERN_WARNING, "%s: error allocating mask\n",
++ __func__);
++ return false;
++ }
++
++ get_cpus_on_this_dct_cpumask(mask, nid);
++
++ msrs = kzalloc(sizeof(struct msr) * cpumask_weight(mask), GFP_KERNEL);
++ if (!msrs) {
++ amd64_printk(KERN_WARNING, "%s: error allocating msrs\n",
++ __func__);
++ free_cpumask_var(mask);
++ return false;
++ }
++
++ rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
++
++ for_each_cpu(cpu, mask) {
++ nbe = msrs[idx].l & K8_MSR_MCGCTL_NBE;
++
++ debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
++ cpu, msrs[idx].q,
++ (nbe ? "enabled" : "disabled"));
++
++ if (!nbe)
++ goto out;
++
++ idx++;
++ }
++ ret = true;
++
++out:
++ kfree(msrs);
++ free_cpumask_var(mask);
++ return ret;
++}
++
++static int amd64_toggle_ecc_err_reporting(struct amd64_pvt *pvt, bool on)
++{
++ cpumask_var_t cmask;
++ struct msr *msrs = NULL;
++ int cpu, idx = 0;
++
++ if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
++ amd64_printk(KERN_WARNING, "%s: error allocating mask\n",
++ __func__);
++ return false;
++ }
++
++ get_cpus_on_this_dct_cpumask(cmask, pvt->mc_node_id);
++
++ msrs = kzalloc(sizeof(struct msr) * cpumask_weight(cmask), GFP_KERNEL);
++ if (!msrs) {
++ amd64_printk(KERN_WARNING, "%s: error allocating msrs\n",
++ __func__);
++ return -ENOMEM;
++ }
++
++ rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
++
++ for_each_cpu(cpu, cmask) {
++
++ if (on) {
++ if (msrs[idx].l & K8_MSR_MCGCTL_NBE)
++ pvt->flags.ecc_report = 1;
++
++ msrs[idx].l |= K8_MSR_MCGCTL_NBE;
++ } else {
++ /*
++ * Turn off ECC reporting only when it was off before
++ */
++ if (!pvt->flags.ecc_report)
++ msrs[idx].l &= ~K8_MSR_MCGCTL_NBE;
++ }
++ idx++;
++ }
++ wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
++
++ kfree(msrs);
++ free_cpumask_var(cmask);
++
++ return 0;
++}
++
+ /*
+ * Only if 'ecc_enable_override' is set AND BIOS had ECC disabled, do "we"
+ * enable it.
+@@ -2625,17 +2728,12 @@ static int amd64_init_csrows(struct mem_
+ static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
+ {
+ struct amd64_pvt *pvt = mci->pvt_info;
+- const struct cpumask *cpumask = cpumask_of_node(pvt->mc_node_id);
+- int cpu, idx = 0, err = 0;
+- struct msr msrs[cpumask_weight(cpumask)];
+- u32 value;
+- u32 mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
++ int err = 0;
++ u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
+
+ if (!ecc_enable_override)
+ return;
+
+- memset(msrs, 0, sizeof(msrs));
+-
+ amd64_printk(KERN_WARNING,
+ "'ecc_enable_override' parameter is active, "
+ "Enabling AMD ECC hardware now: CAUTION\n");
+@@ -2651,16 +2749,9 @@ static void amd64_enable_ecc_error_repor
+ value |= mask;
+ pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value);
+
+- rdmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);
+-
+- for_each_cpu(cpu, cpumask) {
+- if (msrs[idx].l & K8_MSR_MCGCTL_NBE)
+- set_bit(idx, &pvt->old_mcgctl);
+-
+- msrs[idx].l |= K8_MSR_MCGCTL_NBE;
+- idx++;
+- }
+- wrmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);
++ if (amd64_toggle_ecc_err_reporting(pvt, ON))
++ amd64_printk(KERN_WARNING, "Error enabling ECC reporting over "
++ "MCGCTL!\n");
+
+ err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCFG, &value);
+ if (err)
+@@ -2701,17 +2792,12 @@ static void amd64_enable_ecc_error_repor
+
+ static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt)
+ {
+- const struct cpumask *cpumask = cpumask_of_node(pvt->mc_node_id);
+- int cpu, idx = 0, err = 0;
+- struct msr msrs[cpumask_weight(cpumask)];
+- u32 value;
+- u32 mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
++ int err = 0;
++ u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
+
+ if (!pvt->nbctl_mcgctl_saved)
+ return;
+
+- memset(msrs, 0, sizeof(msrs));
+-
+ err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCTL, &value);
+ if (err)
+ debugf0("Reading K8_NBCTL failed\n");
+@@ -2721,72 +2807,9 @@ static void amd64_restore_ecc_error_repo
+ /* restore the NB Enable MCGCTL bit */
+ pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value);
+
+- rdmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);
+-
+- for_each_cpu(cpu, cpumask) {
+- msrs[idx].l &= ~K8_MSR_MCGCTL_NBE;
+- msrs[idx].l |=
+- test_bit(idx, &pvt->old_mcgctl) << K8_MSR_MCGCTL_NBE;
+- idx++;
+- }
+-
+- wrmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);
+-}
+-
+-/* get all cores on this DCT */
+-static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, int nid)
+-{
+- int cpu;
+-
+- for_each_online_cpu(cpu)
+- if (amd_get_nb_id(cpu) == nid)
+- cpumask_set_cpu(cpu, mask);
+-}
+-
+-/* check MCG_CTL on all the cpus on this node */
+-static bool amd64_nb_mce_bank_enabled_on_node(int nid)
+-{
+- cpumask_var_t mask;
+- struct msr *msrs;
+- int cpu, nbe, idx = 0;
+- bool ret = false;
+-
+- if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
+- amd64_printk(KERN_WARNING, "%s: error allocating mask\n",
+- __func__);
+- return false;
+- }
+-
+- get_cpus_on_this_dct_cpumask(mask, nid);
+-
+- msrs = kzalloc(sizeof(struct msr) * cpumask_weight(mask), GFP_KERNEL);
+- if (!msrs) {
+- amd64_printk(KERN_WARNING, "%s: error allocating msrs\n",
+- __func__);
+- free_cpumask_var(mask);
+- return false;
+- }
+-
+- rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
+-
+- for_each_cpu(cpu, mask) {
+- nbe = msrs[idx].l & K8_MSR_MCGCTL_NBE;
+-
+- debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
+- cpu, msrs[idx].q,
+- (nbe ? "enabled" : "disabled"));
+-
+- if (!nbe)
+- goto out;
+-
+- idx++;
+- }
+- ret = true;
+-
+-out:
+- kfree(msrs);
+- free_cpumask_var(mask);
+- return ret;
++ if (amd64_toggle_ecc_err_reporting(pvt, OFF))
++ amd64_printk(KERN_WARNING, "Error restoring ECC reporting over "
++ "MCGCTL!\n");
+ }
+
+ /*
+@@ -2915,7 +2938,6 @@ static int amd64_probe_one_instance(stru
+ pvt->ext_model = boot_cpu_data.x86_model >> 4;
+ pvt->mc_type_index = mc_type_index;
+ pvt->ops = family_ops(mc_type_index);
+- pvt->old_mcgctl = 0;
+
+ /*
+ * We have the dram_f2_ctl device as an argument, now go reserve its
+--- a/drivers/edac/amd64_edac.h
++++ b/drivers/edac/amd64_edac.h
+@@ -147,6 +147,8 @@
+ #define MAX_CS_COUNT 8
+ #define DRAM_REG_COUNT 8
+
++#define ON true
++#define OFF false
+
+ /*
+ * PCI-defined configuration space registers
+@@ -386,10 +388,7 @@ enum {
+ #define K8_NBCAP_DUAL_NODE BIT(1)
+ #define K8_NBCAP_DCT_DUAL BIT(0)
+
+-/*
+- * MSR Regs
+- */
+-#define K8_MSR_MCGCTL 0x017b
++/* MSRs */
+ #define K8_MSR_MCGCTL_NBE BIT(4)
+
+ #define K8_MSR_MC4CTL 0x0410
+@@ -487,7 +486,6 @@ struct amd64_pvt {
+ /* Save old hw registers' values before we modified them */
+ u32 nbctl_mcgctl_saved; /* When true, following 2 are valid */
+ u32 old_nbctl;
+- unsigned long old_mcgctl; /* per core on this node */
+
+ /* MC Type Index value: socket F vs Family 10h */
+ u32 mc_type_index;
+@@ -495,6 +493,7 @@ struct amd64_pvt {
+ /* misc settings */
+ struct flags {
+ unsigned long cf8_extcfg:1;
++ unsigned long ecc_report:1;
+ } flags;
+ };
+
--- /dev/null
+From 48e3cbb3f67a27d9c2db075f3d0f700246c40caa Mon Sep 17 00:00:00 2001
+From: Eric Millbrandt <emillbrandt@dekaresearch.com>
+Date: Tue, 22 Dec 2009 10:13:24 -0500
+Subject: ASoC: Do not write to invalid registers on the wm9712.
+
+From: Eric Millbrandt <emillbrandt@dekaresearch.com>
+
+commit 48e3cbb3f67a27d9c2db075f3d0f700246c40caa upstream.
+
+This patch fixes a bug where "virtual" registers were being written to the ac97
+bus. This was causing unrelated registers to become corrupted (headphone 0x04,
+touchscreen 0x78, etc).
+
+This patch duplicates protection that was included in the wm9713 driver.
+
+Signed-off-by: Eric Millbrandt <emillbrandt@dekaresearch.com>
+Acked-by: Liam Girdwood <lrg@slimlogic.co.uk>
+Signed-off-by: Mark Brown <broonie@opensource.wolfsonmicro.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ sound/soc/codecs/wm9712.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/sound/soc/codecs/wm9712.c
++++ b/sound/soc/codecs/wm9712.c
+@@ -464,7 +464,8 @@ static int ac97_write(struct snd_soc_cod
+ {
+ u16 *cache = codec->reg_cache;
+
+- soc_ac97_ops.write(codec->ac97, reg, val);
++ if (reg < 0x7c)
++ soc_ac97_ops.write(codec->ac97, reg, val);
+ reg = reg >> 1;
+ if (reg < (ARRAY_SIZE(wm9712_reg)))
+ cache[reg] = val;
--- /dev/null
+From 48c03ce72f2665f79a3fe54fc6d71b8cc3d30803 Mon Sep 17 00:00:00 2001
+From: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+Date: Thu, 17 Dec 2009 14:51:35 +0100
+Subject: ASoC: wm8974: fix a wrong bit definition
+
+From: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+
+commit 48c03ce72f2665f79a3fe54fc6d71b8cc3d30803 upstream.
+
+The wm8974 datasheet defines BUFIOEN as bit 2.
+
+Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+Acked-by: Liam Girdwood <lrg@slimlogic.co.uk>
+Signed-off-by: Mark Brown <broonie@opensource.wolfsonmicro.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ sound/soc/codecs/wm8974.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/sound/soc/codecs/wm8974.c
++++ b/sound/soc/codecs/wm8974.c
+@@ -47,7 +47,7 @@ static const u16 wm8974_reg[WM8974_CACHE
+ };
+
+ #define WM8974_POWER1_BIASEN 0x08
+-#define WM8974_POWER1_BUFIOEN 0x10
++#define WM8974_POWER1_BUFIOEN 0x04
+
+ struct wm8974_priv {
+ struct snd_soc_codec codec;
--- /dev/null
+From 242ab7ad689accafd5e87ffd22b85cf1bf7fbbef Mon Sep 17 00:00:00 2001
+From: Bob Copeland <me@bobcopeland.com>
+Date: Mon, 21 Dec 2009 22:26:48 -0500
+Subject: ath5k: fix SWI calibration interrupt storm
+
+From: Bob Copeland <me@bobcopeland.com>
+
+commit 242ab7ad689accafd5e87ffd22b85cf1bf7fbbef upstream.
+
+The calibration period is now invoked by triggering a software
+interrupt from within the ISR by ath5k_hw_calibration_poll()
+instead of via a timer.
+
+However, the calibration interval isn't initialized before
+interrupts are enabled, so we can have a situation where an
+interrupt occurs before the interval is assigned, so the
+interval is actually negative. As a result, the ISR will
+arm a software interrupt to schedule the tasklet, and then
+rearm it when the SWI is processed, and so on, leading to a
+softlockup at modprobe time.
+
+Move the initialization order around so the calibration interval
+is set before interrupts are active. Another possible fix
+is to schedule the tasklet directly from the poll routine,
+but I think there are additional plans for the SWI.
+
+Signed-off-by: Bob Copeland <me@bobcopeland.com>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/wireless/ath/ath5k/base.c | 7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/wireless/ath/ath5k/base.c
++++ b/drivers/net/wireless/ath/ath5k/base.c
+@@ -2349,6 +2349,9 @@ ath5k_init(struct ath5k_softc *sc)
+ */
+ ath5k_stop_locked(sc);
+
++ /* Set PHY calibration interval */
++ ah->ah_cal_intval = ath5k_calinterval;
++
+ /*
+ * The basic interface to setting the hardware in a good
+ * state is ``reset''. On return the hardware is known to
+@@ -2376,10 +2379,6 @@ ath5k_init(struct ath5k_softc *sc)
+
+ /* Set ack to be sent at low bit-rates */
+ ath5k_hw_set_ack_bitrate_high(ah, false);
+-
+- /* Set PHY calibration inteval */
+- ah->ah_cal_intval = ath5k_calinterval;
+-
+ ret = 0;
+ done:
+ mmiowb();
--- /dev/null
+From 5b479a076de091590423a9e6dfc2584126b28761 Mon Sep 17 00:00:00 2001
+From: Felix Fietkau <nbd@openwrt.org>
+Date: Thu, 24 Dec 2009 14:04:32 +0100
+Subject: ath9k: fix missed error codes in the tx status check
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Felix Fietkau <nbd@openwrt.org>
+
+commit 5b479a076de091590423a9e6dfc2584126b28761 upstream.
+
+My previous change added in:
+
+ commit 815833e7ecf0b9a017315cae6aef4d7cd9517681
+ ath9k: fix tx status reporting
+
+was not checking all possible tx error conditions. This could possibly
+lead to throughput issues due to slow rate control adaptation or missed
+retransmissions of failed A-MPDU frames.
+
+This patch adds a mask for all possible error conditions and uses it
+in the xmit ok check.
+
+Reported-by: Björn Smedman <bjorn.smedman@venatech.se>
+Signed-off-by: Felix Fietkau <nbd@openwrt.org>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/wireless/ath/ath9k/mac.h | 3 +++
+ drivers/net/wireless/ath/ath9k/xmit.c | 2 +-
+ 2 files changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/wireless/ath/ath9k/mac.h
++++ b/drivers/net/wireless/ath/ath9k/mac.h
+@@ -77,6 +77,9 @@
+ #define ATH9K_TXERR_XTXOP 0x08
+ #define ATH9K_TXERR_TIMER_EXPIRED 0x10
+ #define ATH9K_TX_ACKED 0x20
++#define ATH9K_TXERR_MASK \
++ (ATH9K_TXERR_XRETRY | ATH9K_TXERR_FILT | ATH9K_TXERR_FIFO | \
++ ATH9K_TXERR_XTXOP | ATH9K_TXERR_TIMER_EXPIRED)
+
+ #define ATH9K_TX_BA 0x01
+ #define ATH9K_TX_PWRMGMT 0x02
+--- a/drivers/net/wireless/ath/ath9k/xmit.c
++++ b/drivers/net/wireless/ath/ath9k/xmit.c
+@@ -2020,7 +2020,7 @@ static void ath_tx_processq(struct ath_s
+ if (bf_isaggr(bf))
+ txq->axq_aggr_depth--;
+
+- txok = !(ds->ds_txstat.ts_status & ATH9K_TXERR_FILT);
++ txok = !(ds->ds_txstat.ts_status & ATH9K_TXERR_MASK);
+ txq->axq_tx_inprogress = false;
+ spin_unlock_bh(&txq->axq_lock);
+
--- /dev/null
+From 3867cf6a8c699846e928e8f5a9f31013708df192 Mon Sep 17 00:00:00 2001
+From: Sujith <Sujith.Manoharan@atheros.com>
+Date: Wed, 23 Dec 2009 20:03:27 -0500
+Subject: ath9k: fix suspend by waking device prior to stop
+
+From: Sujith <Sujith.Manoharan@atheros.com>
+
+commit 3867cf6a8c699846e928e8f5a9f31013708df192 upstream.
+
+Ensure the device is awake prior to trying to tell the hardware
+to stop it. If we do not do this, we can leave the device in an
+undefined state, likely causing issues with suspend and resume.
+This patch ensures the hardware is where it should be prior to
+suspend.
+
+Signed-off-by: Sujith <Sujith.Manoharan@atheros.com>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/wireless/ath/ath9k/main.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/drivers/net/wireless/ath/ath9k/main.c
++++ b/drivers/net/wireless/ath/ath9k/main.c
+@@ -2147,6 +2147,9 @@ static void ath9k_stop(struct ieee80211_
+ return; /* another wiphy still in use */
+ }
+
++ /* Ensure HW is awake when we try to shut it down. */
++ ath9k_ps_wakeup(sc);
++
+ if (sc->sc_flags & SC_OP_BTCOEX_ENABLED) {
+ ath9k_hw_btcoex_disable(sc->sc_ah);
+ if (sc->btcoex_info.btcoex_scheme == ATH_BTCOEX_CFG_3WIRE)
+@@ -2167,6 +2170,9 @@ static void ath9k_stop(struct ieee80211_
+ /* disable HAL and put h/w to sleep */
+ ath9k_hw_disable(sc->sc_ah);
+ ath9k_hw_configpcipowersave(sc->sc_ah, 1, 1);
++ ath9k_ps_restore(sc);
++
++ /* Finally, put the chip in FULL SLEEP mode */
+ ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP);
+
+ sc->sc_flags |= SC_OP_INVALID;
--- /dev/null
+From e8009e9850d59000d518296af372888911a129bd Mon Sep 17 00:00:00 2001
+From: Sujith <Sujith.Manoharan@atheros.com>
+Date: Mon, 14 Dec 2009 14:57:08 +0530
+Subject: ath9k: Fix TX queue draining
+
+From: Sujith <Sujith.Manoharan@atheros.com>
+
+commit e8009e9850d59000d518296af372888911a129bd upstream.
+
+When TX DMA termination has failed, the HW has to be reset
+completely. Doing a fast channel change in this case is insufficient.
+Also, change the debug level of a couple of messages to FATAL.
+
+Signed-off-by: Sujith <Sujith.Manoharan@atheros.com>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/wireless/ath/ath9k/mac.c | 2 +-
+ drivers/net/wireless/ath/ath9k/xmit.c | 4 ++--
+ 2 files changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/wireless/ath/ath9k/mac.c
++++ b/drivers/net/wireless/ath/ath9k/mac.c
+@@ -155,7 +155,7 @@ bool ath9k_hw_stoptxdma(struct ath_hw *a
+ wait = wait_time;
+ while (ath9k_hw_numtxpending(ah, q)) {
+ if ((--wait) == 0) {
+- DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
++ DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
+ "Failed to stop TX DMA in 100 "
+ "msec after killing last frame\n");
+ break;
+--- a/drivers/net/wireless/ath/ath9k/xmit.c
++++ b/drivers/net/wireless/ath/ath9k/xmit.c
+@@ -1076,10 +1076,10 @@ void ath_drain_all_txq(struct ath_softc
+ if (npend) {
+ int r;
+
+- DPRINTF(sc, ATH_DBG_XMIT, "Unable to stop TxDMA. Reset HAL!\n");
++ DPRINTF(sc, ATH_DBG_FATAL, "Unable to stop TxDMA. Reset HAL!\n");
+
+ spin_lock_bh(&sc->sc_resetlock);
+- r = ath9k_hw_reset(ah, sc->sc_ah->curchan, true);
++ r = ath9k_hw_reset(ah, sc->sc_ah->curchan, false);
+ if (r)
+ DPRINTF(sc, ATH_DBG_FATAL,
+ "Unable to reset hardware; reset status %d\n",
--- /dev/null
+From 8b685ba9de803f210936400612a32a2003f47cd3 Mon Sep 17 00:00:00 2001
+From: Luis R. Rodriguez <lrodriguez@atheros.com>
+Date: Wed, 23 Dec 2009 20:03:29 -0500
+Subject: ath9k: wake hardware during AMPDU TX actions
+
+From: Luis R. Rodriguez <lrodriguez@atheros.com>
+
+commit 8b685ba9de803f210936400612a32a2003f47cd3 upstream.
+
+AMPDU actions poke hardware for TX operation, as such
+we want to turn hardware on for these actions. AMPDU RX operations
+do not require hardware on as nothing is done in hardware for
+those actions. Without this we cannot guarantee hardware has
+been programmed correctly for each AMPDU TX action.
+
+Signed-off-by: Luis R. Rodriguez <lrodriguez@atheros.com>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/wireless/ath/ath9k/main.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/drivers/net/wireless/ath/ath9k/main.c
++++ b/drivers/net/wireless/ath/ath9k/main.c
+@@ -2726,15 +2726,21 @@ static int ath9k_ampdu_action(struct iee
+ case IEEE80211_AMPDU_RX_STOP:
+ break;
+ case IEEE80211_AMPDU_TX_START:
++ ath9k_ps_wakeup(sc);
+ ath_tx_aggr_start(sc, sta, tid, ssn);
+ ieee80211_start_tx_ba_cb_irqsafe(hw, sta->addr, tid);
++ ath9k_ps_restore(sc);
+ break;
+ case IEEE80211_AMPDU_TX_STOP:
++ ath9k_ps_wakeup(sc);
+ ath_tx_aggr_stop(sc, sta, tid);
+ ieee80211_stop_tx_ba_cb_irqsafe(hw, sta->addr, tid);
++ ath9k_ps_restore(sc);
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
++ ath9k_ps_wakeup(sc);
+ ath_tx_aggr_resume(sc, sta, tid);
++ ath9k_ps_restore(sc);
+ break;
+ default:
+ DPRINTF(sc, ATH_DBG_FATAL, "Unknown AMPDU action\n");
--- /dev/null
+From 5f70a88f631c3480107853cae12925185eb4c598 Mon Sep 17 00:00:00 2001
+From: Luis R. Rodriguez <lrodriguez@atheros.com>
+Date: Wed, 23 Dec 2009 20:03:28 -0500
+Subject: ath9k: wake hardware for interface IBSS/AP/Mesh removal
+
+From: Luis R. Rodriguez <lrodriguez@atheros.com>
+
+commit 5f70a88f631c3480107853cae12925185eb4c598 upstream.
+
+When we remove an IBSS/AP/Mesh interface we stop DMA
+but to do this we should ensure hardware is on. Awaken
+the device prior to these calls. This should ensure
+DMA is stopped upon suspend and plain device removal.
+
+Signed-off-by: Luis R. Rodriguez <lrodriguez@atheros.com>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/wireless/ath/ath9k/main.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/net/wireless/ath/ath9k/main.c
++++ b/drivers/net/wireless/ath/ath9k/main.c
+@@ -2277,8 +2277,10 @@ static void ath9k_remove_interface(struc
+ if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) ||
+ (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) ||
+ (sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT)) {
++ ath9k_ps_wakeup(sc);
+ ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
+ ath_beacon_return(sc, avp);
++ ath9k_ps_restore(sc);
+ }
+
+ sc->sc_flags &= ~SC_OP_BEACONS;
--- /dev/null
+From c37919bfe0a5c1bee9a31701a31e05a2f8840936 Mon Sep 17 00:00:00 2001
+From: Vasanthakumar Thiagarajan <vasanth@atheros.com>
+Date: Fri, 13 Nov 2009 14:32:40 +0530
+Subject: ath9k_hw: Fix AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_BB and its shift value in 0x4054
+
+From: Vasanthakumar Thiagarajan <vasanth@atheros.com>
+
+commit c37919bfe0a5c1bee9a31701a31e05a2f8840936 upstream.
+
+The bit value of AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_BB is wrong, it should
+be 0x400 and the number of bits to be right shifted is 10. Having this
+wrong value in 0x4054 sometimes affects bt quality on btcoex environment.
+
+Signed-off-by: Vasanthakumar Thiagarajan <vasanth@atheros.com>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/wireless/ath/ath9k/reg.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/wireless/ath/ath9k/reg.h
++++ b/drivers/net/wireless/ath/ath9k/reg.h
+@@ -969,10 +969,10 @@ enum {
+ #define AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_S 4
+ #define AR_GPIO_INPUT_EN_VAL_RFSILENT_DEF 0x00000080
+ #define AR_GPIO_INPUT_EN_VAL_RFSILENT_DEF_S 7
++#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_BB 0x00000400
++#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_BB_S 10
+ #define AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_BB 0x00001000
+ #define AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_BB_S 12
+-#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_BB 0x00001000
+-#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_BB_S 1
+ #define AR_GPIO_INPUT_EN_VAL_RFSILENT_BB 0x00008000
+ #define AR_GPIO_INPUT_EN_VAL_RFSILENT_BB_S 15
+ #define AR_GPIO_RTC_RESET_OVERRIDE_ENABLE 0x00010000
--- /dev/null
+From c90017dd43f0cdb42134b9229761e8be02bcd524 Mon Sep 17 00:00:00 2001
+From: Vasanthakumar Thiagarajan <vasanth@atheros.com>
+Date: Fri, 13 Nov 2009 14:32:39 +0530
+Subject: ath9k_hw: Fix possible OOB array indexing in gen_timer_index[] on 64-bit
+
+From: Vasanthakumar Thiagarajan <vasanth@atheros.com>
+
+commit c90017dd43f0cdb42134b9229761e8be02bcd524 upstream.
+
+debruijn32 (0x077CB531) is used to index gen_timer_index[]
+which is an array of 32 u32. Having debruijn32 as unsigned
+long on a 64-bit platform will result in indexing more than 32
+in gen_timer_index[] and there by causing a crash. Make it
+unsigned to fix this issue.
+
+Signed-off-by: Vasanthakumar Thiagarajan <vasanth@atheros.com>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/wireless/ath/ath9k/hw.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/wireless/ath/ath9k/hw.h
++++ b/drivers/net/wireless/ath/ath9k/hw.h
+@@ -408,7 +408,7 @@ struct ath9k_hw_version {
+ * Using de Bruijin sequence to to look up 1's index in a 32 bit number
+ * debruijn32 = 0000 0111 0111 1100 1011 0101 0011 0001
+ */
+-#define debruijn32 0x077CB531UL
++#define debruijn32 0x077CB531U
+
+ struct ath_gen_timer_configuration {
+ u32 next_addr;
--- /dev/null
+From c2ff581acab16c6af56d9e8c1a579bf041ec00b1 Mon Sep 17 00:00:00 2001
+From: Larry Finger <Larry.Finger@lwfinger.net>
+Date: Mon, 23 Nov 2009 18:40:45 -0600
+Subject: b43: avoid PPC fault during resume
+
+From: Larry Finger <Larry.Finger@lwfinger.net>
+
+commit c2ff581acab16c6af56d9e8c1a579bf041ec00b1 upstream.
+
+The routine b43_is_hw_radio_enabled() has long been a problem.
+For PPC architecture with PHY Revision < 3, a read of the register
+B43_MMIO_HWENABLED_LO will cause a CPU fault unless b43_status()
+returns a value of 2 (B43_STAT_STARTED) (BUG 14181). Fixing that
+results in Bug 14538 in which the driver is unable to reassociate
+after resuming from hibernation because b43_status() returns 0.
+
+The correct fix would be to determine why the status is 0; however,
+I have not yet found why that happens. The correct value is found for
+my device, which has PHY revision >= 3.
+
+Returning TRUE when the PHY revision < 3 and b43_status() returns 0 fixes
+the regression for 2.6.32.
+
+This patch fixes the problem in Red Hat Bugzilla #538523.
+
+Signed-off-by: Larry Finger <Larry.Finger@lwfinger.net>
+Tested-by: Christian Casteyde <casteyde.christian@free.fr>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/wireless/b43/rfkill.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/wireless/b43/rfkill.c
++++ b/drivers/net/wireless/b43/rfkill.c
+@@ -33,8 +33,14 @@ bool b43_is_hw_radio_enabled(struct b43_
+ & B43_MMIO_RADIO_HWENABLED_HI_MASK))
+ return 1;
+ } else {
+- if (b43_status(dev) >= B43_STAT_STARTED &&
+- b43_read16(dev, B43_MMIO_RADIO_HWENABLED_LO)
++ /* To prevent CPU fault on PPC, do not read a register
++ * unless the interface is started; however, on resume
++ * for hibernation, this routine is entered early. When
++ * that happens, unconditionally return TRUE.
++ */
++ if (b43_status(dev) < B43_STAT_STARTED)
++ return 1;
++ if (b43_read16(dev, B43_MMIO_RADIO_HWENABLED_LO)
+ & B43_MMIO_RADIO_HWENABLED_LO_MASK)
+ return 1;
+ }
--- /dev/null
+From 652fd781a52ad6e24b908cd8b83d12699754f253 Mon Sep 17 00:00:00 2001
+From: Oliver Neukum <oliver@neukum.org>
+Date: Wed, 16 Dec 2009 19:23:43 +0100
+Subject: Bluetooth: Prevent ill-timed autosuspend in USB driver
+
+From: Oliver Neukum <oliver@neukum.org>
+
+commit 652fd781a52ad6e24b908cd8b83d12699754f253 upstream.
+
+The device must be marked busy as it receives data.
+
+Signed-off-by: Oliver Neukum <oliver@neukum.org>
+Tested-by: Matthew Garrett <mjg@redhat.com>
+Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/bluetooth/btusb.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -307,6 +307,7 @@ static void btusb_bulk_complete(struct u
+ return;
+
+ usb_anchor_urb(urb, &data->bulk_anchor);
++ usb_mark_last_busy(data->udev);
+
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err < 0) {
--- /dev/null
+From 3bdb2d48c5f58c781a4099c99044384a23620884 Mon Sep 17 00:00:00 2001
+From: Johannes Berg <johannes@sipsolutions.net>
+Date: Wed, 23 Dec 2009 13:12:05 +0100
+Subject: cfg80211: fix race between deauth and assoc response
+
+From: Johannes Berg <johannes@sipsolutions.net>
+
+commit 3bdb2d48c5f58c781a4099c99044384a23620884 upstream.
+
+Joseph Nahmias reported, in http://bugs.debian.org/562016,
+that he was getting the following warning (with some log
+around the issue):
+
+ ath0: direct probe to AP 00:11:95:77:e0:b0 (try 1)
+ ath0: direct probe responded
+ ath0: authenticate with AP 00:11:95:77:e0:b0 (try 1)
+ ath0: authenticated
+ ath0: associate with AP 00:11:95:77:e0:b0 (try 1)
+ ath0: deauthenticating from 00:11:95:77:e0:b0 by local choice (reason=3)
+ ath0: direct probe to AP 00:11:95:77:e0:b0 (try 1)
+ ath0: RX AssocResp from 00:11:95:77:e0:b0 (capab=0x421 status=0 aid=2)
+ ath0: associated
+ ------------[ cut here ]------------
+ WARNING: at net/wireless/mlme.c:97 cfg80211_send_rx_assoc+0x14d/0x152 [cfg80211]()
+ Hardware name: 7658CTO
+ ...
+ Pid: 761, comm: phy0 Not tainted 2.6.32-trunk-686 #1
+ Call Trace:
+ [<c1030a5d>] ? warn_slowpath_common+0x5e/0x8a
+ [<c1030a93>] ? warn_slowpath_null+0xa/0xc
+ [<f86cafc7>] ? cfg80211_send_rx_assoc+0x14d/0x152
+ ...
+ ath0: link becomes ready
+ ath0: deauthenticating from 00:11:95:77:e0:b0 by local choice (reason=3)
+ ath0: no IPv6 routers present
+ ath0: link is not ready
+ ath0: direct probe to AP 00:11:95:77:e0:b0 (try 1)
+ ath0: direct probe responded
+ ath0: authenticate with AP 00:11:95:77:e0:b0 (try 1)
+ ath0: authenticated
+ ath0: associate with AP 00:11:95:77:e0:b0 (try 1)
+ ath0: RX ReassocResp from 00:11:95:77:e0:b0 (capab=0x421 status=0 aid=2)
+ ath0: associated
+
+It is not clear to me how the first "direct probe" here
+happens, but this seems to be a race condition, if the
+user requests to deauth after requesting assoc, but before
+the assoc response is received. In that case, it may
+happen that mac80211 tries to report the assoc success to
+cfg80211, but gets blocked on the wdev lock that is held
+because the user is requesting the deauth.
+
+The result is that we run into a warning. This is mostly
+harmless, but may cause an unexpected event to be sent
+to userspace; we'd send an assoc success event although
+userspace was no longer expecting that.
+
+To fix this, remove the warning and check whether the
+race happened and in that case abort processing.
+
+Reported-by: Joseph Nahmias <joe@nahmias.net>
+Cc: 562016-quiet@bugs.debian.org
+Signed-off-by: Johannes Berg <johannes@sipsolutions.net>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/wireless/mlme.c | 13 ++++++++++++-
+ 1 file changed, 12 insertions(+), 1 deletion(-)
+
+--- a/net/wireless/mlme.c
++++ b/net/wireless/mlme.c
+@@ -94,7 +94,18 @@ void cfg80211_send_rx_assoc(struct net_d
+ }
+ }
+
+- WARN_ON(!bss);
++ /*
++ * We might be coming here because the driver reported
++ * a successful association at the same time as the
++ * user requested a deauth. In that case, we will have
++ * removed the BSS from the auth_bsses list due to the
++ * deauth request when the assoc response makes it. If
++ * the two code paths acquire the lock the other way
++ * around, that's just the standard situation of a
++ * deauth being requested while connected.
++ */
++ if (!bss)
++ goto out;
+ } else if (wdev->conn) {
+ cfg80211_sme_failed_assoc(wdev);
+ need_connect_result = false;
--- /dev/null
+From a2934c7b363ddcc001964f2444649f909e583bef Mon Sep 17 00:00:00 2001
+From: Jeff Layton <jlayton@redhat.com>
+Date: Thu, 3 Dec 2009 08:09:41 -0500
+Subject: cifs: NULL out tcon, pSesInfo, and srvTcp pointers when chasing DFS referrals
+
+From: Jeff Layton <jlayton@redhat.com>
+
+commit a2934c7b363ddcc001964f2444649f909e583bef upstream.
+
+The scenario is this:
+
+The kernel gets EREMOTE and starts chasing a DFS referral at mount time.
+The tcon reference is put, which puts the session reference too, but
+neither pointer is zeroed out.
+
+The mount gets retried (goto try_mount_again) with new mount info.
+Session setup fails and rc ends up being non-zero. The code then
+falls through to the end and tries to put the previously freed tcon
+pointer again. Oops at: cifs_put_smb_ses+0x14/0xd0
+
+Fix this by moving the initialization of the rc variable and the tcon,
+pSesInfo and srvTcp pointers below the try_mount_again label. Also, add
+a FreeXid() before the goto to prevent xid "leaks".
+
+Signed-off-by: Jeff Layton <jlayton@redhat.com>
+Reported-by: Gustavo Carvalho Homem <gustavo@angulosolido.pt>
+Signed-off-by: Steve French <sfrench@us.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/cifs/connect.c | 13 +++++++++----
+ 1 file changed, 9 insertions(+), 4 deletions(-)
+
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -2287,12 +2287,12 @@ int
+ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
+ char *mount_data_global, const char *devname)
+ {
+- int rc = 0;
++ int rc;
+ int xid;
+ struct smb_vol *volume_info;
+- struct cifsSesInfo *pSesInfo = NULL;
+- struct cifsTconInfo *tcon = NULL;
+- struct TCP_Server_Info *srvTcp = NULL;
++ struct cifsSesInfo *pSesInfo;
++ struct cifsTconInfo *tcon;
++ struct TCP_Server_Info *srvTcp;
+ char *full_path;
+ char *mount_data = mount_data_global;
+ #ifdef CONFIG_CIFS_DFS_UPCALL
+@@ -2301,6 +2301,10 @@ cifs_mount(struct super_block *sb, struc
+ int referral_walks_count = 0;
+ try_mount_again:
+ #endif
++ rc = 0;
++ tcon = NULL;
++ pSesInfo = NULL;
++ srvTcp = NULL;
+ full_path = NULL;
+
+ xid = GetXid();
+@@ -2597,6 +2601,7 @@ remote_path_check:
+
+ cleanup_volume_info(&volume_info);
+ referral_walks_count++;
++ FreeXid(xid);
+ goto try_mount_again;
+ }
+ #else /* No DFS support, return error on mount */
--- /dev/null
+From bb6eddf7676e1c1f3e637aa93c5224488d99036f Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 10 Dec 2009 15:35:10 +0100
+Subject: clockevents: Prevent clockevent_devices list corruption on cpu hotplug
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit bb6eddf7676e1c1f3e637aa93c5224488d99036f upstream.
+
+Xiaotian Feng triggered a list corruption in the clock events list on
+CPU hotplug and debugged the root cause.
+
+If a CPU registers more than one per cpu clock event device, then only
+the active clock event device is removed on CPU_DEAD. The unused
+devices are kept in the clock events device list.
+
+On CPU up the clock event devices are registered again, which means
+that we list_add an already enqueued list_head. That results in list
+corruption.
+
+Resolve this by removing all devices which are associated to the dead
+CPU on CPU_DEAD.
+
+Reported-by: Xiaotian Feng <dfeng@redhat.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Xiaotian Feng <dfeng@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ kernel/time/clockevents.c | 18 +++++++++++++++---
+ 1 file changed, 15 insertions(+), 3 deletions(-)
+
+--- a/kernel/time/clockevents.c
++++ b/kernel/time/clockevents.c
+@@ -237,8 +237,9 @@ void clockevents_exchange_device(struct
+ */
+ void clockevents_notify(unsigned long reason, void *arg)
+ {
+- struct list_head *node, *tmp;
++ struct clock_event_device *dev, *tmp;
+ unsigned long flags;
++ int cpu;
+
+ spin_lock_irqsave(&clockevents_lock, flags);
+ clockevents_do_notify(reason, arg);
+@@ -249,8 +250,19 @@ void clockevents_notify(unsigned long re
+ * Unregister the clock event devices which were
+ * released from the users in the notify chain.
+ */
+- list_for_each_safe(node, tmp, &clockevents_released)
+- list_del(node);
++ list_for_each_entry_safe(dev, tmp, &clockevents_released, list)
++ list_del(&dev->list);
++ /*
++ * Now check whether the CPU has left unused per cpu devices
++ */
++ cpu = *((int *)arg);
++ list_for_each_entry_safe(dev, tmp, &clockevent_devices, list) {
++ if (cpumask_test_cpu(cpu, dev->cpumask) &&
++ cpumask_weight(dev->cpumask) == 1) {
++ BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
++ list_del(&dev->list);
++ }
++ }
+ break;
+ default:
+ break;
--- /dev/null
+From ba578cb34a71fb08fff14ac0796b934a8c9991e1 Mon Sep 17 00:00:00 2001
+From: Rusty Russell <rusty@rustcorp.com.au>
+Date: Tue, 3 Nov 2009 14:56:35 +1030
+Subject: cpumask: use modern cpumask style in drivers/edac/amd64_edac.c
+
+From: Rusty Russell <rusty@rustcorp.com.au>
+
+commit ba578cb34a71fb08fff14ac0796b934a8c9991e1 upstream.
+
+cpumask_t -> struct cpumask, and don't put one on the stack. (Note: this
+is actually on the stack unless CONFIG_CPUMASK_OFFSTACK=y).
+
+Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
+Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/edac/amd64_edac.c | 24 +++++++++++++++---------
+ 1 file changed, 15 insertions(+), 9 deletions(-)
+
+--- a/drivers/edac/amd64_edac.c
++++ b/drivers/edac/amd64_edac.c
+@@ -2625,7 +2625,7 @@ static int amd64_init_csrows(struct mem_
+ static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
+ {
+ struct amd64_pvt *pvt = mci->pvt_info;
+- const cpumask_t *cpumask = cpumask_of_node(pvt->mc_node_id);
++ const struct cpumask *cpumask = cpumask_of_node(pvt->mc_node_id);
+ int cpu, idx = 0, err = 0;
+ struct msr msrs[cpumask_weight(cpumask)];
+ u32 value;
+@@ -2701,7 +2701,7 @@ static void amd64_enable_ecc_error_repor
+
+ static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt)
+ {
+- const cpumask_t *cpumask = cpumask_of_node(pvt->mc_node_id);
++ const struct cpumask *cpumask = cpumask_of_node(pvt->mc_node_id);
+ int cpu, idx = 0, err = 0;
+ struct msr msrs[cpumask_weight(cpumask)];
+ u32 value;
+@@ -2734,7 +2734,7 @@ static void amd64_restore_ecc_error_repo
+ }
+
+ /* get all cores on this DCT */
+-static void get_cpus_on_this_dct_cpumask(cpumask_t *mask, int nid)
++static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, int nid)
+ {
+ int cpu;
+
+@@ -2746,25 +2746,30 @@ static void get_cpus_on_this_dct_cpumask
+ /* check MCG_CTL on all the cpus on this node */
+ static bool amd64_nb_mce_bank_enabled_on_node(int nid)
+ {
+- cpumask_t mask;
++ cpumask_var_t mask;
+ struct msr *msrs;
+ int cpu, nbe, idx = 0;
+ bool ret = false;
+
+- cpumask_clear(&mask);
++ if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
++ amd64_printk(KERN_WARNING, "%s: error allocating mask\n",
++ __func__);
++ return false;
++ }
+
+- get_cpus_on_this_dct_cpumask(&mask, nid);
++ get_cpus_on_this_dct_cpumask(mask, nid);
+
+- msrs = kzalloc(sizeof(struct msr) * cpumask_weight(&mask), GFP_KERNEL);
++ msrs = kzalloc(sizeof(struct msr) * cpumask_weight(mask), GFP_KERNEL);
+ if (!msrs) {
+ amd64_printk(KERN_WARNING, "%s: error allocating msrs\n",
+ __func__);
++ free_cpumask_var(mask);
+ return false;
+ }
+
+- rdmsr_on_cpus(&mask, MSR_IA32_MCG_CTL, msrs);
++ rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
+
+- for_each_cpu(cpu, &mask) {
++ for_each_cpu(cpu, mask) {
+ nbe = msrs[idx].l & K8_MSR_MCGCTL_NBE;
+
+ debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
+@@ -2780,6 +2785,7 @@ static bool amd64_nb_mce_bank_enabled_on
+
+ out:
+ kfree(msrs);
++ free_cpumask_var(mask);
+ return ret;
+ }
+
--- /dev/null
+From 4297a462f455e38f08976df7b16c849614a287da Mon Sep 17 00:00:00 2001
+From: Nicolas Ferre <nicolas.ferre@atmel.com>
+Date: Wed, 16 Dec 2009 16:28:03 +0100
+Subject: dma: at_hdmac: correct incompatible type for argument 1 of 'spin_lock_bh'
+
+From: Nicolas Ferre <nicolas.ferre@atmel.com>
+
+commit 4297a462f455e38f08976df7b16c849614a287da upstream.
+
+Correct a typo error in locking calls.
+
+Signed-off-by: Nicolas Ferre <nicolas.ferre@atmel.com>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/dma/at_hdmac.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/dma/at_hdmac.c
++++ b/drivers/dma/at_hdmac.c
+@@ -815,7 +815,7 @@ atc_is_tx_complete(struct dma_chan *chan
+ dev_vdbg(chan2dev(chan), "is_tx_complete: %d (d%d, u%d)\n",
+ cookie, done ? *done : 0, used ? *used : 0);
+
+- spin_lock_bh(atchan->lock);
++ spin_lock_bh(&atchan->lock);
+
+ last_complete = atchan->completed_cookie;
+ last_used = chan->cookie;
+@@ -830,7 +830,7 @@ atc_is_tx_complete(struct dma_chan *chan
+ ret = dma_async_is_complete(cookie, last_complete, last_used);
+ }
+
+- spin_unlock_bh(atchan->lock);
++ spin_unlock_bh(&atchan->lock);
+
+ if (done)
+ *done = last_complete;
--- /dev/null
+From f797d9881b62c2ddb1d2e7bd80d87141949c84aa Mon Sep 17 00:00:00 2001
+From: Shaun Ruffell <sruffell@digium.com>
+Date: Thu, 17 Dec 2009 18:00:36 -0600
+Subject: dma-debug: Do not add notifier when dma debugging is disabled.
+
+From: Shaun Ruffell <sruffell@digium.com>
+
+commit f797d9881b62c2ddb1d2e7bd80d87141949c84aa upstream.
+
+If CONFIG_HAVE_DMA_API_DEBUG is defined and "dma_debug=off" is
+specified on the kernel command line, when you detach a driver from a
+device you can cause the following NULL pointer dereference:
+
+BUG: unable to handle kernel NULL pointer dereference at (null)
+IP: [<c0580d35>] dma_debug_device_change+0x5d/0x117
+
+The problem is that the dma_debug_device_change notifier function is
+added to the bus notifier chain even though the dma_entry_hash array
+was never initialized. If dma debugging is disabled, this patch both
+prevents dma_debug_device_change notifiers from being added to the
+chain, and additionally ensures that the dma_debug_device_change
+notifier function is a no-op.
+
+Signed-off-by: Shaun Ruffell <sruffell@digium.com>
+Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ lib/dma-debug.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/lib/dma-debug.c
++++ b/lib/dma-debug.c
+@@ -676,6 +676,8 @@ static int dma_debug_device_change(struc
+ struct device *dev = data;
+ int count;
+
++ if (global_disable)
++ return;
+
+ switch (action) {
+ case BUS_NOTIFY_UNBOUND_DRIVER:
+@@ -697,6 +699,9 @@ void dma_debug_add_bus(struct bus_type *
+ {
+ struct notifier_block *nb;
+
++ if (global_disable)
++ return;
++
+ nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
+ if (nb == NULL) {
+ pr_err("dma_debug_add_bus: out of memory\n");
--- /dev/null
+From a8fe9ea200ea21421ea750423d1d4d4f7ce037cf Mon Sep 17 00:00:00 2001
+From: Ingo Molnar <mingo@elte.hu>
+Date: Thu, 31 Dec 2009 15:16:23 +0100
+Subject: dma-debug: Fix bug causing build warning
+
+From: Ingo Molnar <mingo@elte.hu>
+
+commit a8fe9ea200ea21421ea750423d1d4d4f7ce037cf upstream.
+
+Stephen Rothwell reported the following build warning:
+
+ lib/dma-debug.c: In function 'dma_debug_device_change':
+ lib/dma-debug.c:680: warning: 'return' with no value, in function returning non-void
+
+Introduced by commit f797d9881b62c2ddb1d2e7bd80d87141949c84aa
+("dma-debug: Do not add notifier when dma debugging is disabled").
+
+Return 0 [notify-done] when disabled. (this is standard bus notifier behavior.)
+
+Signed-off-by: Shaun Ruffell <sruffell@digium.com>
+Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+LKML-Reference: <20091231125624.GA14666@liondog.tnic>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ lib/dma-debug.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+--- a/lib/dma-debug.c
++++ b/lib/dma-debug.c
+@@ -670,14 +670,13 @@ static int device_dma_allocations(struct
+ return count;
+ }
+
+-static int dma_debug_device_change(struct notifier_block *nb,
+- unsigned long action, void *data)
++static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data)
+ {
+ struct device *dev = data;
+ int count;
+
+ if (global_disable)
+- return;
++ return 0;
+
+ switch (action) {
+ case BUS_NOTIFY_UNBOUND_DRIVER:
--- /dev/null
+From 6057912d7baad31be9819518674ffad349a065b1 Mon Sep 17 00:00:00 2001
+From: Julia Lawall <julia@diku.dk>
+Date: Sun, 13 Dec 2009 05:47:04 +0000
+Subject: drivers/net/usb: Correct code taking the size of a pointer
+
+From: Julia Lawall <julia@diku.dk>
+
+commit 6057912d7baad31be9819518674ffad349a065b1 upstream.
+
+sizeof(dev->dev_addr) is the size of a pointer. A few lines above, the
+size of this field is obtained using netdev->addr_len for a call to memcpy,
+so do the same here.
+
+A simplified version of the semantic patch that finds this problem is as
+follows: (http://coccinelle.lip6.fr/)
+
+// <smpl>
+@@
+expression *x;
+expression f;
+type T;
+@@
+
+*f(...,(T)x,...)
+// </smpl>
+
+Signed-off-by: Julia Lawall <julia@diku.dk>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+--- a/drivers/net/usb/rtl8150.c
++++ b/drivers/net/usb/rtl8150.c
+@@ -324,7 +324,7 @@ static int rtl8150_set_mac_address(struct net_device *netdev, void *p)
+ dbg("%02X:", netdev->dev_addr[i]);
+ dbg("%02X\n", netdev->dev_addr[i]);
+ /* Set the IDR registers. */
+- set_registers(dev, IDR, sizeof(netdev->dev_addr), netdev->dev_addr);
++ set_registers(dev, IDR, netdev->addr_len, netdev->dev_addr);
+ #ifdef EEPROM_WRITE
+ {
+ u8 cr;
--- /dev/null
+From b16d9acbdb97452d1418420e069acf7381ef10bb Mon Sep 17 00:00:00 2001
+From: Zhao Yakui <yakui.zhao@intel.com>
+Date: Wed, 9 Dec 2009 11:23:42 +0800
+Subject: drm: disable all the possible outputs/crtcs before entering KMS mode
+
+From: Zhao Yakui <yakui.zhao@intel.com>
+
+commit b16d9acbdb97452d1418420e069acf7381ef10bb upstream.
+
+Sometimes we will use a crtc for integrated LVDS, which is different from
+that assigned by BIOS. If we want to get flicker-free transitions,
+then we could read out the current state for it and set our current state
+accordingly.
+
+But it is true that if we aren't reading current state out, we do need
+to turn everything off before modesetting. Otherwise the clocks can get very
+angry and we get things worse than a flicker at boot.
+In fact we also do the similar thing in UMS mode. We will disable all the
+possible outputs/crtcs for the first modesetting.
+
+So we disable all the possible outputs/crtcs before entering the KMS mode.
+Before we configure connector/encoder/crtc, the function of
+drm_helper_disable_unused_function can disable all the possible outputs/crtcs.
+
+Signed-off-by: Zhao Yakui <yakui.zhao@intel.com>
+Reviewed-by: Eric Anholt <eric@anholt.net>
+Reviewed-by: Rafal Milecki <zajec5@gmail.com>
+Signed-off-by: Dave Airlie <airlied@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+--- a/drivers/gpu/drm/drm_crtc_helper.c
++++ b/drivers/gpu/drm/drm_crtc_helper.c
+@@ -1020,6 +1020,9 @@ bool drm_helper_initial_config(struct drm_device *dev)
+ {
+ int count = 0;
+
++ /* disable all the possible outputs/crtcs before entering KMS mode */
++ drm_helper_disable_unused_functions(dev);
++
+ drm_fb_helper_parse_command_line(dev);
+
+ count = drm_helper_probe_connector_modes(dev,
--- /dev/null
+From 794f3141a194a4f4c28c1d417b071a901f78d9bb Mon Sep 17 00:00:00 2001
+From: Dave Airlie <airlied@linux.ie>
+Date: Sun, 20 Dec 2009 16:08:40 +1000
+Subject: drm/radeon: fix build on 64-bit with some compilers.
+
+From: Dave Airlie <airlied@linux.ie>
+
+commit 794f3141a194a4f4c28c1d417b071a901f78d9bb upstream.
+
+drivers/gpu/drm/radeon/radeon_test.c:45: undefined reference to `__udivdi3'
+
+Reported-by: Mr. James W. Laferriere <babydr@baby-dragons.com>
+Signed-off-by: Dave Airlie <airlied@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/gpu/drm/radeon/radeon_test.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/radeon/radeon_test.c
++++ b/drivers/gpu/drm/radeon/radeon_test.c
+@@ -42,8 +42,8 @@ void radeon_test_moves(struct radeon_dev
+ /* Number of tests =
+ * (Total GTT - IB pool - writeback page - ring buffer) / test size
+ */
+- n = (rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - RADEON_GPU_PAGE_SIZE -
+- rdev->cp.ring_size) / size;
++ n = ((u32)(rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - RADEON_GPU_PAGE_SIZE -
++ rdev->cp.ring_size)) / size;
+
+ gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
+ if (!gtt_obj) {
--- /dev/null
+From airlied@gmail.com Mon Jan 4 14:45:25 2010
+From: Dave Airlie <airlied@gmail.com>
+Date: Mon, 21 Dec 2009 14:33:52 +1000
+Subject: drm/radeon/kms: fix crtc vblank update for r600
+To: stable@kernel.org
+Cc: Dave Airlie <airlied@redhat.com>, linux-kernel@vger.kernel.org, dri-devel@lists.sf.net
+Message-ID: <1261370032-15420-1-git-send-email-airlied@gmail.com>
+
+From: Dave Airlie <airlied@redhat.com>
+
+In 2.6.32.2 r600 had no IRQ support, however the patch in
+500b758725314ab1b5316eb0caa5b0fa26740e6b to fix vblanks on avivo
+cards, needs irqs.
+
+So check for an R600 card and avoid this path if so.
+
+This is a stable only patch for 2.6.32.2 as 2.6.33 has IRQs for r600.
+
+Signed-off-by: Dave Airlie <airlied@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/gpu/drm/radeon/atombios_crtc.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/radeon/atombios_crtc.c
++++ b/drivers/gpu/drm/radeon/atombios_crtc.c
+@@ -249,13 +249,15 @@ void atombios_crtc_dpms(struct drm_crtc
+ if (ASIC_IS_DCE3(rdev))
+ atombios_enable_crtc_memreq(crtc, 1);
+ atombios_blank_crtc(crtc, 0);
+- drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
++ if (rdev->family < CHIP_R600)
++ drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
+ radeon_crtc_load_lut(crtc);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+- drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
++ if (rdev->family < CHIP_R600)
++ drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
+ atombios_blank_crtc(crtc, 1);
+ if (ASIC_IS_DCE3(rdev))
+ atombios_enable_crtc_memreq(crtc, 0);
--- /dev/null
+From 70abc8cb90e679d8519721e2761d8366a18212a6 Mon Sep 17 00:00:00 2001
+From: Roger Oksanen <roger.oksanen@cs.helsinki.fi>
+Date: Fri, 18 Dec 2009 20:18:21 -0800
+Subject: e100: Fix broken cbs accounting due to missing memset.
+
+From: Roger Oksanen <roger.oksanen@cs.helsinki.fi>
+
+commit 70abc8cb90e679d8519721e2761d8366a18212a6 upstream.
+
+Alan Stern noticed that e100 caused slab corruption.
+commit 98468efddb101f8a29af974101c17ba513b07be1 changed
+the allocation of cbs to use dma pools that don't return zeroed memory,
+especially the cb->status field used to track which cb to clean, causing
+(the visible) double freeing of skbs and a wrong free cbs count.
+
+Now the cbs are explicitly zeroed at allocation time.
+
+Reported-by: Alan Stern <stern@rowland.harvard.edu>
+Tested-by: Alan Stern <stern@rowland.harvard.edu>
+Signed-off-by: Roger Oksanen <roger.oksanen@cs.helsinki.fi>
+Acked-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/e100.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/e100.c
++++ b/drivers/net/e100.c
+@@ -1817,6 +1817,7 @@ static int e100_alloc_cbs(struct nic *ni
+ &nic->cbs_dma_addr);
+ if (!nic->cbs)
+ return -ENOMEM;
++ memset(nic->cbs, 0, count * sizeof(struct cb));
+
+ for (cb = nic->cbs, i = 0; i < count; cb++, i++) {
+ cb->next = (i + 1 < count) ? cb + 1 : nic->cbs;
+@@ -1825,7 +1826,6 @@ static int e100_alloc_cbs(struct nic *ni
+ cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb);
+ cb->link = cpu_to_le32(nic->cbs_dma_addr +
+ ((i+1) % count) * sizeof(struct cb));
+- cb->skb = NULL;
+ }
+
+ nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs;
--- /dev/null
+From a9e7f4472075fb6937c545af3f6329e9946bbe66 Mon Sep 17 00:00:00 2001
+From: Dmitry Monakhov <dmonakhov@openvz.org>
+Date: Mon, 14 Dec 2009 15:21:14 +0300
+Subject: ext4: Convert to generic reserved quota's space management.
+
+From: Dmitry Monakhov <dmonakhov@openvz.org>
+
+commit a9e7f4472075fb6937c545af3f6329e9946bbe66 upstream.
+
+This patch also fixes write vs chown race condition.
+
+Acked-by: "Theodore Ts'o" <tytso@mit.edu>
+Signed-off-by: Dmitry Monakhov <dmonakhov@openvz.org>
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/ext4/ext4.h | 6 +++++-
+ fs/ext4/inode.c | 16 +++++++---------
+ fs/ext4/super.c | 5 +++++
+ 3 files changed, 17 insertions(+), 10 deletions(-)
+
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -698,6 +698,10 @@ struct ext4_inode_info {
+ __u16 i_extra_isize;
+
+ spinlock_t i_block_reservation_lock;
++#ifdef CONFIG_QUOTA
++ /* quota space reservation, managed internally by quota code */
++ qsize_t i_reserved_quota;
++#endif
+
+ /* completed async DIOs that might need unwritten extents handling */
+ struct list_head i_aio_dio_complete_list;
+@@ -1432,7 +1436,7 @@ extern int ext4_chunk_trans_blocks(struc
+ extern int ext4_block_truncate_page(handle_t *handle,
+ struct address_space *mapping, loff_t from);
+ extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
+-extern qsize_t ext4_get_reserved_space(struct inode *inode);
++extern qsize_t *ext4_get_reserved_space(struct inode *inode);
+ extern int flush_aio_dio_completed_IO(struct inode *inode);
+ /* ioctl.c */
+ extern long ext4_ioctl(struct file *, unsigned int, unsigned long);
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -1045,17 +1045,12 @@ out:
+ return err;
+ }
+
+-qsize_t ext4_get_reserved_space(struct inode *inode)
++#ifdef CONFIG_QUOTA
++qsize_t *ext4_get_reserved_space(struct inode *inode)
+ {
+- unsigned long long total;
+-
+- spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
+- total = EXT4_I(inode)->i_reserved_data_blocks +
+- EXT4_I(inode)->i_reserved_meta_blocks;
+- spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
+-
+- return (total << inode->i_blkbits);
++ return &EXT4_I(inode)->i_reserved_quota;
+ }
++#endif
+ /*
+ * Calculate the number of metadata blocks need to reserve
+ * to allocate @blocks for non extent file based file
+@@ -4850,6 +4845,9 @@ struct inode *ext4_iget(struct super_blo
+ ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
+ inode->i_size = ext4_isize(raw_inode);
+ ei->i_disksize = inode->i_size;
++#ifdef CONFIG_QUOTA
++ ei->i_reserved_quota = 0;
++#endif
+ inode->i_generation = le32_to_cpu(raw_inode->i_generation);
+ ei->i_block_group = iloc.block_group;
+ ei->i_last_alloc_group = ~0;
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -704,6 +704,9 @@ static struct inode *ext4_alloc_inode(st
+ ei->i_allocated_meta_blocks = 0;
+ ei->i_delalloc_reserved_flag = 0;
+ spin_lock_init(&(ei->i_block_reservation_lock));
++#ifdef CONFIG_QUOTA
++ ei->i_reserved_quota = 0;
++#endif
+ INIT_LIST_HEAD(&ei->i_aio_dio_complete_list);
+ ei->cur_aio_dio = NULL;
+ ei->i_sync_tid = 0;
+@@ -1001,7 +1004,9 @@ static const struct dquot_operations ext
+ .reserve_space = dquot_reserve_space,
+ .claim_space = dquot_claim_space,
+ .release_rsv = dquot_release_reserved_space,
++#ifdef CONFIG_QUOTA
+ .get_reserved_space = ext4_get_reserved_space,
++#endif
+ .alloc_inode = dquot_alloc_inode,
+ .free_space = dquot_free_space,
+ .free_inode = dquot_free_inode,
--- /dev/null
+From d21cd8f163ac44b15c465aab7306db931c606908 Mon Sep 17 00:00:00 2001
+From: Dmitry Monakhov <dmonakhov@openvz.org>
+Date: Thu, 10 Dec 2009 03:31:45 +0000
+Subject: ext4: Fix potential quota deadlock
+
+From: Dmitry Monakhov <dmonakhov@openvz.org>
+
+commit d21cd8f163ac44b15c465aab7306db931c606908 upstream.
+
+We have to delay vfs_dq_claim_space() until allocation context destruction.
+Currently we have following call-trace:
+ext4_mb_new_blocks()
+ /* task is already holding ac->alloc_semp */
+ ->ext4_mb_mark_diskspace_used
+ ->vfs_dq_claim_space() /* acquire dqptr_sem here. Possible deadlock */
+ ->ext4_mb_release_context() /* drop ac->alloc_semp here */
+
+Let's move quota claiming to ext4_da_update_reserve_space()
+
+ =======================================================
+ [ INFO: possible circular locking dependency detected ]
+ 2.6.32-rc7 #18
+ -------------------------------------------------------
+ write-truncate-/3465 is trying to acquire lock:
+ (&s->s_dquot.dqptr_sem){++++..}, at: [<c025e73b>] dquot_claim_space+0x3b/0x1b0
+
+ but task is already holding lock:
+ (&meta_group_info[i]->alloc_sem){++++..}, at: [<c02ce962>] ext4_mb_load_buddy+0xb2/0x370
+
+ which lock already depends on the new lock.
+
+ the existing dependency chain (in reverse order) is:
+
+ -> #3 (&meta_group_info[i]->alloc_sem){++++..}:
+ [<c017d04b>] __lock_acquire+0xd7b/0x1260
+ [<c017d5ea>] lock_acquire+0xba/0xd0
+ [<c0527191>] down_read+0x51/0x90
+ [<c02ce962>] ext4_mb_load_buddy+0xb2/0x370
+ [<c02d0c1c>] ext4_mb_free_blocks+0x46c/0x870
+ [<c029c9d3>] ext4_free_blocks+0x73/0x130
+ [<c02c8cfc>] ext4_ext_truncate+0x76c/0x8d0
+ [<c02a8087>] ext4_truncate+0x187/0x5e0
+ [<c01e0f7b>] vmtruncate+0x6b/0x70
+ [<c022ec02>] inode_setattr+0x62/0x190
+ [<c02a2d7a>] ext4_setattr+0x25a/0x370
+ [<c022ee81>] notify_change+0x151/0x340
+ [<c021349d>] do_truncate+0x6d/0xa0
+ [<c0221034>] may_open+0x1d4/0x200
+ [<c022412b>] do_filp_open+0x1eb/0x910
+ [<c021244d>] do_sys_open+0x6d/0x140
+ [<c021258e>] sys_open+0x2e/0x40
+ [<c0103100>] sysenter_do_call+0x12/0x32
+
+ -> #2 (&ei->i_data_sem){++++..}:
+ [<c017d04b>] __lock_acquire+0xd7b/0x1260
+ [<c017d5ea>] lock_acquire+0xba/0xd0
+ [<c0527191>] down_read+0x51/0x90
+ [<c02a5787>] ext4_get_blocks+0x47/0x450
+ [<c02a74c1>] ext4_getblk+0x61/0x1d0
+ [<c02a7a7f>] ext4_bread+0x1f/0xa0
+ [<c02bcddc>] ext4_quota_write+0x12c/0x310
+ [<c0262d23>] qtree_write_dquot+0x93/0x120
+ [<c0261708>] v2_write_dquot+0x28/0x30
+ [<c025d3fb>] dquot_commit+0xab/0xf0
+ [<c02be977>] ext4_write_dquot+0x77/0x90
+ [<c02be9bf>] ext4_mark_dquot_dirty+0x2f/0x50
+ [<c025e321>] dquot_alloc_inode+0x101/0x180
+ [<c029fec2>] ext4_new_inode+0x602/0xf00
+ [<c02ad789>] ext4_create+0x89/0x150
+ [<c0221ff2>] vfs_create+0xa2/0xc0
+ [<c02246e7>] do_filp_open+0x7a7/0x910
+ [<c021244d>] do_sys_open+0x6d/0x140
+ [<c021258e>] sys_open+0x2e/0x40
+ [<c0103100>] sysenter_do_call+0x12/0x32
+
+ -> #1 (&sb->s_type->i_mutex_key#7/4){+.+...}:
+ [<c017d04b>] __lock_acquire+0xd7b/0x1260
+ [<c017d5ea>] lock_acquire+0xba/0xd0
+ [<c0526505>] mutex_lock_nested+0x65/0x2d0
+ [<c0260c9d>] vfs_load_quota_inode+0x4bd/0x5a0
+ [<c02610af>] vfs_quota_on_path+0x5f/0x70
+ [<c02bc812>] ext4_quota_on+0x112/0x190
+ [<c026345a>] sys_quotactl+0x44a/0x8a0
+ [<c0103100>] sysenter_do_call+0x12/0x32
+
+ -> #0 (&s->s_dquot.dqptr_sem){++++..}:
+ [<c017d361>] __lock_acquire+0x1091/0x1260
+ [<c017d5ea>] lock_acquire+0xba/0xd0
+ [<c0527191>] down_read+0x51/0x90
+ [<c025e73b>] dquot_claim_space+0x3b/0x1b0
+ [<c02cb95f>] ext4_mb_mark_diskspace_used+0x36f/0x380
+ [<c02d210a>] ext4_mb_new_blocks+0x34a/0x530
+ [<c02c83fb>] ext4_ext_get_blocks+0x122b/0x13c0
+ [<c02a5966>] ext4_get_blocks+0x226/0x450
+ [<c02a5ff3>] mpage_da_map_blocks+0xc3/0xaa0
+ [<c02a6ed6>] ext4_da_writepages+0x506/0x790
+ [<c01de272>] do_writepages+0x22/0x50
+ [<c01d766d>] __filemap_fdatawrite_range+0x6d/0x80
+ [<c01d7b9b>] filemap_flush+0x2b/0x30
+ [<c02a40ac>] ext4_alloc_da_blocks+0x5c/0x60
+ [<c029e595>] ext4_release_file+0x75/0xb0
+ [<c0216b59>] __fput+0xf9/0x210
+ [<c0216c97>] fput+0x27/0x30
+ [<c02122dc>] filp_close+0x4c/0x80
+ [<c014510e>] put_files_struct+0x6e/0xd0
+ [<c01451b7>] exit_files+0x47/0x60
+ [<c0146a24>] do_exit+0x144/0x710
+ [<c0147028>] do_group_exit+0x38/0xa0
+ [<c0159abc>] get_signal_to_deliver+0x2ac/0x410
+ [<c0102849>] do_notify_resume+0xb9/0x890
+ [<c01032d2>] work_notifysig+0x13/0x21
+
+ other info that might help us debug this:
+
+ 3 locks held by write-truncate-/3465:
+ #0: (jbd2_handle){+.+...}, at: [<c02e1f8f>] start_this_handle+0x38f/0x5c0
+ #1: (&ei->i_data_sem){++++..}, at: [<c02a57f6>] ext4_get_blocks+0xb6/0x450
+ #2: (&meta_group_info[i]->alloc_sem){++++..}, at: [<c02ce962>] ext4_mb_load_buddy+0xb2/0x370
+
+ stack backtrace:
+ Pid: 3465, comm: write-truncate- Not tainted 2.6.32-rc7 #18
+ Call Trace:
+ [<c0524cb3>] ? printk+0x1d/0x22
+ [<c017ac9a>] print_circular_bug+0xca/0xd0
+ [<c017d361>] __lock_acquire+0x1091/0x1260
+ [<c016bca2>] ? sched_clock_local+0xd2/0x170
+ [<c0178fd0>] ? trace_hardirqs_off_caller+0x20/0xd0
+ [<c017d5ea>] lock_acquire+0xba/0xd0
+ [<c025e73b>] ? dquot_claim_space+0x3b/0x1b0
+ [<c0527191>] down_read+0x51/0x90
+ [<c025e73b>] ? dquot_claim_space+0x3b/0x1b0
+ [<c025e73b>] dquot_claim_space+0x3b/0x1b0
+ [<c02cb95f>] ext4_mb_mark_diskspace_used+0x36f/0x380
+ [<c02d210a>] ext4_mb_new_blocks+0x34a/0x530
+ [<c02c601d>] ? ext4_ext_find_extent+0x25d/0x280
+ [<c02c83fb>] ext4_ext_get_blocks+0x122b/0x13c0
+ [<c016bca2>] ? sched_clock_local+0xd2/0x170
+ [<c016be60>] ? sched_clock_cpu+0x120/0x160
+ [<c016beef>] ? cpu_clock+0x4f/0x60
+ [<c0178fd0>] ? trace_hardirqs_off_caller+0x20/0xd0
+ [<c052712c>] ? down_write+0x8c/0xa0
+ [<c02a5966>] ext4_get_blocks+0x226/0x450
+ [<c016be60>] ? sched_clock_cpu+0x120/0x160
+ [<c016beef>] ? cpu_clock+0x4f/0x60
+ [<c017908b>] ? trace_hardirqs_off+0xb/0x10
+ [<c02a5ff3>] mpage_da_map_blocks+0xc3/0xaa0
+ [<c01d69cc>] ? find_get_pages_tag+0x16c/0x180
+ [<c01d6860>] ? find_get_pages_tag+0x0/0x180
+ [<c02a73bd>] ? __mpage_da_writepage+0x16d/0x1a0
+ [<c01dfc4e>] ? pagevec_lookup_tag+0x2e/0x40
+ [<c01ddf1b>] ? write_cache_pages+0xdb/0x3d0
+ [<c02a7250>] ? __mpage_da_writepage+0x0/0x1a0
+ [<c02a6ed6>] ext4_da_writepages+0x506/0x790
+ [<c016beef>] ? cpu_clock+0x4f/0x60
+ [<c016bca2>] ? sched_clock_local+0xd2/0x170
+ [<c016be60>] ? sched_clock_cpu+0x120/0x160
+ [<c016be60>] ? sched_clock_cpu+0x120/0x160
+ [<c02a69d0>] ? ext4_da_writepages+0x0/0x790
+ [<c01de272>] do_writepages+0x22/0x50
+ [<c01d766d>] __filemap_fdatawrite_range+0x6d/0x80
+ [<c01d7b9b>] filemap_flush+0x2b/0x30
+ [<c02a40ac>] ext4_alloc_da_blocks+0x5c/0x60
+ [<c029e595>] ext4_release_file+0x75/0xb0
+ [<c0216b59>] __fput+0xf9/0x210
+ [<c0216c97>] fput+0x27/0x30
+ [<c02122dc>] filp_close+0x4c/0x80
+ [<c014510e>] put_files_struct+0x6e/0xd0
+ [<c01451b7>] exit_files+0x47/0x60
+ [<c0146a24>] do_exit+0x144/0x710
+ [<c017b163>] ? lock_release_holdtime+0x33/0x210
+ [<c0528137>] ? _spin_unlock_irq+0x27/0x30
+ [<c0147028>] do_group_exit+0x38/0xa0
+ [<c017babb>] ? trace_hardirqs_on+0xb/0x10
+ [<c0159abc>] get_signal_to_deliver+0x2ac/0x410
+ [<c0102849>] do_notify_resume+0xb9/0x890
+ [<c0178fd0>] ? trace_hardirqs_off_caller+0x20/0xd0
+ [<c017b163>] ? lock_release_holdtime+0x33/0x210
+ [<c0165b50>] ? autoremove_wake_function+0x0/0x50
+ [<c017ba54>] ? trace_hardirqs_on_caller+0x134/0x190
+ [<c017babb>] ? trace_hardirqs_on+0xb/0x10
+ [<c0300ba4>] ? security_file_permission+0x14/0x20
+ [<c0215761>] ? vfs_write+0x131/0x190
+ [<c0214f50>] ? do_sync_write+0x0/0x120
+ [<c0103115>] ? sysenter_do_call+0x27/0x32
+ [<c01032d2>] work_notifysig+0x13/0x21
+
+CC: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Dmitry Monakhov <dmonakhov@openvz.org>
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/ext4/inode.c | 9 +++++++--
+ fs/ext4/mballoc.c | 6 ------
+ 2 files changed, 7 insertions(+), 8 deletions(-)
+
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -1088,7 +1088,7 @@ static int ext4_calc_metadata_amount(str
+ static void ext4_da_update_reserve_space(struct inode *inode, int used)
+ {
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+- int total, mdb, mdb_free;
++ int total, mdb, mdb_free, mdb_claim = 0;
+
+ spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
+ /* recalculate the number of metablocks still need to be reserved */
+@@ -1101,7 +1101,9 @@ static void ext4_da_update_reserve_space
+
+ if (mdb_free) {
+ /* Account for allocated meta_blocks */
+- mdb_free -= EXT4_I(inode)->i_allocated_meta_blocks;
++ mdb_claim = EXT4_I(inode)->i_allocated_meta_blocks;
++ BUG_ON(mdb_free < mdb_claim);
++ mdb_free -= mdb_claim;
+
+ /* update fs dirty blocks counter */
+ percpu_counter_sub(&sbi->s_dirtyblocks_counter, mdb_free);
+@@ -1112,8 +1114,11 @@ static void ext4_da_update_reserve_space
+ /* update per-inode reservations */
+ BUG_ON(used > EXT4_I(inode)->i_reserved_data_blocks);
+ EXT4_I(inode)->i_reserved_data_blocks -= used;
++ percpu_counter_sub(&sbi->s_dirtyblocks_counter, used + mdb_claim);
+ spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
+
++ vfs_dq_claim_block(inode, used + mdb_claim);
++
+ /*
+ * free those over-booking quota for metadata blocks
+ */
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -2755,12 +2755,6 @@ ext4_mb_mark_diskspace_used(struct ext4_
+ if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED))
+ /* release all the reserved blocks if non delalloc */
+ percpu_counter_sub(&sbi->s_dirtyblocks_counter, reserv_blks);
+- else {
+- percpu_counter_sub(&sbi->s_dirtyblocks_counter,
+- ac->ac_b_ex.fe_len);
+- /* convert reserved quota blocks to real quota blocks */
+- vfs_dq_claim_block(ac->ac_inode, ac->ac_b_ex.fe_len);
+- }
+
+ if (sbi->s_log_groups_per_flex) {
+ ext4_group_t flex_group = ext4_flex_group(sbi,
--- /dev/null
+From 39bc680a8160bb9d6743f7873b535d553ff61058 Mon Sep 17 00:00:00 2001
+From: Dmitry Monakhov <dmonakhov@openvz.org>
+Date: Thu, 10 Dec 2009 16:36:27 +0000
+Subject: ext4: fix sleep inside spinlock issue with quota and dealloc (#14739)
+
+From: Dmitry Monakhov <dmonakhov@openvz.org>
+
+commit 39bc680a8160bb9d6743f7873b535d553ff61058 upstream.
+
+Unlock i_block_reservation_lock before vfs_dq_reserve_block().
+This patch fixes http://bugzilla.kernel.org/show_bug.cgi?id=14739
+
+Cc: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Dmitry Monakhov <dmonakhov@openvz.org>
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/ext4/inode.c | 11 +++++------
+ 1 file changed, 5 insertions(+), 6 deletions(-)
+
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -1858,19 +1858,17 @@ repeat:
+
+ md_needed = mdblocks - EXT4_I(inode)->i_reserved_meta_blocks;
+ total = md_needed + nrblocks;
++ spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
+
+ /*
+ * Make quota reservation here to prevent quota overflow
+ * later. Real quota accounting is done at pages writeout
+ * time.
+ */
+- if (vfs_dq_reserve_block(inode, total)) {
+- spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
++ if (vfs_dq_reserve_block(inode, total))
+ return -EDQUOT;
+- }
+
+ if (ext4_claim_free_blocks(sbi, total)) {
+- spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
+ vfs_dq_release_reservation_block(inode, total);
+ if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
+ yield();
+@@ -1878,10 +1876,11 @@ repeat:
+ }
+ return -ENOSPC;
+ }
++ spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
+ EXT4_I(inode)->i_reserved_data_blocks += nrblocks;
+- EXT4_I(inode)->i_reserved_meta_blocks = mdblocks;
+-
++ EXT4_I(inode)->i_reserved_meta_blocks += md_needed;
+ spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
++
+ return 0; /* success */
+ }
+
--- /dev/null
+From 6d3b82f2d31f22085e5711b28dddcb9fb3d97a25 Mon Sep 17 00:00:00 2001
+From: Fang Wenqi <anton.fang@gmail.com>
+Date: Thu, 24 Dec 2009 17:51:42 -0500
+Subject: ext4: Update documentation to correct the inode_readahead_blks option name
+
+From: Fang Wenqi <anton.fang@gmail.com>
+
+commit 6d3b82f2d31f22085e5711b28dddcb9fb3d97a25 upstream.
+
+Per commit 240799cd, the option name for readahead should be
+inode_readahead_blks, not inode_readahead.
+
+Signed-off-by: Fang Wenqi <antonf@turbolinux.com.cn>
+Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ Documentation/filesystems/ext4.txt | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/Documentation/filesystems/ext4.txt
++++ b/Documentation/filesystems/ext4.txt
+@@ -196,7 +196,7 @@ nobarrier This also requires an IO stac
+ also be used to enable or disable barriers, for
+ consistency with other ext4 mount options.
+
+-inode_readahead=n This tuning parameter controls the maximum
++inode_readahead_blks=n This tuning parameter controls the maximum
+ number of inode table blocks that ext4's inode
+ table readahead algorithm will pre-read into
+ the buffer cache. The default value is 32 blocks.
--- /dev/null
+From 7ea6600148c265b1fd53e521022b1d7aec81d974 Mon Sep 17 00:00:00 2001
+From: Serge E. Hallyn <serue@us.ibm.com>
+Date: Tue, 29 Dec 2009 14:50:19 -0600
+Subject: generic_permission: MAY_OPEN is not write access
+
+From: Serge E. Hallyn <serue@us.ibm.com>
+
+commit 7ea6600148c265b1fd53e521022b1d7aec81d974 upstream.
+
+generic_permission was refusing CAP_DAC_READ_SEARCH-enabled
+processes from opening DAC-protected files read-only, because
+do_filp_open adds MAY_OPEN to the open mask.
+
+Ignore MAY_OPEN. After this patch, CAP_DAC_READ_SEARCH is
+again sufficient to open(fname, O_RDONLY) on a file to which
+DAC otherwise refuses us read permission.
+
+Reported-by: Mike Kazantsev <mk.fraggod@gmail.com>
+Signed-off-by: Serge E. Hallyn <serue@us.ibm.com>
+Tested-by: Mike Kazantsev <mk.fraggod@gmail.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/namei.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -234,6 +234,7 @@ int generic_permission(struct inode *ino
+ /*
+ * Searching includes executable on directories, else just read.
+ */
++ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
+ if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
+ if (capable(CAP_DAC_READ_SEARCH))
+ return 0;
--- /dev/null
+From 4235f684b66d6f00d2cd8849c884cf8f8b57ecad Mon Sep 17 00:00:00 2001
+From: Jonathan Cameron <jic23@cam.ac.uk>
+Date: Wed, 16 Dec 2009 21:38:28 +0100
+Subject: hwmon: (sht15) Off-by-one error in array index + incorrect constants
+
+From: Jonathan Cameron <jic23@cam.ac.uk>
+
+commit 4235f684b66d6f00d2cd8849c884cf8f8b57ecad upstream.
+
+Fix an off-by-one error in array index + incorrect constants.
+
+Signed-off-by: Christoph Walser <walser@tik.ee.ethz.ch>
+Signed-off-by: Jonathan Cameron <jic23@cam.ac.uk>
+Signed-off-by: Jean Delvare <khali@linux-fr.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/hwmon/sht15.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/hwmon/sht15.c
++++ b/drivers/hwmon/sht15.c
+@@ -305,7 +305,7 @@ static inline int sht15_calc_temp(struct
+ int d1 = 0;
+ int i;
+
+- for (i = 1; i < ARRAY_SIZE(temppoints) - 1; i++)
++ for (i = 1; i < ARRAY_SIZE(temppoints); i++)
+ /* Find pointer to interpolate */
+ if (data->supply_uV > temppoints[i - 1].vdd) {
+ d1 = (data->supply_uV/1000 - temppoints[i - 1].vdd)
+@@ -332,12 +332,12 @@ static inline int sht15_calc_humid(struc
+
+ const int c1 = -4;
+ const int c2 = 40500; /* x 10 ^ -6 */
+- const int c3 = 2800; /* x10 ^ -9 */
++ const int c3 = -2800; /* x10 ^ -9 */
+
+ RHlinear = c1*1000
+ + c2 * data->val_humid/1000
+ + (data->val_humid * data->val_humid * c3)/1000000;
+- return (temp - 25000) * (10000 + 800 * data->val_humid)
++ return (temp - 25000) * (10000 + 80 * data->val_humid)
+ / 1000000 + RHlinear;
+ }
+
--- /dev/null
+From a6d52d70677e99bdb89b6921c265d0a58c22e597 Mon Sep 17 00:00:00 2001
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Sat, 19 Dec 2009 15:36:02 -0700
+Subject: ioat2,3: put channel hardware in known state at init
+
+From: Dan Williams <dan.j.williams@intel.com>
+
+commit a6d52d70677e99bdb89b6921c265d0a58c22e597 upstream.
+
+Put the ioat2 and ioat3 state machines in the halted state with all
+errors cleared.
+
+The ioat1 init path is not disturbed for stability, there are no
+reported ioat1 initialization issues.
+
+Reported-by: Roland Dreier <rdreier@cisco.com>
+Tested-by: Roland Dreier <rdreier@cisco.com>
+Acked-by: Simon Horman <horms@verge.net.au>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/dma/ioat/dma.c | 2 -
+ drivers/dma/ioat/dma.h | 18 +++++++++++
+ drivers/dma/ioat/dma_v2.c | 69 ++++++++++++++++++++++++++++++++-----------
+ drivers/dma/ioat/dma_v2.h | 2 +
+ drivers/dma/ioat/dma_v3.c | 54 ++++++++++++++++++++++++---------
+ drivers/dma/ioat/registers.h | 1
+ 6 files changed, 114 insertions(+), 32 deletions(-)
+
+--- a/drivers/dma/ioat/dma.c
++++ b/drivers/dma/ioat/dma.c
+@@ -1032,7 +1032,7 @@ int __devinit ioat_probe(struct ioatdma_
+ dma->dev = &pdev->dev;
+
+ if (!dma->chancnt) {
+- dev_err(dev, "zero channels detected\n");
++ dev_err(dev, "channel enumeration error\n");
+ goto err_setup_interrupts;
+ }
+
+--- a/drivers/dma/ioat/dma.h
++++ b/drivers/dma/ioat/dma.h
+@@ -60,6 +60,7 @@
+ * @dca: direct cache access context
+ * @intr_quirk: interrupt setup quirk (for ioat_v1 devices)
+ * @enumerate_channels: hw version specific channel enumeration
++ * @reset_hw: hw version specific channel (re)initialization
+ * @cleanup_tasklet: select between the v2 and v3 cleanup routines
+ * @timer_fn: select between the v2 and v3 timer watchdog routines
+ * @self_test: hardware version specific self test for each supported op type
+@@ -78,6 +79,7 @@ struct ioatdma_device {
+ struct dca_provider *dca;
+ void (*intr_quirk)(struct ioatdma_device *device);
+ int (*enumerate_channels)(struct ioatdma_device *device);
++ int (*reset_hw)(struct ioat_chan_common *chan);
+ void (*cleanup_tasklet)(unsigned long data);
+ void (*timer_fn)(unsigned long data);
+ int (*self_test)(struct ioatdma_device *device);
+@@ -264,6 +266,22 @@ static inline void ioat_suspend(struct i
+ writeb(IOAT_CHANCMD_SUSPEND, chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
+ }
+
++static inline void ioat_reset(struct ioat_chan_common *chan)
++{
++ u8 ver = chan->device->version;
++
++ writeb(IOAT_CHANCMD_RESET, chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
++}
++
++static inline bool ioat_reset_pending(struct ioat_chan_common *chan)
++{
++ u8 ver = chan->device->version;
++ u8 cmd;
++
++ cmd = readb(chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
++ return (cmd & IOAT_CHANCMD_RESET) == IOAT_CHANCMD_RESET;
++}
++
+ static inline void ioat_set_chainaddr(struct ioat_dma_chan *ioat, u64 addr)
+ {
+ struct ioat_chan_common *chan = &ioat->base;
+--- a/drivers/dma/ioat/dma_v2.c
++++ b/drivers/dma/ioat/dma_v2.c
+@@ -239,20 +239,50 @@ void __ioat2_restart_chan(struct ioat2_d
+ __ioat2_start_null_desc(ioat);
+ }
+
+-static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
++int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo)
+ {
+- struct ioat_chan_common *chan = &ioat->base;
+- unsigned long phys_complete;
++ unsigned long end = jiffies + tmo;
++ int err = 0;
+ u32 status;
+
+ status = ioat_chansts(chan);
+ if (is_ioat_active(status) || is_ioat_idle(status))
+ ioat_suspend(chan);
+ while (is_ioat_active(status) || is_ioat_idle(status)) {
++ if (end && time_after(jiffies, end)) {
++ err = -ETIMEDOUT;
++ break;
++ }
+ status = ioat_chansts(chan);
+ cpu_relax();
+ }
+
++ return err;
++}
++
++int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo)
++{
++ unsigned long end = jiffies + tmo;
++ int err = 0;
++
++ ioat_reset(chan);
++ while (ioat_reset_pending(chan)) {
++ if (end && time_after(jiffies, end)) {
++ err = -ETIMEDOUT;
++ break;
++ }
++ cpu_relax();
++ }
++
++ return err;
++}
++
++static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
++{
++ struct ioat_chan_common *chan = &ioat->base;
++ unsigned long phys_complete;
++
++ ioat2_quiesce(chan, 0);
+ if (ioat_cleanup_preamble(chan, &phys_complete))
+ __cleanup(ioat, phys_complete);
+
+@@ -318,6 +348,19 @@ void ioat2_timer_event(unsigned long dat
+ spin_unlock_bh(&chan->cleanup_lock);
+ }
+
++static int ioat2_reset_hw(struct ioat_chan_common *chan)
++{
++ /* throw away whatever the channel was doing and get it initialized */
++ u32 chanerr;
++
++ ioat2_quiesce(chan, msecs_to_jiffies(100));
++
++ chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
++ writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
++
++ return ioat2_reset_sync(chan, msecs_to_jiffies(200));
++}
++
+ /**
+ * ioat2_enumerate_channels - find and initialize the device's channels
+ * @device: the device to be enumerated
+@@ -360,6 +403,10 @@ int ioat2_enumerate_channels(struct ioat
+ (unsigned long) ioat);
+ ioat->xfercap_log = xfercap_log;
+ spin_lock_init(&ioat->ring_lock);
++ if (device->reset_hw(&ioat->base)) {
++ i = 0;
++ break;
++ }
+ }
+ dma->chancnt = i;
+ return i;
+@@ -467,7 +514,6 @@ int ioat2_alloc_chan_resources(struct dm
+ struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
+ struct ioat_chan_common *chan = &ioat->base;
+ struct ioat_ring_ent **ring;
+- u32 chanerr;
+ int order;
+
+ /* have we already been set up? */
+@@ -477,12 +523,6 @@ int ioat2_alloc_chan_resources(struct dm
+ /* Setup register to interrupt and write completion status on error */
+ writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);
+
+- chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+- if (chanerr) {
+- dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr);
+- writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
+- }
+-
+ /* allocate a completion writeback area */
+ /* doing 2 32bit writes to mmio since 1 64b write doesn't work */
+ chan->completion = pci_pool_alloc(chan->device->completion_pool,
+@@ -746,13 +786,7 @@ void ioat2_free_chan_resources(struct dm
+ tasklet_disable(&chan->cleanup_task);
+ del_timer_sync(&chan->timer);
+ device->cleanup_tasklet((unsigned long) ioat);
+-
+- /* Delay 100ms after reset to allow internal DMA logic to quiesce
+- * before removing DMA descriptor resources.
+- */
+- writeb(IOAT_CHANCMD_RESET,
+- chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
+- mdelay(100);
++ device->reset_hw(chan);
+
+ spin_lock_bh(&ioat->ring_lock);
+ descs = ioat2_ring_space(ioat);
+@@ -839,6 +873,7 @@ int __devinit ioat2_dma_probe(struct ioa
+ int err;
+
+ device->enumerate_channels = ioat2_enumerate_channels;
++ device->reset_hw = ioat2_reset_hw;
+ device->cleanup_tasklet = ioat2_cleanup_tasklet;
+ device->timer_fn = ioat2_timer_event;
+ device->self_test = ioat_dma_self_test;
+--- a/drivers/dma/ioat/dma_v2.h
++++ b/drivers/dma/ioat/dma_v2.h
+@@ -185,6 +185,8 @@ bool reshape_ring(struct ioat2_dma_chan
+ void __ioat2_issue_pending(struct ioat2_dma_chan *ioat);
+ void ioat2_cleanup_tasklet(unsigned long data);
+ void ioat2_timer_event(unsigned long data);
++int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo);
++int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo);
+ extern struct kobj_type ioat2_ktype;
+ extern struct kmem_cache *ioat2_cache;
+ #endif /* IOATDMA_V2_H */
+--- a/drivers/dma/ioat/dma_v3.c
++++ b/drivers/dma/ioat/dma_v3.c
+@@ -1130,6 +1130,45 @@ static int __devinit ioat3_dma_self_test
+ return 0;
+ }
+
++static int ioat3_reset_hw(struct ioat_chan_common *chan)
++{
++ /* throw away whatever the channel was doing and get it
++ * initialized, with ioat3 specific workarounds
++ */
++ struct ioatdma_device *device = chan->device;
++ struct pci_dev *pdev = device->pdev;
++ u32 chanerr;
++ u16 dev_id;
++ int err;
++
++ ioat2_quiesce(chan, msecs_to_jiffies(100));
++
++ chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
++ writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
++
++ /* -= IOAT ver.3 workarounds =- */
++ /* Write CHANERRMSK_INT with 3E07h to mask out the errors
++ * that can cause stability issues for IOAT ver.3, and clear any
++ * pending errors
++ */
++ pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07);
++ err = pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
++ if (err) {
++ dev_err(&pdev->dev, "channel error register unreachable\n");
++ return err;
++ }
++ pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr);
++
++ /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
++ * (workaround for spurious config parity error after restart)
++ */
++ pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
++ if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0)
++ pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10);
++
++ return ioat2_reset_sync(chan, msecs_to_jiffies(200));
++}
++
+ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
+ {
+ struct pci_dev *pdev = device->pdev;
+@@ -1139,10 +1178,10 @@ int __devinit ioat3_dma_probe(struct ioa
+ struct ioat_chan_common *chan;
+ bool is_raid_device = false;
+ int err;
+- u16 dev_id;
+ u32 cap;
+
+ device->enumerate_channels = ioat2_enumerate_channels;
++ device->reset_hw = ioat3_reset_hw;
+ device->self_test = ioat3_dma_self_test;
+ dma = &device->common;
+ dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
+@@ -1218,19 +1257,6 @@ int __devinit ioat3_dma_probe(struct ioa
+ dma->device_prep_dma_xor_val = NULL;
+ #endif
+
+- /* -= IOAT ver.3 workarounds =- */
+- /* Write CHANERRMSK_INT with 3E07h to mask out the errors
+- * that can cause stability issues for IOAT ver.3
+- */
+- pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07);
+-
+- /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
+- * (workaround for spurious config parity error after restart)
+- */
+- pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
+- if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0)
+- pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10);
+-
+ err = ioat_probe(device);
+ if (err)
+ return err;
+--- a/drivers/dma/ioat/registers.h
++++ b/drivers/dma/ioat/registers.h
+@@ -27,6 +27,7 @@
+
+ #define IOAT_PCI_DEVICE_ID_OFFSET 0x02
+ #define IOAT_PCI_DMAUNCERRSTS_OFFSET 0x148
++#define IOAT_PCI_CHANERR_INT_OFFSET 0x180
+ #define IOAT_PCI_CHANERRMASK_INT_OFFSET 0x184
+
+ /* MMIO Device Registers */
--- /dev/null
+From cd78809f6191485a90ea6c92c2b58900ab5c156f Mon Sep 17 00:00:00 2001
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Thu, 17 Dec 2009 13:52:39 -0700
+Subject: ioat3: fix p-disabled q-continuation
+
+From: Dan Williams <dan.j.williams@intel.com>
+
+commit cd78809f6191485a90ea6c92c2b58900ab5c156f upstream.
+
+When continuing a pq calculation the driver needs 3 extra sources. The
+driver can perform a 3 source calculation with a single descriptor, but
+needs an extended descriptor to process up to 8 sources in one
+operation. However, in the p-disabled case only one extra source is
+needed. When continuing a p-disabled operation there are occasions
+(i.e. 0 < src_cnt % 8 < 3) where the tail operation does not need an
+extended descriptor. Properly account for this fact otherwise invalid
+'dmacount' values will be written to hardware usually causing the
+channel to halt with 'invalid descriptor' errors.
+
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/dma/ioat/dma_v3.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/drivers/dma/ioat/dma_v3.c
++++ b/drivers/dma/ioat/dma_v3.c
+@@ -650,9 +650,11 @@ __ioat3_prep_pq_lock(struct dma_chan *c,
+
+ num_descs = ioat2_xferlen_to_descs(ioat, len);
+ /* we need 2x the number of descriptors to cover greater than 3
+- * sources
++ * sources (we need 1 extra source in the q-only continuation
++ * case and 3 extra sources in the p+q continuation case.
+ */
+- if (src_cnt > 3 || flags & DMA_PREP_CONTINUE) {
++ if (src_cnt + dmaf_p_disabled_continue(flags) > 3 ||
++ (dmaf_continue(flags) && !dmaf_p_disabled_continue(flags))) {
+ with_ext = 1;
+ num_descs *= 2;
+ } else
--- /dev/null
+From 0b5ccb2ee250136dd7385b1c7da28417d0d4d32d Mon Sep 17 00:00:00 2001
+From: Patrick McHardy <kaber@trash.net>
+Date: Tue, 15 Dec 2009 16:59:18 +0100
+Subject: ipv6: reassembly: use separate reassembly queues for conntrack and local delivery
+
+From: Patrick McHardy <kaber@trash.net>
+
+commit 0b5ccb2ee250136dd7385b1c7da28417d0d4d32d upstream.
+
+Currently the same reassembly queue might be used for packets reassembled
+by conntrack in different positions in the stack (PREROUTING/LOCAL_OUT),
+as well as local delivery. This can cause "packet jumps" when the fragment
+completing a reassembled packet is queued from a different position in the
+stack than the previous ones.
+
+Add a "user" identifier to the reassembly queue key to separate the queues
+of each caller, similar to what we do for IPv4.
+
+Signed-off-by: Patrick McHardy <kaber@trash.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ include/net/ipv6.h | 7 +++++++
+ include/net/netfilter/ipv6/nf_conntrack_ipv6.h | 2 +-
+ net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c | 13 +++++++++++--
+ net/ipv6/netfilter/nf_conntrack_reasm.c | 7 ++++---
+ net/ipv6/reassembly.c | 5 ++++-
+ 5 files changed, 27 insertions(+), 7 deletions(-)
+
+--- a/include/net/ipv6.h
++++ b/include/net/ipv6.h
+@@ -354,8 +354,15 @@ static inline int ipv6_prefix_equal(cons
+
+ struct inet_frag_queue;
+
++enum ip6_defrag_users {
++ IP6_DEFRAG_LOCAL_DELIVER,
++ IP6_DEFRAG_CONNTRACK_IN,
++ IP6_DEFRAG_CONNTRACK_OUT,
++};
++
+ struct ip6_create_arg {
+ __be32 id;
++ u32 user;
+ struct in6_addr *src;
+ struct in6_addr *dst;
+ };
+--- a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
++++ b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
+@@ -9,7 +9,7 @@ extern struct nf_conntrack_l4proto nf_co
+
+ extern int nf_ct_frag6_init(void);
+ extern void nf_ct_frag6_cleanup(void);
+-extern struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb);
++extern struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user);
+ extern void nf_ct_frag6_output(unsigned int hooknum, struct sk_buff *skb,
+ struct net_device *in,
+ struct net_device *out,
+--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
++++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+@@ -187,6 +187,16 @@ out:
+ return nf_conntrack_confirm(skb);
+ }
+
++static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum,
++ struct sk_buff *skb)
++{
++ if (hooknum == NF_INET_PRE_ROUTING)
++ return IP6_DEFRAG_CONNTRACK_IN;
++ else
++ return IP6_DEFRAG_CONNTRACK_OUT;
++
++}
++
+ static unsigned int ipv6_defrag(unsigned int hooknum,
+ struct sk_buff *skb,
+ const struct net_device *in,
+@@ -199,8 +209,7 @@ static unsigned int ipv6_defrag(unsigned
+ if (skb->nfct)
+ return NF_ACCEPT;
+
+- reasm = nf_ct_frag6_gather(skb);
+-
++ reasm = nf_ct_frag6_gather(skb, nf_ct6_defrag_user(hooknum, skb));
+ /* queued */
+ if (reasm == NULL)
+ return NF_STOLEN;
+--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
++++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
+@@ -170,13 +170,14 @@ out:
+ /* Creation primitives. */
+
+ static __inline__ struct nf_ct_frag6_queue *
+-fq_find(__be32 id, struct in6_addr *src, struct in6_addr *dst)
++fq_find(__be32 id, u32 user, struct in6_addr *src, struct in6_addr *dst)
+ {
+ struct inet_frag_queue *q;
+ struct ip6_create_arg arg;
+ unsigned int hash;
+
+ arg.id = id;
++ arg.user = user;
+ arg.src = src;
+ arg.dst = dst;
+
+@@ -561,7 +562,7 @@ find_prev_fhdr(struct sk_buff *skb, u8 *
+ return 0;
+ }
+
+-struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb)
++struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user)
+ {
+ struct sk_buff *clone;
+ struct net_device *dev = skb->dev;
+@@ -607,7 +608,7 @@ struct sk_buff *nf_ct_frag6_gather(struc
+ if (atomic_read(&nf_init_frags.mem) > nf_init_frags.high_thresh)
+ nf_ct_frag6_evictor();
+
+- fq = fq_find(fhdr->identification, &hdr->saddr, &hdr->daddr);
++ fq = fq_find(fhdr->identification, user, &hdr->saddr, &hdr->daddr);
+ if (fq == NULL) {
+ pr_debug("Can't find and can't create new queue\n");
+ goto ret_orig;
+--- a/net/ipv6/reassembly.c
++++ b/net/ipv6/reassembly.c
+@@ -72,6 +72,7 @@ struct frag_queue
+ struct inet_frag_queue q;
+
+ __be32 id; /* fragment id */
++ u32 user;
+ struct in6_addr saddr;
+ struct in6_addr daddr;
+
+@@ -141,7 +142,7 @@ int ip6_frag_match(struct inet_frag_queu
+ struct ip6_create_arg *arg = a;
+
+ fq = container_of(q, struct frag_queue, q);
+- return (fq->id == arg->id &&
++ return (fq->id == arg->id && fq->user == arg->user &&
+ ipv6_addr_equal(&fq->saddr, arg->src) &&
+ ipv6_addr_equal(&fq->daddr, arg->dst));
+ }
+@@ -163,6 +164,7 @@ void ip6_frag_init(struct inet_frag_queu
+ struct ip6_create_arg *arg = a;
+
+ fq->id = arg->id;
++ fq->user = arg->user;
+ ipv6_addr_copy(&fq->saddr, arg->src);
+ ipv6_addr_copy(&fq->daddr, arg->dst);
+ }
+@@ -244,6 +246,7 @@ fq_find(struct net *net, __be32 id, stru
+ unsigned int hash;
+
+ arg.id = id;
++ arg.user = IP6_DEFRAG_LOCAL_DELIVER;
+ arg.src = src;
+ arg.dst = dst;
+
--- /dev/null
+From bc45a67079c916a9bd0a95b0b879cc0f259bac6e Mon Sep 17 00:00:00 2001
+From: Reinette Chatre <reinette.chatre@intel.com>
+Date: Mon, 14 Dec 2009 14:12:10 -0800
+Subject: iwl3945: disable power save
+
+From: Reinette Chatre <reinette.chatre@intel.com>
+
+commit bc45a67079c916a9bd0a95b0b879cc0f259bac6e upstream.
+
+we see from http://bugzilla.intellinuxwireless.org/show_bug.cgi?id=2125
+that power saving does not work well on 3945. Since then power saving has
+also been connected with association problems where an AP deauthenticates a
+3945 after it is unable to transmit data to it - this happens when 3945
+enters power savings mode.
+
+Disable power save support until issues are resolved.
+
+Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/wireless/iwlwifi/iwl-3945.c | 2 ++
+ drivers/net/wireless/iwlwifi/iwl3945-base.c | 8 +++++---
+ 2 files changed, 7 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
++++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
+@@ -3854,9 +3854,11 @@ static int iwl3945_setup_mac(struct iwl_
+ /* Tell mac80211 our characteristics */
+ hw->flags = IEEE80211_HW_SIGNAL_DBM |
+ IEEE80211_HW_NOISE_DBM |
+- IEEE80211_HW_SPECTRUM_MGMT |
+- IEEE80211_HW_SUPPORTS_PS |
+- IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
++ IEEE80211_HW_SPECTRUM_MGMT;
++
++ if (!priv->cfg->broken_powersave)
++ hw->flags |= IEEE80211_HW_SUPPORTS_PS |
++ IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
+
+ hw->wiphy->interface_modes =
+ BIT(NL80211_IFTYPE_STATION) |
+--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
++++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
+@@ -2895,6 +2895,7 @@ static struct iwl_cfg iwl3945_bg_cfg = {
+ .mod_params = &iwl3945_mod_params,
+ .use_isr_legacy = true,
+ .ht_greenfield_support = false,
++ .broken_powersave = true,
+ };
+
+ static struct iwl_cfg iwl3945_abg_cfg = {
+@@ -2909,6 +2910,7 @@ static struct iwl_cfg iwl3945_abg_cfg =
+ .mod_params = &iwl3945_mod_params,
+ .use_isr_legacy = true,
+ .ht_greenfield_support = false,
++ .broken_powersave = true,
+ };
+
+ struct pci_device_id iwl3945_hw_card_ids[] = {
--- /dev/null
+From dc57a303faab8562b92e85df0d79c4a05d7e2a61 Mon Sep 17 00:00:00 2001
+From: Zhu Yi <yi.zhu@intel.com>
+Date: Mon, 14 Dec 2009 14:12:12 -0800
+Subject: iwl3945: fix panic in iwl3945 driver
+
+From: Zhu Yi <yi.zhu@intel.com>
+
+commit dc57a303faab8562b92e85df0d79c4a05d7e2a61 upstream.
+
+3945 updated write_ptr without regard to read_ptr on the Tx path.
+This messes up our TFD on high load and result in the following:
+
+<1>[ 7290.414172] IP: [<ffffffffa0dd53a1>] iwl3945_rx_reply_tx+0xc1/0x450 [iwl3945]
+<4>[ 7290.414205] PGD 0
+<1>[ 7290.414214] Thread overran stack, or stack corrupted
+<0>[ 7290.414229] Oops: 0002 [#1] PREEMPT SMP
+<0>[ 7290.414246] last sysfs file: /sys/devices/platform/coretemp.1/temp1_input
+<4>[ 7290.414265] CPU 0
+<4>[ 7290.414274] Modules linked in: af_packet nfsd usb_storage usb_libusual cpufreq_powersave exportfs cpufreq_conservative iwl3945 nfs cpufreq_userspace snd_hda_codec_realtek acpi_cpufreq uvcvideo lockd iwlcore snd_hda_intel joydev coretemp nfs_acl videodev snd_hda_codec mac80211 v4l1_compat snd_hwdep sbp2 v4l2_compat_ioctl32 uhci_hcd psmouse auth_rpcgss ohci1394 cfg80211 ehci_hcd video ieee1394 snd_pcm serio_raw battery ac nvidia(P) usbcore output sunrpc evdev lirc_ene0100 snd_page_alloc rfkill tg3 libphy fuse lzo lzo_decompress lzo_compress
+<6>[ 7290.414486] Pid: 0, comm: swapper Tainted: P 2.6.32-rc8-wl #213 Aspire 5720
+<6>[ 7290.414507] RIP: 0010:[<ffffffffa0dd53a1>] [<ffffffffa0dd53a1>] iwl3945_rx_reply_tx+0xc1/0x450 [iwl3945]
+<6>[ 7290.414541] RSP: 0018:ffff880002203d60 EFLAGS: 00010246
+<6>[ 7290.414557] RAX: 000000000000004f RBX: ffff880064c11600 RCX: 0000000000000013
+<6>[ 7290.414576] RDX: ffffffffa0ddcf20 RSI: ffff8800512b7008 RDI: 0000000000000038
+<6>[ 7290.414596] RBP: ffff880002203dd0 R08: 0000000000000000 R09: 0000000000000100
+<6>[ 7290.414616] R10: 0000000000000001 R11: 0000000000000000 R12: 00000000000000a0
+<6>[ 7290.414635] R13: 0000000000000002 R14: 0000000000000013 R15: 0000000000020201
+<6>[ 7290.414655] FS: 0000000000000000(0000) GS:ffff880002200000(0000) knlGS:0000000000000000
+<6>[ 7290.414677] CS: 0010 DS: 0018 ES: 0018 CR0: 000000008005003b
+<6>[ 7290.414693] CR2: 0000000000000041 CR3: 0000000001001000 CR4: 00000000000006f0
+<6>[ 7290.414712] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+<6>[ 7290.414732] DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400
+<4>[ 7290.414752] Process swapper (pid: 0, threadinfo ffffffff81524000, task ffffffff81528b60)
+<0>[ 7290.414772] Stack:
+<4>[ 7290.414780] ffff880002203da0 0000000000000046 0000000000000000 0000000000000046
+<4>[ 7290.414804] <0> 0000000000000282 0000000000000282 0000000000000282 ffff880064c12010
+<4>[ 7290.414830] <0> ffff880002203db0 ffff880064c11600 ffff880064c12e50 ffff8800512b7000
+<0>[ 7290.414858] Call Trace:
+<0>[ 7290.414867] <IRQ>
+<4>[ 7290.414884] [<ffffffffa0dc8c47>] iwl3945_irq_tasklet+0x657/0x1740 [iwl3945]
+<4>[ 7290.414910] [<ffffffff8138fc60>] ? _spin_unlock+0x30/0x60
+<4>[ 7290.414931] [<ffffffff81049a21>] tasklet_action+0x101/0x110
+<4>[ 7290.414950] [<ffffffff8104a3d0>] __do_softirq+0xc0/0x160
+<4>[ 7290.414968] [<ffffffff8100d01c>] call_softirq+0x1c/0x30
+<4>[ 7290.414986] [<ffffffff8100eff5>] do_softirq+0x75/0xb0
+<4>[ 7290.415003] [<ffffffff81049ee5>] irq_exit+0x95/0xa0
+<4>[ 7290.415020] [<ffffffff8100e547>] do_IRQ+0x77/0xf0
+<4>[ 7290.415038] [<ffffffff8100c7d3>] ret_from_intr+0x0/0xf
+<0>[ 7290.415052] <EOI>
+<4>[ 7290.415067] [<ffffffff81234efa>] ? acpi_idle_enter_bm+0x270/0x2a5
+<4>[ 7290.415087] [<ffffffff81234f04>] ? acpi_idle_enter_bm+0x27a/0x2a5
+<4>[ 7290.415107] [<ffffffff81234efa>] ? acpi_idle_enter_bm+0x270/0x2a5
+<4>[ 7290.415130] [<ffffffff812c11f3>] ? cpuidle_idle_call+0x93/0xf0
+<4>[ 7290.415149] [<ffffffff8100b0d7>] ? cpu_idle+0xa7/0x110
+<4>[ 7290.415168] [<ffffffff8137b3d5>] ? rest_init+0x75/0x80
+<4>[ 7290.415187] [<ffffffff8158cd0a>] ? start_kernel+0x3a7/0x3b3
+<4>[ 7290.415206] [<ffffffff8158c315>] ? x86_64_start_reservations+0x125/0x129
+<4>[ 7290.415227] [<ffffffff8158c3fd>] ? x86_64_start_kernel+0xe4/0xeb
+<0>[ 7290.415243] Code: 00 41 39 ce 0f 8d e8 01 00 00 48 8b 47 40 48 63 d2 48 69 d2 98 00 00 00 4c 8b 04 02 48 c7 c2 20 cf dd a0 49 8d 78 38 49 8d 40 4f <c6> 47 09 00 c6 47 0c 00 c6 47 0f 00 c6 47 12 00 c6 47 15 00 49
+<1>[ 7290.415382] RIP [<ffffffffa0dd53a1>] iwl3945_rx_reply_tx+0xc1/0x450 [iwl3945]
+<4>[ 7290.415410] RSP <ffff880002203d60>
+<0>[ 7290.415421] CR2: 0000000000000041
+<4>[ 7290.415436] ---[ end trace ec46807277caa515 ]---
+<0>[ 7290.415450] Kernel panic - not syncing: Fatal exception in interrupt
+<4>[ 7290.415468] Pid: 0, comm: swapper Tainted: P D 2.6.32-rc8-wl #213
+<4>[ 7290.415486] Call Trace:
+<4>[ 7290.415495] <IRQ> [<ffffffff8138c040>] panic+0x7d/0x13a
+<4>[ 7290.415519] [<ffffffff8101071a>] oops_end+0xda/0xe0
+<4>[ 7290.415538] [<ffffffff8102e1ea>] no_context+0xea/0x250
+<4>[ 7290.415557] [<ffffffff81038991>] ? select_task_rq_fair+0x511/0x780
+<4>[ 7290.415578] [<ffffffff8102e475>] __bad_area_nosemaphore+0x125/0x1e0
+<4>[ 7290.415597] [<ffffffff81038d0c>] ? __enqueue_entity+0x7c/0x80
+<4>[ 7290.415616] [<ffffffff81039201>] ? enqueue_task_fair+0x111/0x150
+<4>[ 7290.415636] [<ffffffff8102e53e>] bad_area_nosemaphore+0xe/0x10
+<4>[ 7290.415656] [<ffffffff8102e8fa>] do_page_fault+0x26a/0x320
+<4>[ 7290.415674] [<ffffffff813905df>] page_fault+0x1f/0x30
+<4>[ 7290.415697] [<ffffffffa0dd53a1>] ? iwl3945_rx_reply_tx+0xc1/0x450 [iwl3945]
+<4>[ 7290.415723] [<ffffffffa0dc8c47>] iwl3945_irq_tasklet+0x657/0x1740 [iwl3945]
+<4>[ 7290.415746] [<ffffffff8138fc60>] ? _spin_unlock+0x30/0x60
+<4>[ 7290.415764] [<ffffffff81049a21>] tasklet_action+0x101/0x110
+<4>[ 7290.415783] [<ffffffff8104a3d0>] __do_softirq+0xc0/0x160
+<4>[ 7290.415801] [<ffffffff8100d01c>] call_softirq+0x1c/0x30
+<4>[ 7290.415818] [<ffffffff8100eff5>] do_softirq+0x75/0xb0
+<4>[ 7290.415835] [<ffffffff81049ee5>] irq_exit+0x95/0xa0
+<4>[ 7290.415852] [<ffffffff8100e547>] do_IRQ+0x77/0xf0
+<4>[ 7290.415869] [<ffffffff8100c7d3>] ret_from_intr+0x0/0xf
+<4>[ 7290.415883] <EOI> [<ffffffff81234efa>] ? acpi_idle_enter_bm+0x270/0x2a5
+<4>[ 7290.415911] [<ffffffff81234f04>] ? acpi_idle_enter_bm+0x27a/0x2a5
+<4>[ 7290.415931] [<ffffffff81234efa>] ? acpi_idle_enter_bm+0x270/0x2a5
+<4>[ 7290.415952] [<ffffffff812c11f3>] ? cpuidle_idle_call+0x93/0xf0
+<4>[ 7290.415971] [<ffffffff8100b0d7>] ? cpu_idle+0xa7/0x110
+<4>[ 7290.415989] [<ffffffff8137b3d5>] ? rest_init+0x75/0x80
+<4>[ 7290.416007] [<ffffffff8158cd0a>] ? start_kernel+0x3a7/0x3b3
+<4>[ 7290.416026] [<ffffffff8158c315>] ? x86_64_start_reservations+0x125/0x129
+<4>[ 7290.416047] [<ffffffff8158c3fd>] ? x86_64_start_kernel+0xe4/0xeb
+
+Reported-by: Maxim Levitsky <maximlevitsky@gmail.com>
+Tested-by: Maxim Levitsky <maximlevitsky@gmail.com>
+Signed-off-by: Zhu Yi <yi.zhu@intel.com>
+Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/wireless/iwlwifi/iwl3945-base.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
++++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
+@@ -562,6 +562,9 @@ static int iwl3945_tx_skb(struct iwl_pri
+ txq = &priv->txq[txq_id];
+ q = &txq->q;
+
++ if ((iwl_queue_space(q) < q->high_mark))
++ goto drop;
++
+ spin_lock_irqsave(&priv->lock, flags);
+
+ idx = get_cmd_index(q, q->write_ptr, 0);
--- /dev/null
+From 6c3069b1e7e983e176a5f826e2edffefdd404a08 Mon Sep 17 00:00:00 2001
+From: Reinette Chatre <reinette.chatre@intel.com>
+Date: Mon, 14 Dec 2009 14:12:13 -0800
+Subject: iwlwifi: fix 40MHz operation setting on cards that do not allow it
+
+From: Reinette Chatre <reinette.chatre@intel.com>
+
+commit 6c3069b1e7e983e176a5f826e2edffefdd404a08 upstream.
+
+Some devices have 40MHz operation disabled entirely. Ensure that the driver does
+not enable 40MHz operation if a channel does not allow this.
+
+This fixes http://bugzilla.intellinuxwireless.org/show_bug.cgi?id=2135
+
+Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/wireless/iwlwifi/iwl-eeprom.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
++++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+@@ -711,7 +711,8 @@ static int iwl_mod_ht40_chan_info(struct
+ ch_info->ht40_min_power = 0;
+ ch_info->ht40_scan_power = eeprom_ch->max_power_avg;
+ ch_info->ht40_flags = eeprom_ch->flags;
+- ch_info->ht40_extension_channel &= ~clear_ht40_extension_channel;
++ if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
++ ch_info->ht40_extension_channel &= ~clear_ht40_extension_channel;
+
+ return 0;
+ }
--- /dev/null
+From af6b8ee38833b39f70946f767740565ceb126961 Mon Sep 17 00:00:00 2001
+From: Johannes Berg <johannes@sipsolutions.net>
+Date: Mon, 14 Dec 2009 14:12:08 -0800
+Subject: iwlwifi: fix EEPROM/OTP reading endian annotations and a bug
+
+From: Johannes Berg <johannes@sipsolutions.net>
+
+commit af6b8ee38833b39f70946f767740565ceb126961 upstream.
+
+The construct "le16_to_cpu((__force __le16)(r >> 16))" has
+always bothered me when looking through the iwlwifi code,
+it shouldn't be necessary to __force anything, and before
+this code, "r" was obtained with an ioread32, which swaps
+each of the two u16 values in it properly when swapping the
+entire u32 value. I've had arguments about this code with
+people before, but always conceded they were right because
+removing it only made things not work at all on big endian
+platforms.
+
+However, analysing a failure of the OTP reading code, I now
+finally figured out what is going on, and why my intuition
+about that code being wrong was right all along.
+
+It turns out that the 'priv->eeprom' u8 array really wants
+to have the data in it in little endian. So the force code
+above and all really converts *to* little endian, not from
+it. Cf., for instance, the function iwl_eeprom_query16() --
+it reads two u8 values and combines them into a u16, in a
+little-endian way. And considering it more, it makes sense
+to have the eeprom array as on the device, after all not
+all values really are 16-bit values, the MAC address for
+instance is not.
+
+Now, what this really means is that all the annotations are
+completely wrong. The eeprom reading code should fill the
+priv->eeprom array as a __le16 array, with __le16 values.
+
+This also means that iwl_read_otp_word() should really have
+a __le16 pointer as the data argument, since it should be
+filling that in a format suitable for priv->eeprom.
+
+Propagating these changes throughout, iwl_find_otp_image()
+is found to be, now obviously visible, defective -- it uses
+the data returned by iwl_read_otp_word() directly as if it
+was CPU endianness. Fixing that, which is this hunk of the
+patch:
+
+- next_link_addr = link_value * sizeof(u16);
++ next_link_addr = le16_to_cpu(link_value) * sizeof(u16);
+
+is the only real change of this patch. Everything else is
+just fixing the sparse annotations.
+
+Also, the bug only shows up on big endian platforms with a
+1000 series card. 5000 and previous series do not use OTP,
+and 6000 series has shadow RAM support which means we don't
+ever use the defective code on any cards but 1000.
+
+Signed-off-by: Johannes Berg <johannes@sipsolutions.net>
+Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/wireless/iwlwifi/iwl-dev.h | 2 +-
+ drivers/net/wireless/iwlwifi/iwl-eeprom.c | 20 +++++++++++---------
+ 2 files changed, 12 insertions(+), 10 deletions(-)
+
+--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
++++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
+@@ -1149,7 +1149,7 @@ struct iwl_priv {
+ u32 last_beacon_time;
+ u64 last_tsf;
+
+- /* eeprom */
++ /* eeprom -- this is in the card's little endian byte order */
+ u8 *eeprom;
+ int nvm_device_type;
+ struct iwl_eeprom_calib_info *calib_info;
+--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
++++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+@@ -337,7 +337,7 @@ static int iwl_init_otp_access(struct iw
+ return ret;
+ }
+
+-static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, u16 *eeprom_data)
++static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, __le16 *eeprom_data)
+ {
+ int ret = 0;
+ u32 r;
+@@ -370,7 +370,7 @@ static int iwl_read_otp_word(struct iwl_
+ CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK);
+ IWL_ERR(priv, "Correctable OTP ECC error, continue read\n");
+ }
+- *eeprom_data = le16_to_cpu((__force __le16)(r >> 16));
++ *eeprom_data = cpu_to_le16(r >> 16);
+ return 0;
+ }
+
+@@ -379,7 +379,8 @@ static int iwl_read_otp_word(struct iwl_
+ */
+ static bool iwl_is_otp_empty(struct iwl_priv *priv)
+ {
+- u16 next_link_addr = 0, link_value;
++ u16 next_link_addr = 0;
++ __le16 link_value;
+ bool is_empty = false;
+
+ /* locate the beginning of OTP link list */
+@@ -409,7 +410,8 @@ static bool iwl_is_otp_empty(struct iwl_
+ static int iwl_find_otp_image(struct iwl_priv *priv,
+ u16 *validblockaddr)
+ {
+- u16 next_link_addr = 0, link_value = 0, valid_addr;
++ u16 next_link_addr = 0, valid_addr;
++ __le16 link_value = 0;
+ int usedblocks = 0;
+
+ /* set addressing mode to absolute to traverse the link list */
+@@ -429,7 +431,7 @@ static int iwl_find_otp_image(struct iwl
+ * check for more block on the link list
+ */
+ valid_addr = next_link_addr;
+- next_link_addr = link_value * sizeof(u16);
++ next_link_addr = le16_to_cpu(link_value) * sizeof(u16);
+ IWL_DEBUG_INFO(priv, "OTP blocks %d addr 0x%x\n",
+ usedblocks, next_link_addr);
+ if (iwl_read_otp_word(priv, next_link_addr, &link_value))
+@@ -463,7 +465,7 @@ static int iwl_find_otp_image(struct iwl
+ */
+ int iwl_eeprom_init(struct iwl_priv *priv)
+ {
+- u16 *e;
++ __le16 *e;
+ u32 gp = iwl_read32(priv, CSR_EEPROM_GP);
+ int sz;
+ int ret;
+@@ -482,7 +484,7 @@ int iwl_eeprom_init(struct iwl_priv *pri
+ ret = -ENOMEM;
+ goto alloc_err;
+ }
+- e = (u16 *)priv->eeprom;
++ e = (__le16 *)priv->eeprom;
+
+ ret = priv->cfg->ops->lib->eeprom_ops.verify_signature(priv);
+ if (ret < 0) {
+@@ -521,7 +523,7 @@ int iwl_eeprom_init(struct iwl_priv *pri
+ }
+ for (addr = validblockaddr; addr < validblockaddr + sz;
+ addr += sizeof(u16)) {
+- u16 eeprom_data;
++ __le16 eeprom_data;
+
+ ret = iwl_read_otp_word(priv, addr, &eeprom_data);
+ if (ret)
+@@ -545,7 +547,7 @@ int iwl_eeprom_init(struct iwl_priv *pri
+ goto done;
+ }
+ r = _iwl_read_direct32(priv, CSR_EEPROM_REG);
+- e[addr / 2] = le16_to_cpu((__force __le16)(r >> 16));
++ e[addr / 2] = cpu_to_le16(r >> 16);
+ }
+ }
+ ret = 0;
--- /dev/null
+From b7bb1756cb6a610cdbac8cfdad9e79bb5670b63b Mon Sep 17 00:00:00 2001
+From: Johannes Berg <johannes@sipsolutions.net>
+Date: Mon, 14 Dec 2009 14:12:09 -0800
+Subject: iwlwifi: fix more eeprom endian bugs
+
+From: Johannes Berg <johannes@sipsolutions.net>
+
+commit b7bb1756cb6a610cdbac8cfdad9e79bb5670b63b upstream.
+
+I've also for a long time had a problem with the
+temperature calculation code, which I had fixed
+by byte-swapping the values, and now it turns out
+that was the correct fix after all.
+
+Also, any use of iwl_eeprom_query_addr() that is
+for more than a u8 must be cast to little endian,
+and some structs as well.
+
+Fix all this. Again, no real impact on platforms
+that already are little endian.
+
+Signed-off-by: Johannes Berg <johannes@sipsolutions.net>
+Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/wireless/iwlwifi/iwl-4965.c | 2 +-
+ drivers/net/wireless/iwlwifi/iwl-5000-hw.h | 14 +++++++++-----
+ drivers/net/wireless/iwlwifi/iwl-5000.c | 7 ++++---
+ drivers/net/wireless/iwlwifi/iwl-eeprom.h | 4 ++--
+ 4 files changed, 16 insertions(+), 11 deletions(-)
+
+--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
++++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
+@@ -1337,7 +1337,7 @@ static int iwl4965_fill_txpower_tbl(stru
+ iwl4965_interpolate_chan(priv, channel, &ch_eeprom_info);
+
+ /* calculate tx gain adjustment based on power supply voltage */
+- voltage = priv->calib_info->voltage;
++ voltage = le16_to_cpu(priv->calib_info->voltage);
+ init_voltage = (s32)le32_to_cpu(priv->card_alive_init.voltage);
+ voltage_compensation =
+ iwl4965_get_voltage_compensation(voltage, init_voltage);
+--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
++++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
+@@ -460,14 +460,15 @@ static void iwl5000_set_ct_threshold(str
+ static int iwl5000_set_Xtal_calib(struct iwl_priv *priv)
+ {
+ struct iwl_calib_xtal_freq_cmd cmd;
+- u16 *xtal_calib = (u16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_XTAL);
++ __le16 *xtal_calib =
++ (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_XTAL);
+
+ cmd.hdr.op_code = IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD;
+ cmd.hdr.first_group = 0;
+ cmd.hdr.groups_num = 1;
+ cmd.hdr.data_valid = 1;
+- cmd.cap_pin1 = (u8)xtal_calib[0];
+- cmd.cap_pin2 = (u8)xtal_calib[1];
++ cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]);
++ cmd.cap_pin2 = le16_to_cpu(xtal_calib[1]);
+ return iwl_calib_set(&priv->calib_results[IWL_CALIB_XTAL],
+ (u8 *)&cmd, sizeof(cmd));
+ }
+--- a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
++++ b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
+@@ -92,11 +92,15 @@
+
+ static inline s32 iwl_temp_calib_to_offset(struct iwl_priv *priv)
+ {
+- u16 *temp_calib = (u16 *)iwl_eeprom_query_addr(priv,
+- EEPROM_5000_TEMPERATURE);
+- /* offset = temperature - voltage / coef */
+- s32 offset = (s32)(temp_calib[0] - temp_calib[1] / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF);
+- return offset;
++ u16 temperature, voltage;
++ __le16 *temp_calib =
++ (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_TEMPERATURE);
++
++ temperature = le16_to_cpu(temp_calib[0]);
++ voltage = le16_to_cpu(temp_calib[1]);
++
++ /* offset = temp - volt / coeff */
++ return (s32)(temperature - voltage / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF);
+ }
+
+ /* Fixed (non-configurable) rx data from phy */
+--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
++++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+@@ -133,7 +133,7 @@ struct iwl_eeprom_channel {
+ *
+ */
+ struct iwl_eeprom_enhanced_txpwr {
+- u16 reserved;
++ __le16 common;
+ s8 chain_a_max;
+ s8 chain_b_max;
+ s8 chain_c_max;
+@@ -347,7 +347,7 @@ struct iwl_eeprom_calib_subband_info {
+ struct iwl_eeprom_calib_info {
+ u8 saturation_power24; /* half-dBm (e.g. "34" = 17 dBm) */
+ u8 saturation_power52; /* half-dBm */
+- s16 voltage; /* signed */
++ __le16 voltage; /* signed */
+ struct iwl_eeprom_calib_subband_info
+ band_info[EEPROM_TX_POWER_BANDS];
+ } __attribute__ ((packed));
--- /dev/null
+From 6c853da3f30c93eae847ecbcd9fdf10ba0da04c2 Mon Sep 17 00:00:00 2001
+From: Zhu Yi <yi.zhu@intel.com>
+Date: Mon, 28 Dec 2009 14:23:11 +0800
+Subject: iwmc3200wifi: fix array out-of-boundary access
+
+From: Zhu Yi <yi.zhu@intel.com>
+
+commit 6c853da3f30c93eae847ecbcd9fdf10ba0da04c2 upstream.
+
+Allocate priv->rx_packets[IWM_RX_ID_HASH + 1] because the max array
+index is IWM_RX_ID_HASH according to IWM_RX_ID_GET_HASH().
+
+Signed-off-by: Zhu Yi <yi.zhu@intel.com>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/wireless/iwmc3200wifi/iwm.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/wireless/iwmc3200wifi/iwm.h
++++ b/drivers/net/wireless/iwmc3200wifi/iwm.h
+@@ -258,7 +258,7 @@ struct iwm_priv {
+
+ struct sk_buff_head rx_list;
+ struct list_head rx_tickets;
+- struct list_head rx_packets[IWM_RX_ID_HASH];
++ struct list_head rx_packets[IWM_RX_ID_HASH + 1];
+ struct workqueue_struct *rx_wq;
+ struct work_struct rx_worker;
+
--- /dev/null
+From 3e26120cc7c819c97bc07281ca1fb9017cfe9a39 Mon Sep 17 00:00:00 2001
+From: WANG Cong <amwang@redhat.com>
+Date: Thu, 17 Dec 2009 15:27:05 -0800
+Subject: kernel/sysctl.c: fix the incomplete part of sysctl_max_map_count-should-be-non-negative.patch
+
+From: WANG Cong <amwang@redhat.com>
+
+commit 3e26120cc7c819c97bc07281ca1fb9017cfe9a39 upstream.
+
+It is a mistake that we used 'proc_dointvec', it should be
+'proc_dointvec_minmax', as in the original patch.
+
+Signed-off-by: WANG Cong <amwang@redhat.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ kernel/sysctl.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -1607,7 +1607,7 @@ static struct ctl_table debug_table[] =
+ .data = &show_unhandled_signals,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+- .proc_handler = proc_dointvec,
++ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
+ },
+ #endif
--- /dev/null
+From a00ae4d21b2fa9379914f270ffffd8d3bec55430 Mon Sep 17 00:00:00 2001
+From: Geert Uytterhoeven <geert@linux-m68k.org>
+Date: Sun, 13 Dec 2009 20:21:34 +0100
+Subject: Keys: KEYCTL_SESSION_TO_PARENT needs TIF_NOTIFY_RESUME architecture support
+
+From: Geert Uytterhoeven <geert@linux-m68k.org>
+
+commit a00ae4d21b2fa9379914f270ffffd8d3bec55430 upstream.
+
+As of commit ee18d64c1f632043a02e6f5ba5e045bb26a5465f ("KEYS: Add a keyctl to
+install a process's session keyring on its parent [try #6]"), CONFIG_KEYS=y
+fails to build on architectures that haven't implemented TIF_NOTIFY_RESUME yet:
+
+security/keys/keyctl.c: In function 'keyctl_session_to_parent':
+security/keys/keyctl.c:1312: error: 'TIF_NOTIFY_RESUME' undeclared (first use in this function)
+security/keys/keyctl.c:1312: error: (Each undeclared identifier is reported only once
+security/keys/keyctl.c:1312: error: for each function it appears in.)
+
+Make KEYCTL_SESSION_TO_PARENT depend on TIF_NOTIFY_RESUME until
+m68k and xtensa have implemented it.
+
+Signed-off-by: Geert Uytterhoeven <geert@linux-m68k.org>
+Signed-off-by: James Morris <jmorris@namei.org>
+Acked-by: Mike Frysinger <vapier@gentoo.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ security/keys/keyctl.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/security/keys/keyctl.c
++++ b/security/keys/keyctl.c
+@@ -1236,6 +1236,7 @@ long keyctl_get_security(key_serial_t ke
+ */
+ long keyctl_session_to_parent(void)
+ {
++#ifdef TIF_NOTIFY_RESUME
+ struct task_struct *me, *parent;
+ const struct cred *mycred, *pcred;
+ struct cred *cred, *oldcred;
+@@ -1326,6 +1327,15 @@ not_permitted:
+ error_keyring:
+ key_ref_put(keyring_r);
+ return ret;
++
++#else /* !TIF_NOTIFY_RESUME */
++ /*
++ * To be removed when TIF_NOTIFY_RESUME has been implemented on
++ * m68k/xtensa
++ */
++#warning TIF_NOTIFY_RESUME not implemented
++ return -EOPNOTSUPP;
++#endif /* !TIF_NOTIFY_RESUME */
+ }
+
+ /*****************************************************************************/
--- /dev/null
+From hugh.dickins@tiscali.co.uk Tue Jan 5 10:54:39 2010
+From: Hugh Dickins <hugh.dickins@tiscali.co.uk>
+Date: Wed, 30 Dec 2009 23:00:30 +0000 (GMT)
+Subject: ksm: fix mlockfreed to munlocked
+To: stable@kernel.org
+Cc: Andrea Arcangeli <aarcange@redhat.com>, Chris Wright <chrisw@redhat.com>, Rik van Riel <riel@redhat.com>, Mel Gorman <mel@csn.ul.ie>, Izik Eidus <ieidus@redhat.com>, KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>, Andrew Morton <akpm@linux-foundation.org>
+Message-ID: <alpine.LSU.2.00.0912302254090.7519@sister.anvils>
+
+From: Hugh Dickins <hugh.dickins@tiscali.co.uk>
+
+2.6.33-rc1 commit 73848b4684e84a84cfd1555af78d41158f31e16b, adjusted
+to include 31e855ea7173bdb0520f9684580423a9560f66e0's movement of
+the unlock_page(oldpage), but omit other intervening cleanups.
+
+When KSM merges an mlocked page, it has been forgetting to munlock it:
+that's been left to free_page_mlock(), which reports it in /proc/vmstat
+as unevictable_pgs_mlockfreed instead of unevictable_pgs_munlocked,
+which indicates that such pages _might_ be left unevictable for long
+after they should be evictable. Call munlock_vma_page() to fix that.
+
+Signed-off-by: Hugh Dickins <hugh.dickins@tiscali.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ mm/internal.h | 3 ++-
+ mm/ksm.c | 14 +++++++-------
+ mm/mlock.c | 4 ++--
+ 3 files changed, 11 insertions(+), 10 deletions(-)
+
+--- a/mm/internal.h
++++ b/mm/internal.h
+@@ -107,9 +107,10 @@ static inline int is_mlocked_vma(struct
+ }
+
+ /*
+- * must be called with vma's mmap_sem held for read, and page locked.
++ * must be called with vma's mmap_sem held for read or write, and page locked.
+ */
+ extern void mlock_vma_page(struct page *page);
++extern void munlock_vma_page(struct page *page);
+
+ /*
+ * Clear the page's PageMlocked(). This can be useful in a situation where
+--- a/mm/ksm.c
++++ b/mm/ksm.c
+@@ -34,6 +34,7 @@
+ #include <linux/ksm.h>
+
+ #include <asm/tlbflush.h>
++#include "internal.h"
+
+ /*
+ * A few notes about the KSM scanning process,
+@@ -767,15 +768,14 @@ static int try_to_merge_one_page(struct
+ * ptes are necessarily already write-protected. But in either
+ * case, we need to lock and check page_count is not raised.
+ */
+- if (write_protect_page(vma, oldpage, &orig_pte)) {
+- unlock_page(oldpage);
+- goto out_putpage;
+- }
+- unlock_page(oldpage);
+-
+- if (pages_identical(oldpage, newpage))
++ if (write_protect_page(vma, oldpage, &orig_pte) == 0 &&
++ pages_identical(oldpage, newpage))
+ err = replace_page(vma, oldpage, newpage, orig_pte);
+
++ if ((vma->vm_flags & VM_LOCKED) && !err)
++ munlock_vma_page(oldpage);
++
++ unlock_page(oldpage);
+ out_putpage:
+ put_page(oldpage);
+ put_page(newpage);
+--- a/mm/mlock.c
++++ b/mm/mlock.c
+@@ -99,14 +99,14 @@ void mlock_vma_page(struct page *page)
+ * not get another chance to clear PageMlocked. If we successfully
+ * isolate the page and try_to_munlock() detects other VM_LOCKED vmas
+ * mapping the page, it will restore the PageMlocked state, unless the page
+- * is mapped in a non-linear vma. So, we go ahead and SetPageMlocked(),
++ * is mapped in a non-linear vma. So, we go ahead and ClearPageMlocked(),
+ * perhaps redundantly.
+ * If we lose the isolation race, and the page is mapped by other VM_LOCKED
+ * vmas, we'll detect this in vmscan--via try_to_munlock() or try_to_unmap()
+ * either of which will restore the PageMlocked state by calling
+ * mlock_vma_page() above, if it can grab the vma's mmap sem.
+ */
+-static void munlock_vma_page(struct page *page)
++void munlock_vma_page(struct page *page)
+ {
+ BUG_ON(!PageLocked(page));
+
--- /dev/null
+From 6e24a6eff4571002cd48b99a2b92dc829ce39cb9 Mon Sep 17 00:00:00 2001
+From: Marcelo Tosatti <mtosatti@redhat.com>
+Date: Mon, 14 Dec 2009 17:37:35 -0200
+Subject: KVM: LAPIC: make sure IRR bitmap is scanned after vm load
+
+From: Marcelo Tosatti <mtosatti@redhat.com>
+
+commit 6e24a6eff4571002cd48b99a2b92dc829ce39cb9 upstream.
+
+The vcpus are initialized with irr_pending set to false, but
+loading the LAPIC registers with pending IRR fails to reset
+the irr_pending variable.
+
+Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
+Signed-off-by: Avi Kivity <avi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kvm/lapic.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -1156,6 +1156,7 @@ void kvm_apic_post_state_restore(struct
+ hrtimer_cancel(&apic->lapic_timer.timer);
+ update_divide_count(apic);
+ start_apic_timer(apic);
++ apic->irr_pending = true;
+ }
+
+ void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
--- /dev/null
+From fb341f572d26e0786167cd96b90cc4febed830cf Mon Sep 17 00:00:00 2001
+From: Marcelo Tosatti <mtosatti@redhat.com>
+Date: Sat, 5 Dec 2009 12:34:11 -0200
+Subject: KVM: MMU: remove prefault from invlpg handler
+
+From: Marcelo Tosatti <mtosatti@redhat.com>
+
+commit fb341f572d26e0786167cd96b90cc4febed830cf upstream.
+
+The invlpg prefault optimization breaks Windows 2008 R2 occasionally.
+
+The visible effect is that the invlpg handler instantiates a pte which
+is, microseconds later, written with a different gfn by another vcpu.
+
+The OS could have other mechanisms to prevent a present translation from
+being used, which the hypervisor is unaware of.
+
+While the documentation states that the cpu is at liberty to prefetch tlb
+entries, it looks like this is not heeded, so remove tlb prefetch from
+invlpg.
+
+Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
+Signed-off-by: Avi Kivity <avi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kvm/paging_tmpl.h | 18 ------------------
+ 1 file changed, 18 deletions(-)
+
+--- a/arch/x86/kvm/paging_tmpl.h
++++ b/arch/x86/kvm/paging_tmpl.h
+@@ -455,8 +455,6 @@ out_unlock:
+ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
+ {
+ struct kvm_shadow_walk_iterator iterator;
+- pt_element_t gpte;
+- gpa_t pte_gpa = -1;
+ int level;
+ u64 *sptep;
+ int need_flush = 0;
+@@ -471,10 +469,6 @@ static void FNAME(invlpg)(struct kvm_vcp
+ if (level == PT_PAGE_TABLE_LEVEL ||
+ ((level == PT_DIRECTORY_LEVEL && is_large_pte(*sptep))) ||
+ ((level == PT_PDPE_LEVEL && is_large_pte(*sptep)))) {
+- struct kvm_mmu_page *sp = page_header(__pa(sptep));
+-
+- pte_gpa = (sp->gfn << PAGE_SHIFT);
+- pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
+
+ if (is_shadow_present_pte(*sptep)) {
+ rmap_remove(vcpu->kvm, sptep);
+@@ -493,18 +487,6 @@ static void FNAME(invlpg)(struct kvm_vcp
+ if (need_flush)
+ kvm_flush_remote_tlbs(vcpu->kvm);
+ spin_unlock(&vcpu->kvm->mmu_lock);
+-
+- if (pte_gpa == -1)
+- return;
+- if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
+- sizeof(pt_element_t)))
+- return;
+- if (is_present_gpte(gpte) && (gpte & PT_ACCESSED_MASK)) {
+- if (mmu_topup_memory_caches(vcpu))
+- return;
+- kvm_mmu_pte_write(vcpu, pte_gpa, (const u8 *)&gpte,
+- sizeof(pt_element_t), 0);
+- }
+ }
+
+ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
--- /dev/null
+From 3e27249c84beed1c79d767b350e52ad038db9053 Mon Sep 17 00:00:00 2001
+From: Rusty Russell <rusty@rustcorp.com.au>
+Date: Mon, 4 Jan 2010 19:26:14 +1030
+Subject: lguest: fix bug in setting guest GDT entry
+
+From: Rusty Russell <rusty@rustcorp.com.au>
+
+commit 3e27249c84beed1c79d767b350e52ad038db9053 upstream.
+
+We kill the guest, but then we blatt random stuff.
+
+Reported-by: Dan Carpenter <error27@gmail.com>
+Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/lguest/segments.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/lguest/segments.c
++++ b/drivers/lguest/segments.c
+@@ -179,8 +179,10 @@ void load_guest_gdt_entry(struct lg_cpu
+ * We assume the Guest has the same number of GDT entries as the
+ * Host, otherwise we'd have to dynamically allocate the Guest GDT.
+ */
+- if (num >= ARRAY_SIZE(cpu->arch.gdt))
++ if (num >= ARRAY_SIZE(cpu->arch.gdt)) {
+ kill_guest(cpu, "too many gdt entries %i", num);
++ return;
++ }
+
+ /* Set it up, then fix it. */
+ cpu->arch.gdt[num].a = lo;
--- /dev/null
+From 45b241689179a6065384260242637cf21dabfb2d Mon Sep 17 00:00:00 2001
+From: Daniel Mack <daniel@caiaq.de>
+Date: Wed, 16 Dec 2009 05:12:58 +0100
+Subject: Libertas: fix buffer overflow in lbs_get_essid()
+
+From: Daniel Mack <daniel@caiaq.de>
+
+commit 45b241689179a6065384260242637cf21dabfb2d upstream.
+
+The libertas driver copies the SSID buffer back to the wireless core and
+appends a trailing NULL character for termination. This is
+
+a) unnecessary because the buffer is allocated with kzalloc and is hence
+ already NULLed when this function is called, and
+
+b) for priv->curbssparams.ssid_len == 32, it writes back one byte too
+ much which causes memory corruptions.
+
+Fix this by removing the extra write.
+
+Signed-off-by: Daniel Mack <daniel@caiaq.de>
+Cc: Stephen Hemminger <shemminger@vyatta.com>
+Cc: Maithili Hinge <maithili@marvell.com>
+Cc: Kiran Divekar <dkiran@marvell.com>
+Cc: Michael Hirsch <m.hirsch@raumfeld.com>
+Cc: netdev@vger.kernel.org
+Cc: libertas-dev@lists.infradead.org
+Cc: linux-wireless@lists.infradead.org
+Acked-by: Holger Schurig <holgerschurig@gmail.com>
+Acked-by: Dan Williams <dcbw@redhat.com>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/wireless/libertas/wext.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/drivers/net/wireless/libertas/wext.c
++++ b/drivers/net/wireless/libertas/wext.c
+@@ -1953,10 +1953,8 @@ static int lbs_get_essid(struct net_devi
+ if (priv->connect_status == LBS_CONNECTED) {
+ memcpy(extra, priv->curbssparams.ssid,
+ priv->curbssparams.ssid_len);
+- extra[priv->curbssparams.ssid_len] = '\0';
+ } else {
+ memset(extra, 0, 32);
+- extra[priv->curbssparams.ssid_len] = '\0';
+ }
+ /*
+ * If none, we may want to get the one that was set
--- /dev/null
+From 450aae3d7b60a970f266349a837dfb30a539198b Mon Sep 17 00:00:00 2001
+From: Sujith <Sujith.Manoharan@atheros.com>
+Date: Mon, 2 Nov 2009 12:33:23 +0530
+Subject: mac80211: Fix IBSS merge
+
+From: Sujith <Sujith.Manoharan@atheros.com>
+
+commit 450aae3d7b60a970f266349a837dfb30a539198b upstream.
+
+Currently, in IBSS mode, a single creator would go into
+a loop trying to merge/scan. This happens because the IBSS timer is
+rearmed on finishing a scan and the subsequent
+timer invocation requests another scan immediately.
+
+This patch fixes this issue by checking if we have just completed
+a scan run trying to merge with other IBSS networks.
+
+Signed-off-by: Sujith <Sujith.Manoharan@atheros.com>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Cc: Luis Rodriguez <lrodriguez@atheros.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/mac80211/ibss.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/net/mac80211/ibss.c
++++ b/net/mac80211/ibss.c
+@@ -455,6 +455,10 @@ static void ieee80211_sta_merge_ibss(str
+
+ ieee80211_sta_expire(sdata, IEEE80211_IBSS_INACTIVITY_LIMIT);
+
++ if (time_before(jiffies, ifibss->last_scan_completed +
++ IEEE80211_IBSS_MERGE_INTERVAL))
++ return;
++
+ if (ieee80211_sta_active_ibss(sdata))
+ return;
+
--- /dev/null
+From 24feda0084722189468a65e20019cdd8ef99702b Mon Sep 17 00:00:00 2001
+From: Luis R. Rodriguez <lrodriguez@atheros.com>
+Date: Thu, 24 Dec 2009 15:38:22 -0500
+Subject: mac80211: fix propagation of failed hardware reconfigurations
+
+From: Luis R. Rodriguez <lrodriguez@atheros.com>
+
+commit 24feda0084722189468a65e20019cdd8ef99702b upstream.
+
+mac80211 does not propagate failed hardware reconfiguration
+requests. For suspend and resume this is important due to all
+the possible issues that can come out of the suspend <-> resume
+cycle. Not propagating the error means cfg80211 will assume
+the resume for the device went through fine and mac80211 will
+continue on trying to poke at the hardware, enable timers,
+queue work, and so on for a device which is completley
+unfunctional.
+
+The least we can do is to propagate device start issues and
+warn when this occurs upon resume. A side effect of this patch
+is we also now propagate the start errors upon harware
+reconfigurations (non-suspend), but this should also be desirable
+anyway, there is no point in continuing to reconfigure a
+device if mac80211 was unable to start the device.
+
+For further details refer to the thread:
+
+http://marc.info/?t=126151038700001&r=1&w=2
+
+Signed-off-by: Luis R. Rodriguez <lrodriguez@atheros.com>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/mac80211/util.c | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+--- a/net/mac80211/util.c
++++ b/net/mac80211/util.c
+@@ -1031,7 +1031,19 @@ int ieee80211_reconfig(struct ieee80211_
+
+ /* restart hardware */
+ if (local->open_count) {
++ /*
++ * Upon resume hardware can sometimes be goofy due to
++ * various platform / driver / bus issues, so restarting
++ * the device may at times not work immediately. Propagate
++ * the error.
++ */
+ res = drv_start(local);
++ if (res) {
++ WARN(local->suspended, "Harware became unavailable "
++ "upon resume. This is could be a software issue"
++ "prior to suspend or a harware issue\n");
++ return res;
++ }
+
+ ieee80211_led_radio(local, true);
+ }
--- /dev/null
+From b98c06b6debfe84c90200143bb1102f312f50a33 Mon Sep 17 00:00:00 2001
+From: Luis R. Rodriguez <lrodriguez@atheros.com>
+Date: Thu, 24 Dec 2009 15:26:09 -0500
+Subject: mac80211: fix race with suspend and dynamic_ps_disable_work
+
+From: Luis R. Rodriguez <lrodriguez@atheros.com>
+
+commit b98c06b6debfe84c90200143bb1102f312f50a33 upstream.
+
+When mac80211 suspends it calls a driver's suspend callback
+as a last step and after that the driver assumes no calls will
+be made to it until we resume and its start callback is kicked.
+If such calls are made, however, suspend can end up throwing
+hardware in an unexpected state and making the device unusable
+upon resume.
+
+Fix this by preventing mac80211 to schedule dynamic_ps_disable_work
+by checking for when mac80211 starts to suspend and starts
+quiescing. Frames should be allowed to go through though as
+that is part of the quiescing steps and we do not flush the
+mac80211 workqueue since it was already done towards the
+beginning of suspend cycle.
+
+The other mac80211 issue will be handled in the next patch.
+
+For further details see refer to the thread:
+
+http://marc.info/?t=126144866100001&r=1&w=2
+
+Cc: stable@kernel.org
+Signed-off-by: Luis R. Rodriguez <lrodriguez@atheros.com>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/mac80211/tx.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -1401,6 +1401,7 @@ static void ieee80211_xmit(struct ieee80
+
+ if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) &&
+ local->hw.conf.dynamic_ps_timeout > 0 &&
++ !local->quiescing &&
+ !(local->scanning) && local->ps_sdata) {
+ if (local->hw.conf.flags & IEEE80211_CONF_PS) {
+ ieee80211_stop_queues_by_reason(&local->hw,
--- /dev/null
+From 0183826b58a2712ffe608bc3302447be3e6a3ab8 Mon Sep 17 00:00:00 2001
+From: Johannes Berg <johannes@sipsolutions.net>
+Date: Thu, 17 Dec 2009 16:16:53 +0100
+Subject: mac80211: fix WMM AP settings application
+
+From: Johannes Berg <johannes@sipsolutions.net>
+
+commit 0183826b58a2712ffe608bc3302447be3e6a3ab8 upstream.
+
+My
+ commit 77fdaa12cea26c204cc12c312fe40bc0f3dcdfd8
+ Author: Johannes Berg <johannes@sipsolutions.net>
+ Date: Tue Jul 7 03:45:17 2009 +0200
+
+ mac80211: rework MLME for multiple authentications
+
+inadvertently broke WMM because it removed, along with
+a bunch of other now useless initialisations, the line
+initialising sdata->u.mgd.wmm_last_param_set to -1
+which would make it adopt any WMM parameter set. If,
+as is usually the case, the AP uses WMM parameter set
+sequence number zero, we'd never update it until the
+AP changes the sequence number.
+
+Add the missing initialisation back to get the WMM
+settings from the AP applied locally.
+
+Signed-off-by: Johannes Berg <johannes@sipsolutions.net>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/mac80211/mlme.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -904,6 +904,14 @@ static void ieee80211_set_associated(str
+ sdata->u.mgd.flags &= ~(IEEE80211_STA_CONNECTION_POLL |
+ IEEE80211_STA_BEACON_POLL);
+
++ /*
++ * Always handle WMM once after association regardless
++ * of the first value the AP uses. Setting -1 here has
++ * that effect because the AP values is an unsigned
++ * 4-bit value.
++ */
++ sdata->u.mgd.wmm_last_param_set = -1;
++
+ ieee80211_led_assoc(local, 1);
+
+ sdata->vif.bss_conf.assoc = 1;
--- /dev/null
+From cbd1998377504df005302ac90d49db72a48552a6 Mon Sep 17 00:00:00 2001
+From: NeilBrown <neilb@suse.de>
+Date: Wed, 30 Dec 2009 12:08:49 +1100
+Subject: md: Fix unfortunate interaction with evms
+
+From: NeilBrown <neilb@suse.de>
+
+commit cbd1998377504df005302ac90d49db72a48552a6 upstream.
+
+evms configures md arrays by:
+ open device
+ send ioctl
+ close device
+
+for each different ioctl needed.
+Since 2.6.29, the device can disappear after the 'close'
+unless a significant configuration has happened to the device.
+The change made by "SET_ARRAY_INFO" can be too minor to stop the device
+from disappearing, but important enough that losing the change is bad.
+
+So: make sure SET_ARRAY_INFO sets mddev->ctime, and keep the device
+active as long as ctime is non-zero (it gets zeroed with lots of other
+things when the array is stopped).
+
+This is suitable for -stable kernels since 2.6.29.
+
+Signed-off-by: NeilBrown <neilb@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/md/md.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -282,7 +282,9 @@ static void mddev_put(mddev_t *mddev)
+ if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
+ return;
+ if (!mddev->raid_disks && list_empty(&mddev->disks) &&
+- !mddev->hold_active) {
++ mddev->ctime == 0 && !mddev->hold_active) {
++ /* Array is not configured at all, and not held active,
++ * so destroy it */
+ list_del(&mddev->all_mddevs);
+ if (mddev->gendisk) {
+ /* we did a probe so need to clean up.
+@@ -5071,6 +5073,10 @@ static int set_array_info(mddev_t * mdde
+ mddev->minor_version = info->minor_version;
+ mddev->patch_version = info->patch_version;
+ mddev->persistent = !info->not_persistent;
++ /* ensure mddev_put doesn't delete this now that there
++ * is some minimal configuration.
++ */
++ mddev->ctime = get_seconds();
+ return 0;
+ }
+ mddev->major_version = MD_MAJOR_VERSION;
--- /dev/null
+From d31f56dbf8bafaacb0c617f9a6f137498d5c7aed Mon Sep 17 00:00:00 2001
+From: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
+Date: Tue, 15 Dec 2009 16:47:12 -0800
+Subject: memcg: avoid oom-killing innocent task in case of use_hierarchy
+
+From: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
+
+commit d31f56dbf8bafaacb0c617f9a6f137498d5c7aed upstream.
+
+task_in_mem_cgroup(), which is called by select_bad_process() to check
+whether a task can be a candidate for being oom-killed from memcg's limit,
+checks "curr->use_hierarchy"("curr" is the mem_cgroup the task belongs
+to).
+
+But this check return true(it's false positive) when:
+
+ <some path>/aa use_hierarchy == 0 <- hitting limit
+ <some path>/aa/00 use_hierarchy == 1 <- the task belongs to
+
+This leads to killing an innocent task in aa/00. This patch is a fix for
+this bug. And this patch also fixes the arg for
+mem_cgroup_print_oom_info(). We should print information of mem_cgroup
+which the task being killed, not current, belongs to.
+
+Signed-off-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
+Acked-by: Balbir Singh <balbir@linux.vnet.ibm.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ mm/memcontrol.c | 8 +++++++-
+ mm/oom_kill.c | 2 +-
+ 2 files changed, 8 insertions(+), 2 deletions(-)
+
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -758,7 +758,13 @@ int task_in_mem_cgroup(struct task_struc
+ task_unlock(task);
+ if (!curr)
+ return 0;
+- if (curr->use_hierarchy)
++ /*
++ * We should check use_hierarchy of "mem" not "curr". Because checking
++ * use_hierarchy of "curr" here make this function true if hierarchy is
++ * enabled in "curr" and "curr" is a child of "mem" in *cgroup*
++ * hierarchy(even if use_hierarchy is disabled in "mem").
++ */
++ if (mem->use_hierarchy)
+ ret = css_is_ancestor(&curr->css, &mem->css);
+ else
+ ret = (curr == mem);
+--- a/mm/oom_kill.c
++++ b/mm/oom_kill.c
+@@ -404,7 +404,7 @@ static int oom_kill_process(struct task_
+ cpuset_print_task_mems_allowed(current);
+ task_unlock(current);
+ dump_stack();
+- mem_cgroup_print_oom_info(mem, current);
++ mem_cgroup_print_oom_info(mem, p);
+ show_mem();
+ if (sysctl_oom_dump_tasks)
+ dump_tasks(mem);
--- /dev/null
+From 8fa9ff6849bb86c59cc2ea9faadf3cb2d5223497 Mon Sep 17 00:00:00 2001
+From: Patrick McHardy <kaber@trash.net>
+Date: Tue, 15 Dec 2009 16:59:59 +0100
+Subject: netfilter: fix crashes in bridge netfilter caused by fragment jumps
+
+From: Patrick McHardy <kaber@trash.net>
+
+commit 8fa9ff6849bb86c59cc2ea9faadf3cb2d5223497 upstream.
+
+When fragments from bridge netfilter are passed to IPv4 or IPv6 conntrack
+and a reassembly queue with the same fragment key already exists from
+reassembling a similar packet received on a different device (f.i. with
+multicasted fragments), the reassembled packet might continue on a different
+codepath than where the head fragment originated. This can cause crashes
+in bridge netfilter when a fragment received on a non-bridge device (and
+thus with skb->nf_bridge == NULL) continues through the bridge netfilter
+code.
+
+Add a new reassembly identifier for packets originating from bridge
+netfilter and use it to put those packets in isolated queues.
+
+Fixes http://bugzilla.kernel.org/show_bug.cgi?id=14805
+
+Reported-and-Tested-by: Chong Qiao <qiaochong@loongson.cn>
+Signed-off-by: Patrick McHardy <kaber@trash.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ include/net/ip.h | 1 +
+ include/net/ipv6.h | 1 +
+ net/ipv4/netfilter/nf_defrag_ipv4.c | 21 +++++++++++++++++----
+ net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c | 6 ++++++
+ 4 files changed, 25 insertions(+), 4 deletions(-)
+
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -342,6 +342,7 @@ enum ip_defrag_users
+ IP_DEFRAG_CALL_RA_CHAIN,
+ IP_DEFRAG_CONNTRACK_IN,
+ IP_DEFRAG_CONNTRACK_OUT,
++ IP_DEFRAG_CONNTRACK_BRIDGE_IN,
+ IP_DEFRAG_VS_IN,
+ IP_DEFRAG_VS_OUT,
+ IP_DEFRAG_VS_FWD
+--- a/include/net/ipv6.h
++++ b/include/net/ipv6.h
+@@ -358,6 +358,7 @@ enum ip6_defrag_users {
+ IP6_DEFRAG_LOCAL_DELIVER,
+ IP6_DEFRAG_CONNTRACK_IN,
+ IP6_DEFRAG_CONNTRACK_OUT,
++ IP6_DEFRAG_CONNTRACK_BRIDGE_IN,
+ };
+
+ struct ip6_create_arg {
+--- a/net/ipv4/netfilter/nf_defrag_ipv4.c
++++ b/net/ipv4/netfilter/nf_defrag_ipv4.c
+@@ -14,6 +14,7 @@
+ #include <net/route.h>
+ #include <net/ip.h>
+
++#include <linux/netfilter_bridge.h>
+ #include <linux/netfilter_ipv4.h>
+ #include <net/netfilter/ipv4/nf_defrag_ipv4.h>
+
+@@ -34,6 +35,20 @@ static int nf_ct_ipv4_gather_frags(struc
+ return err;
+ }
+
++static enum ip_defrag_users nf_ct_defrag_user(unsigned int hooknum,
++ struct sk_buff *skb)
++{
++#ifdef CONFIG_BRIDGE_NETFILTER
++ if (skb->nf_bridge &&
++ skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)
++ return IP_DEFRAG_CONNTRACK_BRIDGE_IN;
++#endif
++ if (hooknum == NF_INET_PRE_ROUTING)
++ return IP_DEFRAG_CONNTRACK_IN;
++ else
++ return IP_DEFRAG_CONNTRACK_OUT;
++}
++
+ static unsigned int ipv4_conntrack_defrag(unsigned int hooknum,
+ struct sk_buff *skb,
+ const struct net_device *in,
+@@ -50,10 +65,8 @@ static unsigned int ipv4_conntrack_defra
+ #endif
+ /* Gather fragments. */
+ if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) {
+- if (nf_ct_ipv4_gather_frags(skb,
+- hooknum == NF_INET_PRE_ROUTING ?
+- IP_DEFRAG_CONNTRACK_IN :
+- IP_DEFRAG_CONNTRACK_OUT))
++ enum ip_defrag_users user = nf_ct_defrag_user(hooknum, skb);
++ if (nf_ct_ipv4_gather_frags(skb, user))
+ return NF_STOLEN;
+ }
+ return NF_ACCEPT;
+--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
++++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+@@ -20,6 +20,7 @@
+ #include <net/ipv6.h>
+ #include <net/inet_frag.h>
+
++#include <linux/netfilter_bridge.h>
+ #include <linux/netfilter_ipv6.h>
+ #include <net/netfilter/nf_conntrack.h>
+ #include <net/netfilter/nf_conntrack_helper.h>
+@@ -190,6 +191,11 @@ out:
+ static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum,
+ struct sk_buff *skb)
+ {
++#ifdef CONFIG_BRIDGE_NETFILTER
++ if (skb->nf_bridge &&
++ skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)
++ return IP6_DEFRAG_CONNTRACK_BRIDGE_IN;
++#endif
+ if (hooknum == NF_INET_PRE_ROUTING)
+ return IP6_DEFRAG_CONNTRACK_IN;
+ else
--- /dev/null
+From 6e1415467614e854fee660ff6648bd10fa976e95 Mon Sep 17 00:00:00 2001
+From: David Howells <dhowells@redhat.com>
+Date: Tue, 15 Dec 2009 19:27:45 +0000
+Subject: NOMMU: Optimise away the {dac_,}mmap_min_addr tests
+
+From: David Howells <dhowells@redhat.com>
+
+commit 6e1415467614e854fee660ff6648bd10fa976e95 upstream.
+
+In NOMMU mode clamp dac_mmap_min_addr to zero to cause the tests on it to be
+skipped by the compiler. We do this as the minimum mmap address doesn't make
+any sense in NOMMU mode.
+
+mmap_min_addr and round_hint_to_min() can be discarded entirely in NOMMU mode.
+
+Signed-off-by: David Howells <dhowells@redhat.com>
+Acked-by: Eric Paris <eparis@redhat.com>
+Signed-off-by: James Morris <jmorris@namei.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ include/linux/security.h | 7 +++++++
+ kernel/sysctl.c | 2 ++
+ mm/Kconfig | 1 +
+ security/Makefile | 3 ++-
+ 4 files changed, 12 insertions(+), 1 deletion(-)
+
+--- a/include/linux/security.h
++++ b/include/linux/security.h
+@@ -95,8 +95,13 @@ struct seq_file;
+ extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb);
+ extern int cap_netlink_recv(struct sk_buff *skb, int cap);
+
++#ifdef CONFIG_MMU
+ extern unsigned long mmap_min_addr;
+ extern unsigned long dac_mmap_min_addr;
++#else
++#define dac_mmap_min_addr 0UL
++#endif
++
+ /*
+ * Values used in the task_security_ops calls
+ */
+@@ -121,6 +126,7 @@ struct request_sock;
+ #define LSM_UNSAFE_PTRACE 2
+ #define LSM_UNSAFE_PTRACE_CAP 4
+
++#ifdef CONFIG_MMU
+ /*
+ * If a hint addr is less than mmap_min_addr change hint to be as
+ * low as possible but still greater than mmap_min_addr
+@@ -135,6 +141,7 @@ static inline unsigned long round_hint_t
+ }
+ extern int mmap_min_addr_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos);
++#endif
+
+ #ifdef CONFIG_SECURITY
+
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -1200,6 +1200,7 @@ static struct ctl_table vm_table[] = {
+ .extra2 = (void *)&hugetlb_infinity,
+ },
+ #endif
++#ifdef CONFIG_MMU
+ {
+ .ctl_name = VM_LOWMEM_RESERVE_RATIO,
+ .procname = "lowmem_reserve_ratio",
+@@ -1353,6 +1354,7 @@ static struct ctl_table vm_table[] = {
+ .mode = 0644,
+ .proc_handler = &mmap_min_addr_handler,
+ },
++#endif
+ #ifdef CONFIG_NUMA
+ {
+ .ctl_name = CTL_UNNUMBERED,
+--- a/mm/Kconfig
++++ b/mm/Kconfig
+@@ -227,6 +227,7 @@ config KSM
+
+ config DEFAULT_MMAP_MIN_ADDR
+ int "Low address space to protect from user allocation"
++ depends on MMU
+ default 4096
+ help
+ This is the portion of low virtual memory which should be protected
+--- a/security/Makefile
++++ b/security/Makefile
+@@ -8,7 +8,8 @@ subdir-$(CONFIG_SECURITY_SMACK) += smac
+ subdir-$(CONFIG_SECURITY_TOMOYO) += tomoyo
+
+ # always enable default capabilities
+-obj-y += commoncap.o min_addr.o
++obj-y += commoncap.o
++obj-$(CONFIG_MMU) += min_addr.o
+
+ # Object file lists
+ obj-$(CONFIG_SECURITY) += security.o capability.o
--- /dev/null
+From 5b0691508aa99d309101a49b4b084dc16b3d7019 Mon Sep 17 00:00:00 2001
+From: Andrey Borzenkov <arvidjaar@mail.ru>
+Date: Tue, 22 Dec 2009 21:38:44 +0300
+Subject: orinoco: fix GFP_KERNEL in orinoco_set_key with interrupts disabled
+
+From: Andrey Borzenkov <arvidjaar@mail.ru>
+
+commit 5b0691508aa99d309101a49b4b084dc16b3d7019 upstream.
+
+orinoco_set_key is called from two places both with interrupts disabled
+(under orinoco_lock). Use GFP_ATOMIC instead of GFP_KERNEL. Fixes following
+warning:
+
+[ 77.254109] WARNING: at /home/bor/src/linux-git/kernel/lockdep.c:2465 lockdep_trace_alloc+0x9a/0xa0()
+[ 77.254109] Hardware name: PORTEGE 4000
+[ 77.254109] Modules linked in: af_packet irnet ppp_generic slhc ircomm_tty ircomm binfmt_misc dm_mirror dm_region_hash dm_log dm_round_robin dm_multipath dm_mod loop nvram toshiba cryptomgr aead pcompress crypto_blkcipher michael_mic crypto_hash crypto_algapi orinoco_cs orinoco cfg80211 smsc_ircc2 pcmcia irda toshiba_acpi yenta_socket video i2c_ali1535 backlight rsrc_nonstatic ali_agp pcmcia_core psmouse output crc_ccitt i2c_core alim1535_wdt rfkill sg evdev ohci_hcd agpgart usbcore pata_ali libata reiserfs [last unloaded: scsi_wait_scan]
+[ 77.254109] Pid: 2296, comm: wpa_supplicant Not tainted 2.6.32-1avb #1
+[ 77.254109] Call Trace:
+[ 77.254109] [<c011f0ad>] warn_slowpath_common+0x6d/0xa0
+[ 77.254109] [<c014206a>] ? lockdep_trace_alloc+0x9a/0xa0
+[ 77.254109] [<c014206a>] ? lockdep_trace_alloc+0x9a/0xa0
+[ 77.254109] [<c011f0f5>] warn_slowpath_null+0x15/0x20
+[ 77.254109] [<c014206a>] lockdep_trace_alloc+0x9a/0xa0
+[ 77.254109] [<c018d296>] __kmalloc+0x36/0x130
+[ 77.254109] [<dffcb6a8>] ? orinoco_set_key+0x48/0x1c0 [orinoco]
+[ 77.254109] [<dffcb6a8>] orinoco_set_key+0x48/0x1c0 [orinoco]
+[ 77.254109] [<dffcb9fc>] orinoco_ioctl_set_encodeext+0x1dc/0x2d0 [orinoco]
+[ 77.254109] [<c035b117>] ioctl_standard_call+0x207/0x3b0
+[ 77.254109] [<dffcb820>] ? orinoco_ioctl_set_encodeext+0x0/0x2d0 [orinoco]
+[ 77.254109] [<c0307f1f>] ? rtnl_lock+0xf/0x20
+[ 77.254109] [<c0307f1f>] ? rtnl_lock+0xf/0x20
+[ 77.254109] [<c02fb115>] ? __dev_get_by_name+0x85/0xb0
+[ 77.254109] [<c035b616>] wext_handle_ioctl+0x176/0x200
+[ 77.254109] [<dffcb820>] ? orinoco_ioctl_set_encodeext+0x0/0x2d0 [orinoco]
+[ 77.254109] [<c030020f>] dev_ioctl+0x6af/0x730
+[ 77.254109] [<c02eec65>] ? move_addr_to_kernel+0x55/0x60
+[ 77.254109] [<c02eed59>] ? sys_sendto+0xe9/0x130
+[ 77.254109] [<c02ed77e>] sock_ioctl+0x7e/0x250
+[ 77.254109] [<c02ed700>] ? sock_ioctl+0x0/0x250
+[ 77.254109] [<c019cf4c>] vfs_ioctl+0x1c/0x70
+[ 77.254109] [<c019d1fa>] do_vfs_ioctl+0x6a/0x590
+[ 77.254109] [<c0178e50>] ? might_fault+0x90/0xa0
+[ 77.254109] [<c0178e0a>] ? might_fault+0x4a/0xa0
+[ 77.254109] [<c02ef90e>] ? sys_socketcall+0x17e/0x280
+[ 77.254109] [<c019d759>] sys_ioctl+0x39/0x60
+[ 77.254109] [<c0102e3b>] sysenter_do_call+0x12/0x32
+[ 77.254109] ---[ end trace 95ef563548d21efd ]---
+
+Signed-off-by: Andrey Borzenkov <arvidjaar@mail.ru>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/wireless/orinoco/wext.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/wireless/orinoco/wext.c
++++ b/drivers/net/wireless/orinoco/wext.c
+@@ -23,7 +23,7 @@
+ #define MAX_RID_LEN 1024
+
+ /* Helper routine to record keys
+- * Do not call from interrupt context */
++ * It is called under orinoco_lock so it may not sleep */
+ static int orinoco_set_key(struct orinoco_private *priv, int index,
+ enum orinoco_alg alg, const u8 *key, int key_len,
+ const u8 *seq, int seq_len)
+@@ -32,14 +32,14 @@ static int orinoco_set_key(struct orinoc
+ kzfree(priv->keys[index].seq);
+
+ if (key_len) {
+- priv->keys[index].key = kzalloc(key_len, GFP_KERNEL);
++ priv->keys[index].key = kzalloc(key_len, GFP_ATOMIC);
+ if (!priv->keys[index].key)
+ goto nomem;
+ } else
+ priv->keys[index].key = NULL;
+
+ if (seq_len) {
+- priv->keys[index].seq = kzalloc(seq_len, GFP_KERNEL);
++ priv->keys[index].seq = kzalloc(seq_len, GFP_ATOMIC);
+ if (!priv->keys[index].seq)
+ goto free_key;
+ } else
--- /dev/null
+From 509426bd46ad0903dca409803e0ee3d30f99f1e8 Mon Sep 17 00:00:00 2001
+From: Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
+Date: Sun, 20 Dec 2009 19:22:33 +0100
+Subject: pata_cmd64x: fix overclocking of UDMA0-2 modes
+
+From: Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
+
+commit 509426bd46ad0903dca409803e0ee3d30f99f1e8 upstream.
+
+adev->dma_mode stores the transfer mode value not UDMA mode number
+so the condition in cmd64x_set_dmamode() is always true and the higher
+UDMA clock is always selected. This can potentially result in data
+corruption when a UDMA33 device is used, when a 40-wire cable is used or
+when the error recovery code decides to lower the device speed down.
+
+The issue was introduced in the commit 6a40da0 ("libata cmd64x: whack
+into a shape that looks like the documentation") which goes back to
+kernel 2.6.20.
+
+Signed-off-by: Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
+Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/ata/pata_cmd64x.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/ata/pata_cmd64x.c
++++ b/drivers/ata/pata_cmd64x.c
+@@ -219,7 +219,7 @@ static void cmd64x_set_dmamode(struct at
+ regU |= udma_data[adev->dma_mode - XFER_UDMA_0] << shift;
+ /* Merge the control bits */
+ regU |= 1 << adev->devno; /* UDMA on */
+- if (adev->dma_mode > 2) /* 15nS timing */
++ if (adev->dma_mode > XFER_UDMA_2) /* 15nS timing */
+ regU |= 4 << adev->devno;
+ } else {
+ regU &= ~ (1 << adev->devno); /* UDMA off */
--- /dev/null
+From 256ace9bbd4cdb6d48d5f55d55d42fa20527fad1 Mon Sep 17 00:00:00 2001
+From: Sergei Shtylyov <sshtylyov@ru.mvista.com>
+Date: Thu, 17 Dec 2009 01:11:27 -0500
+Subject: pata_hpt3x2n: fix clock turnaround
+
+From: Sergei Shtylyov <sshtylyov@ru.mvista.com>
+
+commit 256ace9bbd4cdb6d48d5f55d55d42fa20527fad1 upstream.
+
+The clock turnaround code still doesn't work for several reasons:
+
+- 'USE_DPLL' flag in 'ap->host->private_data' is never initialized
+ or updated, so the driver can only set the chip to the DPLL clock
+ mode, not the PCI mode;
+
+- the driver doesn't serialize access to the channels depending on
+ the current clock mode like the vendor drivers, so the clock
+ turnaround is only executed "optionally", not always as it should be;
+
+- the wrong ports are written to when hpt3x2n_set_clock() is called
+ for the secondary channel;
+
+- hpt3x2n_set_clock() can inadvertently enable the disabled channels
+ when resetting the channel state machines.
+
+Signed-off-by: Sergei Shtylyov <sshtylyov@ru.mvista.com>
+Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/ata/pata_hpt3x2n.c | 64 ++++++++++++++++++++++++---------------------
+ 1 file changed, 35 insertions(+), 29 deletions(-)
+
+--- a/drivers/ata/pata_hpt3x2n.c
++++ b/drivers/ata/pata_hpt3x2n.c
+@@ -8,7 +8,7 @@
+ * Copyright (C) 1999-2003 Andre Hedrick <andre@linux-ide.org>
+ * Portions Copyright (C) 2001 Sun Microsystems, Inc.
+ * Portions Copyright (C) 2003 Red Hat Inc
+- * Portions Copyright (C) 2005-2007 MontaVista Software, Inc.
++ * Portions Copyright (C) 2005-2009 MontaVista Software, Inc.
+ *
+ *
+ * TODO
+@@ -25,7 +25,7 @@
+ #include <linux/libata.h>
+
+ #define DRV_NAME "pata_hpt3x2n"
+-#define DRV_VERSION "0.3.7"
++#define DRV_VERSION "0.3.8"
+
+ enum {
+ HPT_PCI_FAST = (1 << 31),
+@@ -262,7 +262,7 @@ static void hpt3x2n_bmdma_stop(struct at
+
+ static void hpt3x2n_set_clock(struct ata_port *ap, int source)
+ {
+- void __iomem *bmdma = ap->ioaddr.bmdma_addr;
++ void __iomem *bmdma = ap->ioaddr.bmdma_addr - ap->port_no * 8;
+
+ /* Tristate the bus */
+ iowrite8(0x80, bmdma+0x73);
+@@ -272,9 +272,9 @@ static void hpt3x2n_set_clock(struct ata
+ iowrite8(source, bmdma+0x7B);
+ iowrite8(0xC0, bmdma+0x79);
+
+- /* Reset state machines */
+- iowrite8(0x37, bmdma+0x70);
+- iowrite8(0x37, bmdma+0x74);
++ /* Reset state machines, avoid enabling the disabled channels */
++ iowrite8(ioread8(bmdma+0x70) | 0x32, bmdma+0x70);
++ iowrite8(ioread8(bmdma+0x74) | 0x32, bmdma+0x74);
+
+ /* Complete reset */
+ iowrite8(0x00, bmdma+0x79);
+@@ -284,21 +284,10 @@ static void hpt3x2n_set_clock(struct ata
+ iowrite8(0x00, bmdma+0x77);
+ }
+
+-/* Check if our partner interface is busy */
+-
+-static int hpt3x2n_pair_idle(struct ata_port *ap)
+-{
+- struct ata_host *host = ap->host;
+- struct ata_port *pair = host->ports[ap->port_no ^ 1];
+-
+- if (pair->hsm_task_state == HSM_ST_IDLE)
+- return 1;
+- return 0;
+-}
+-
+ static int hpt3x2n_use_dpll(struct ata_port *ap, int writing)
+ {
+ long flags = (long)ap->host->private_data;
++
+ /* See if we should use the DPLL */
+ if (writing)
+ return USE_DPLL; /* Needed for write */
+@@ -307,20 +296,35 @@ static int hpt3x2n_use_dpll(struct ata_p
+ return 0;
+ }
+
++static int hpt3x2n_qc_defer(struct ata_queued_cmd *qc)
++{
++ struct ata_port *ap = qc->ap;
++ struct ata_port *alt = ap->host->ports[ap->port_no ^ 1];
++ int rc, flags = (long)ap->host->private_data;
++ int dpll = hpt3x2n_use_dpll(ap, qc->tf.flags & ATA_TFLAG_WRITE);
++
++ /* First apply the usual rules */
++ rc = ata_std_qc_defer(qc);
++ if (rc != 0)
++ return rc;
++
++ if ((flags & USE_DPLL) != dpll && alt->qc_active)
++ return ATA_DEFER_PORT;
++ return 0;
++}
++
+ static unsigned int hpt3x2n_qc_issue(struct ata_queued_cmd *qc)
+ {
+- struct ata_taskfile *tf = &qc->tf;
+ struct ata_port *ap = qc->ap;
+ int flags = (long)ap->host->private_data;
++ int dpll = hpt3x2n_use_dpll(ap, qc->tf.flags & ATA_TFLAG_WRITE);
+
+- if (hpt3x2n_pair_idle(ap)) {
+- int dpll = hpt3x2n_use_dpll(ap, (tf->flags & ATA_TFLAG_WRITE));
+- if ((flags & USE_DPLL) != dpll) {
+- if (dpll == 1)
+- hpt3x2n_set_clock(ap, 0x21);
+- else
+- hpt3x2n_set_clock(ap, 0x23);
+- }
++ if ((flags & USE_DPLL) != dpll) {
++ flags &= ~USE_DPLL;
++ flags |= dpll;
++ ap->host->private_data = (void *)(long)flags;
++
++ hpt3x2n_set_clock(ap, dpll ? 0x21 : 0x23);
+ }
+ return ata_sff_qc_issue(qc);
+ }
+@@ -337,6 +341,8 @@ static struct ata_port_operations hpt3x2
+ .inherits = &ata_bmdma_port_ops,
+
+ .bmdma_stop = hpt3x2n_bmdma_stop,
++
++ .qc_defer = hpt3x2n_qc_defer,
+ .qc_issue = hpt3x2n_qc_issue,
+
+ .cable_detect = hpt3x2n_cable_detect,
+@@ -454,7 +460,7 @@ static int hpt3x2n_init_one(struct pci_d
+ unsigned int f_low, f_high;
+ int adjust;
+ unsigned long iobase = pci_resource_start(dev, 4);
+- void *hpriv = NULL;
++ void *hpriv = (void *)USE_DPLL;
+ int rc;
+
+ rc = pcim_enable_device(dev);
+@@ -542,7 +548,7 @@ static int hpt3x2n_init_one(struct pci_d
+ /* Set our private data up. We only need a few flags so we use
+ it directly */
+ if (pci_mhz > 60) {
+- hpriv = (void *)PCI66;
++ hpriv = (void *)(PCI66 | USE_DPLL);
+ /*
+ * On HPT371N, if ATA clock is 66 MHz we must set bit 2 in
+ * the MISC. register to stretch the UltraDMA Tss timing.
--- /dev/null
+From bb7f20b1c639606def3b91f4e4aca6daeee5d80a Mon Sep 17 00:00:00 2001
+From: Neil Campbell <neilc@linux.vnet.ibm.com>
+Date: Mon, 14 Dec 2009 04:08:57 +0000
+Subject: powerpc: Handle VSX alignment faults correctly in little-endian mode
+
+From: Neil Campbell <neilc@linux.vnet.ibm.com>
+
+commit bb7f20b1c639606def3b91f4e4aca6daeee5d80a upstream.
+
+This patch fixes the handling of VSX alignment faults in little-endian
+mode (the current code assumes the processor is in big-endian mode).
+
+The patch also makes the handlers clear the top 8 bytes of the register
+when handling an 8 byte VSX load.
+
+This is based on 2.6.32.
+
+Signed-off-by: Neil Campbell <neilc@linux.vnet.ibm.com>
+Acked-by: Michael Neuling <mikey@neuling.org>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/powerpc/kernel/align.c | 63 ++++++++++++++++++++++++++++++++------------
+ 1 file changed, 46 insertions(+), 17 deletions(-)
+
+--- a/arch/powerpc/kernel/align.c
++++ b/arch/powerpc/kernel/align.c
+@@ -642,10 +642,14 @@ static int emulate_spe(struct pt_regs *r
+ */
+ static int emulate_vsx(unsigned char __user *addr, unsigned int reg,
+ unsigned int areg, struct pt_regs *regs,
+- unsigned int flags, unsigned int length)
++ unsigned int flags, unsigned int length,
++ unsigned int elsize)
+ {
+ char *ptr;
++ unsigned long *lptr;
+ int ret = 0;
++ int sw = 0;
++ int i, j;
+
+ flush_vsx_to_thread(current);
+
+@@ -654,19 +658,35 @@ static int emulate_vsx(unsigned char __u
+ else
+ ptr = (char *) ¤t->thread.vr[reg - 32];
+
+- if (flags & ST)
+- ret = __copy_to_user(addr, ptr, length);
+- else {
+- if (flags & SPLT){
+- ret = __copy_from_user(ptr, addr, length);
+- ptr += length;
++ lptr = (unsigned long *) ptr;
++
++ if (flags & SW)
++ sw = elsize-1;
++
++ for (j = 0; j < length; j += elsize) {
++ for (i = 0; i < elsize; ++i) {
++ if (flags & ST)
++ ret |= __put_user(ptr[i^sw], addr + i);
++ else
++ ret |= __get_user(ptr[i^sw], addr + i);
+ }
+- ret |= __copy_from_user(ptr, addr, length);
++ ptr += elsize;
++ addr += elsize;
+ }
+- if (flags & U)
+- regs->gpr[areg] = regs->dar;
+- if (ret)
++
++ if (!ret) {
++ if (flags & U)
++ regs->gpr[areg] = regs->dar;
++
++ /* Splat load copies the same data to top and bottom 8 bytes */
++ if (flags & SPLT)
++ lptr[1] = lptr[0];
++ /* For 8 byte loads, zero the top 8 bytes */
++ else if (!(flags & ST) && (8 == length))
++ lptr[1] = 0;
++ } else
+ return -EFAULT;
++
+ return 1;
+ }
+ #endif
+@@ -767,16 +787,25 @@ int fix_alignment(struct pt_regs *regs)
+
+ #ifdef CONFIG_VSX
+ if ((instruction & 0xfc00003e) == 0x7c000018) {
+- /* Additional register addressing bit (64 VSX vs 32 FPR/GPR */
++ unsigned int elsize;
++
++ /* Additional register addressing bit (64 VSX vs 32 FPR/GPR) */
+ reg |= (instruction & 0x1) << 5;
+ /* Simple inline decoder instead of a table */
++ /* VSX has only 8 and 16 byte memory accesses */
++ nb = 8;
+ if (instruction & 0x200)
+ nb = 16;
+- else if (instruction & 0x080)
+- nb = 8;
+- else
+- nb = 4;
++
++ /* Vector stores in little-endian mode swap individual
++ elements, so process them separately */
++ elsize = 4;
++ if (instruction & 0x80)
++ elsize = 8;
++
+ flags = 0;
++ if (regs->msr & MSR_LE)
++ flags |= SW;
+ if (instruction & 0x100)
+ flags |= ST;
+ if (instruction & 0x040)
+@@ -787,7 +816,7 @@ int fix_alignment(struct pt_regs *regs)
+ nb = 8;
+ }
+ PPC_WARN_EMULATED(vsx);
+- return emulate_vsx(addr, reg, areg, regs, flags, nb);
++ return emulate_vsx(addr, reg, areg, regs, flags, nb, elsize);
+ }
+ #endif
+ /* A size of 0 indicates an instruction we don't support, with
--- /dev/null
+From fd8fbfc1709822bd94247c5b2ab15a5f5041e103 Mon Sep 17 00:00:00 2001
+From: Dmitry Monakhov <dmonakhov@openvz.org>
+Date: Mon, 14 Dec 2009 15:21:13 +0300
+Subject: quota: decouple fs reserved space from quota reservation
+
+From: Dmitry Monakhov <dmonakhov@openvz.org>
+
+commit fd8fbfc1709822bd94247c5b2ab15a5f5041e103 upstream.
+
+Currently inode_reservation is managed by fs itself and this
+reservation is transferred on dquot_transfer(). This means that
+inode_reservation must always be in sync with
+dquot->dq_dqb.dqb_rsvspace. Otherwise dquot_transfer() will result
+in incorrect quota(WARN_ON in dquot_claim_reserved_space() will be
+triggered)
+This is not easy because of complex locking order issues
+for example http://bugzilla.kernel.org/show_bug.cgi?id=14739
+
+The patch introduces a quota reservation field for each fs-inode
+(fs specific inode is used in order to prevent bloating generic
+vfs inode). This reservation is managed by quota code internally
+similar to i_blocks/i_bytes and may not be always in sync with
+internal fs reservation.
+
+Also perform some code rearrangement:
+- Unify dquot_reserve_space() and dquot_alloc_space()
+- Unify dquot_release_reserved_space() and dquot_free_space()
+- Also this patch add missing warning update to release_rsv()
+ dquot_release_reserved_space() must call flush_warnings() as
+ dquot_free_space() does.
+
+Signed-off-by: Dmitry Monakhov <dmonakhov@openvz.org>
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/quota/dquot.c | 213 +++++++++++++++++++++++++++-----------------------
+ include/linux/quota.h | 5 -
+ 2 files changed, 122 insertions(+), 96 deletions(-)
+
+--- a/fs/quota/dquot.c
++++ b/fs/quota/dquot.c
+@@ -1388,6 +1388,67 @@ void vfs_dq_drop(struct inode *inode)
+ EXPORT_SYMBOL(vfs_dq_drop);
+
+ /*
++ * inode_reserved_space is managed internally by quota, and protected by
++ * i_lock similar to i_blocks+i_bytes.
++ */
++static qsize_t *inode_reserved_space(struct inode * inode)
++{
++ /* Filesystem must explicitly define it's own method in order to use
++ * quota reservation interface */
++ BUG_ON(!inode->i_sb->dq_op->get_reserved_space);
++ return inode->i_sb->dq_op->get_reserved_space(inode);
++}
++
++static void inode_add_rsv_space(struct inode *inode, qsize_t number)
++{
++ spin_lock(&inode->i_lock);
++ *inode_reserved_space(inode) += number;
++ spin_unlock(&inode->i_lock);
++}
++
++
++static void inode_claim_rsv_space(struct inode *inode, qsize_t number)
++{
++ spin_lock(&inode->i_lock);
++ *inode_reserved_space(inode) -= number;
++ __inode_add_bytes(inode, number);
++ spin_unlock(&inode->i_lock);
++}
++
++static void inode_sub_rsv_space(struct inode *inode, qsize_t number)
++{
++ spin_lock(&inode->i_lock);
++ *inode_reserved_space(inode) -= number;
++ spin_unlock(&inode->i_lock);
++}
++
++static qsize_t inode_get_rsv_space(struct inode *inode)
++{
++ qsize_t ret;
++ spin_lock(&inode->i_lock);
++ ret = *inode_reserved_space(inode);
++ spin_unlock(&inode->i_lock);
++ return ret;
++}
++
++static void inode_incr_space(struct inode *inode, qsize_t number,
++ int reserve)
++{
++ if (reserve)
++ inode_add_rsv_space(inode, number);
++ else
++ inode_add_bytes(inode, number);
++}
++
++static void inode_decr_space(struct inode *inode, qsize_t number, int reserve)
++{
++ if (reserve)
++ inode_sub_rsv_space(inode, number);
++ else
++ inode_sub_bytes(inode, number);
++}
++
++/*
+ * Following four functions update i_blocks+i_bytes fields and
+ * quota information (together with appropriate checks)
+ * NOTE: We absolutely rely on the fact that caller dirties
+@@ -1405,6 +1466,21 @@ int __dquot_alloc_space(struct inode *in
+ int cnt, ret = QUOTA_OK;
+ char warntype[MAXQUOTAS];
+
++ /*
++ * First test before acquiring mutex - solves deadlocks when we
++ * re-enter the quota code and are already holding the mutex
++ */
++ if (IS_NOQUOTA(inode)) {
++ inode_incr_space(inode, number, reserve);
++ goto out;
++ }
++
++ down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
++ if (IS_NOQUOTA(inode)) {
++ inode_incr_space(inode, number, reserve);
++ goto out_unlock;
++ }
++
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+ warntype[cnt] = QUOTA_NL_NOWARN;
+
+@@ -1415,7 +1491,8 @@ int __dquot_alloc_space(struct inode *in
+ if (check_bdq(inode->i_dquot[cnt], number, warn, warntype+cnt)
+ == NO_QUOTA) {
+ ret = NO_QUOTA;
+- goto out_unlock;
++ spin_unlock(&dq_data_lock);
++ goto out_flush_warn;
+ }
+ }
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+@@ -1426,64 +1503,32 @@ int __dquot_alloc_space(struct inode *in
+ else
+ dquot_incr_space(inode->i_dquot[cnt], number);
+ }
+- if (!reserve)
+- inode_add_bytes(inode, number);
+-out_unlock:
++ inode_incr_space(inode, number, reserve);
+ spin_unlock(&dq_data_lock);
+- flush_warnings(inode->i_dquot, warntype);
+- return ret;
+-}
+-
+-int dquot_alloc_space(struct inode *inode, qsize_t number, int warn)
+-{
+- int cnt, ret = QUOTA_OK;
+-
+- /*
+- * First test before acquiring mutex - solves deadlocks when we
+- * re-enter the quota code and are already holding the mutex
+- */
+- if (IS_NOQUOTA(inode)) {
+- inode_add_bytes(inode, number);
+- goto out;
+- }
+-
+- down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+- if (IS_NOQUOTA(inode)) {
+- inode_add_bytes(inode, number);
+- goto out_unlock;
+- }
+-
+- ret = __dquot_alloc_space(inode, number, warn, 0);
+- if (ret == NO_QUOTA)
+- goto out_unlock;
+
++ if (reserve)
++ goto out_flush_warn;
+ /* Dirtify all the dquots - this can block when journalling */
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+ if (inode->i_dquot[cnt])
+ mark_dquot_dirty(inode->i_dquot[cnt]);
++out_flush_warn:
++ flush_warnings(inode->i_dquot, warntype);
+ out_unlock:
+ up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+ out:
+ return ret;
+ }
++
++int dquot_alloc_space(struct inode *inode, qsize_t number, int warn)
++{
++ return __dquot_alloc_space(inode, number, warn, 0);
++}
+ EXPORT_SYMBOL(dquot_alloc_space);
+
+ int dquot_reserve_space(struct inode *inode, qsize_t number, int warn)
+ {
+- int ret = QUOTA_OK;
+-
+- if (IS_NOQUOTA(inode))
+- goto out;
+-
+- down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+- if (IS_NOQUOTA(inode))
+- goto out_unlock;
+-
+- ret = __dquot_alloc_space(inode, number, warn, 1);
+-out_unlock:
+- up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+-out:
+- return ret;
++ return __dquot_alloc_space(inode, number, warn, 1);
+ }
+ EXPORT_SYMBOL(dquot_reserve_space);
+
+@@ -1540,14 +1585,14 @@ int dquot_claim_space(struct inode *inod
+ int ret = QUOTA_OK;
+
+ if (IS_NOQUOTA(inode)) {
+- inode_add_bytes(inode, number);
++ inode_claim_rsv_space(inode, number);
+ goto out;
+ }
+
+ down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+ if (IS_NOQUOTA(inode)) {
+ up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+- inode_add_bytes(inode, number);
++ inode_claim_rsv_space(inode, number);
+ goto out;
+ }
+
+@@ -1559,7 +1604,7 @@ int dquot_claim_space(struct inode *inod
+ number);
+ }
+ /* Update inode bytes */
+- inode_add_bytes(inode, number);
++ inode_claim_rsv_space(inode, number);
+ spin_unlock(&dq_data_lock);
+ /* Dirtify all the dquots - this can block when journalling */
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+@@ -1572,38 +1617,9 @@ out:
+ EXPORT_SYMBOL(dquot_claim_space);
+
+ /*
+- * Release reserved quota space
+- */
+-void dquot_release_reserved_space(struct inode *inode, qsize_t number)
+-{
+- int cnt;
+-
+- if (IS_NOQUOTA(inode))
+- goto out;
+-
+- down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+- if (IS_NOQUOTA(inode))
+- goto out_unlock;
+-
+- spin_lock(&dq_data_lock);
+- /* Release reserved dquots */
+- for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+- if (inode->i_dquot[cnt])
+- dquot_free_reserved_space(inode->i_dquot[cnt], number);
+- }
+- spin_unlock(&dq_data_lock);
+-
+-out_unlock:
+- up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+-out:
+- return;
+-}
+-EXPORT_SYMBOL(dquot_release_reserved_space);
+-
+-/*
+ * This operation can block, but only after everything is updated
+ */
+-int dquot_free_space(struct inode *inode, qsize_t number)
++int __dquot_free_space(struct inode *inode, qsize_t number, int reserve)
+ {
+ unsigned int cnt;
+ char warntype[MAXQUOTAS];
+@@ -1612,7 +1628,7 @@ int dquot_free_space(struct inode *inode
+ * re-enter the quota code and are already holding the mutex */
+ if (IS_NOQUOTA(inode)) {
+ out_sub:
+- inode_sub_bytes(inode, number);
++ inode_decr_space(inode, number, reserve);
+ return QUOTA_OK;
+ }
+
+@@ -1627,21 +1643,43 @@ out_sub:
+ if (!inode->i_dquot[cnt])
+ continue;
+ warntype[cnt] = info_bdq_free(inode->i_dquot[cnt], number);
+- dquot_decr_space(inode->i_dquot[cnt], number);
++ if (reserve)
++ dquot_free_reserved_space(inode->i_dquot[cnt], number);
++ else
++ dquot_decr_space(inode->i_dquot[cnt], number);
+ }
+- inode_sub_bytes(inode, number);
++ inode_decr_space(inode, number, reserve);
+ spin_unlock(&dq_data_lock);
++
++ if (reserve)
++ goto out_unlock;
+ /* Dirtify all the dquots - this can block when journalling */
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+ if (inode->i_dquot[cnt])
+ mark_dquot_dirty(inode->i_dquot[cnt]);
++out_unlock:
+ flush_warnings(inode->i_dquot, warntype);
+ up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+ return QUOTA_OK;
+ }
++
++int dquot_free_space(struct inode *inode, qsize_t number)
++{
++ return __dquot_free_space(inode, number, 0);
++}
+ EXPORT_SYMBOL(dquot_free_space);
+
+ /*
++ * Release reserved quota space
++ */
++void dquot_release_reserved_space(struct inode *inode, qsize_t number)
++{
++ __dquot_free_space(inode, number, 1);
++
++}
++EXPORT_SYMBOL(dquot_release_reserved_space);
++
++/*
+ * This operation can block, but only after everything is updated
+ */
+ int dquot_free_inode(const struct inode *inode, qsize_t number)
+@@ -1679,19 +1717,6 @@ int dquot_free_inode(const struct inode
+ EXPORT_SYMBOL(dquot_free_inode);
+
+ /*
+- * call back function, get reserved quota space from underlying fs
+- */
+-qsize_t dquot_get_reserved_space(struct inode *inode)
+-{
+- qsize_t reserved_space = 0;
+-
+- if (sb_any_quota_active(inode->i_sb) &&
+- inode->i_sb->dq_op->get_reserved_space)
+- reserved_space = inode->i_sb->dq_op->get_reserved_space(inode);
+- return reserved_space;
+-}
+-
+-/*
+ * Transfer the number of inode and blocks from one diskquota to an other.
+ *
+ * This operation can block, but only after everything is updated
+@@ -1734,7 +1759,7 @@ int dquot_transfer(struct inode *inode,
+ }
+ spin_lock(&dq_data_lock);
+ cur_space = inode_get_bytes(inode);
+- rsv_space = dquot_get_reserved_space(inode);
++ rsv_space = inode_get_rsv_space(inode);
+ space = cur_space + rsv_space;
+ /* Build the transfer_from list and check the limits */
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+--- a/include/linux/quota.h
++++ b/include/linux/quota.h
+@@ -313,8 +313,9 @@ struct dquot_operations {
+ int (*claim_space) (struct inode *, qsize_t);
+ /* release rsved quota for delayed alloc */
+ void (*release_rsv) (struct inode *, qsize_t);
+- /* get reserved quota for delayed alloc */
+- qsize_t (*get_reserved_space) (struct inode *);
++ /* get reserved quota for delayed alloc, value returned is managed by
++ * quota code only */
++ qsize_t *(*get_reserved_space) (struct inode *);
+ };
+
+ /* Operations handling requests from userspace */
--- /dev/null
+From 93b6bd26b74efe46b4579592560f9f1cb7b61994 Mon Sep 17 00:00:00 2001
+From: Gertjan van Wingerde <gwingerde@gmail.com>
+Date: Mon, 14 Dec 2009 20:33:55 +0100
+Subject: rt2x00: Disable powersaving for rt61pci and rt2800pci.
+
+From: Gertjan van Wingerde <gwingerde@gmail.com>
+
+commit 93b6bd26b74efe46b4579592560f9f1cb7b61994 upstream.
+
+We've had many reports of rt61pci failures with powersaving enabled.
+Therefore, as a stop-gap measure, disable powersaving of the rt61pci
+until we have found a proper solution.
+Also disable powersaving on rt2800pci as it most probably will show
+the same problem.
+
+Signed-off-by: Gertjan van Wingerde <gwingerde@gmail.com>
+Acked-by: Ivo van Doorn <IvDoorn@gmail.com>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/wireless/rt2x00/rt61pci.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/drivers/net/wireless/rt2x00/rt61pci.c
++++ b/drivers/net/wireless/rt2x00/rt61pci.c
+@@ -2538,6 +2538,11 @@ static int rt61pci_probe_hw_mode(struct
+ unsigned int i;
+
+ /*
++ * Disable powersaving as default.
++ */
++ rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
++
++ /*
+ * Initialize all hw fields.
+ */
+ rt2x00dev->hw->flags =
--- /dev/null
+From 22825ab7693fd29769518a0d25ba43c01a50092a Mon Sep 17 00:00:00 2001
+From: Stefan Weinhuber <wein@de.ibm.com>
+Date: Mon, 7 Dec 2009 12:51:48 +0100
+Subject: S390: dasd: support DIAG access for read-only devices
+
+From: Stefan Weinhuber <wein@de.ibm.com>
+
+commit 22825ab7693fd29769518a0d25ba43c01a50092a upstream.
+
+When a DASD device is used with the DIAG discipline, the DIAG
+initialization will indicate success or error with a respective
+return code. So far we have interpreted a return code of 4 as error,
+but it actually means that the initialization was successful, but
+the device is read-only. To allow read-only devices to be used with
+DIAG we need to accept a return code of 4 as success.
+
+Re-initialization of the DIAG access is also part of the DIAG error
+recovery. If we find that the access mode of a device has been
+changed from writable to read-only while the device was in use,
+we print an error message.
+
+Signed-off-by: Stefan Weinhuber <wein@de.ibm.com>
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Cc: Stephen Powell <zlinuxman@wowway.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/s390/block/dasd_diag.c | 19 ++++++++++++++++---
+ 1 file changed, 16 insertions(+), 3 deletions(-)
+
+--- a/drivers/s390/block/dasd_diag.c
++++ b/drivers/s390/block/dasd_diag.c
+@@ -145,6 +145,15 @@ dasd_diag_erp(struct dasd_device *device
+
+ mdsk_term_io(device);
+ rc = mdsk_init_io(device, device->block->bp_block, 0, NULL);
++ if (rc == 4) {
++ if (!(device->features & DASD_FEATURE_READONLY)) {
++ dev_warn(&device->cdev->dev,
++ "The access mode of a DIAG device changed"
++ " to read-only");
++ device->features |= DASD_FEATURE_READONLY;
++ }
++ rc = 0;
++ }
+ if (rc)
+ dev_warn(&device->cdev->dev, "DIAG ERP failed with "
+ "rc=%d\n", rc);
+@@ -433,16 +442,20 @@ dasd_diag_check_device(struct dasd_devic
+ for (sb = 512; sb < bsize; sb = sb << 1)
+ block->s2b_shift++;
+ rc = mdsk_init_io(device, block->bp_block, 0, NULL);
+- if (rc) {
++ if (rc && (rc != 4)) {
+ dev_warn(&device->cdev->dev, "DIAG initialization "
+ "failed with rc=%d\n", rc);
+ rc = -EIO;
+ } else {
++ if (rc == 4)
++ device->features |= DASD_FEATURE_READONLY;
+ dev_info(&device->cdev->dev,
+- "New DASD with %ld byte/block, total size %ld KB\n",
++ "New DASD with %ld byte/block, total size %ld KB%s\n",
+ (unsigned long) block->bp_block,
+ (unsigned long) (block->blocks <<
+- block->s2b_shift) >> 1);
++ block->s2b_shift) >> 1,
++ (rc == 4) ? ", read-only device" : "");
++ rc = 0;
+ }
+ out_label:
+ free_page((long) label);
--- /dev/null
+From 6ad4c18884e864cf4c77f9074d3d1816063f99cd Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Date: Wed, 25 Nov 2009 13:31:39 +0100
+Subject: sched: Fix balance vs hotplug race
+
+From: Peter Zijlstra <a.p.zijlstra@chello.nl>
+
+commit 6ad4c18884e864cf4c77f9074d3d1816063f99cd upstream.
+
+Since (e761b77: cpu hotplug, sched: Introduce cpu_active_map and redo
+sched domain managment) we have cpu_active_mask which is supposed to rule
+scheduler migration and load-balancing, except it never (fully) did.
+
+The particular problem being solved here is a crash in try_to_wake_up()
+where select_task_rq() ends up selecting an offline cpu because
+select_task_rq_fair() trusts the sched_domain tree to reflect the
+current state of affairs, similarly select_task_rq_rt() trusts the
+root_domain.
+
+However, the sched_domains are updated from CPU_DEAD, which is after the
+cpu is taken offline and after stop_machine is done. Therefore it can
+race perfectly well with code assuming the domains are right.
+
+Cure this by building the domains from cpu_active_mask on
+CPU_DOWN_PREPARE.
+
+Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+LKML-Reference: <new-submission>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Cc: Mike Galbraith <efault@gmx.de>
+Cc: Holger Hoffstätte <holger.hoffstaette@googlemail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ include/linux/cpumask.h | 2 ++
+ kernel/cpu.c | 18 +++++++++++++-----
+ kernel/cpuset.c | 16 +++++++++-------
+ kernel/sched.c | 32 +++++++++++++++++---------------
+ 4 files changed, 41 insertions(+), 27 deletions(-)
+
+--- a/include/linux/cpumask.h
++++ b/include/linux/cpumask.h
+@@ -84,6 +84,7 @@ extern const struct cpumask *const cpu_a
+ #define num_online_cpus() cpumask_weight(cpu_online_mask)
+ #define num_possible_cpus() cpumask_weight(cpu_possible_mask)
+ #define num_present_cpus() cpumask_weight(cpu_present_mask)
++#define num_active_cpus() cpumask_weight(cpu_active_mask)
+ #define cpu_online(cpu) cpumask_test_cpu((cpu), cpu_online_mask)
+ #define cpu_possible(cpu) cpumask_test_cpu((cpu), cpu_possible_mask)
+ #define cpu_present(cpu) cpumask_test_cpu((cpu), cpu_present_mask)
+@@ -92,6 +93,7 @@ extern const struct cpumask *const cpu_a
+ #define num_online_cpus() 1
+ #define num_possible_cpus() 1
+ #define num_present_cpus() 1
++#define num_active_cpus() 1
+ #define cpu_online(cpu) ((cpu) == 0)
+ #define cpu_possible(cpu) ((cpu) == 0)
+ #define cpu_present(cpu) ((cpu) == 0)
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -212,6 +212,8 @@ static int __ref _cpu_down(unsigned int
+ err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod,
+ hcpu, -1, &nr_calls);
+ if (err == NOTIFY_BAD) {
++ set_cpu_active(cpu, true);
++
+ nr_calls--;
+ __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
+ hcpu, nr_calls, NULL);
+@@ -223,11 +225,11 @@ static int __ref _cpu_down(unsigned int
+
+ /* Ensure that we are not runnable on dying cpu */
+ cpumask_copy(old_allowed, ¤t->cpus_allowed);
+- set_cpus_allowed_ptr(current,
+- cpumask_of(cpumask_any_but(cpu_online_mask, cpu)));
++ set_cpus_allowed_ptr(current, cpu_active_mask);
+
+ err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
+ if (err) {
++ set_cpu_active(cpu, true);
+ /* CPU didn't die: tell everyone. Can't complain. */
+ if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
+ hcpu) == NOTIFY_BAD)
+@@ -292,9 +294,6 @@ int __ref cpu_down(unsigned int cpu)
+
+ err = _cpu_down(cpu, 0);
+
+- if (cpu_online(cpu))
+- set_cpu_active(cpu, true);
+-
+ out:
+ cpu_maps_update_done();
+ stop_machine_destroy();
+@@ -387,6 +386,15 @@ int disable_nonboot_cpus(void)
+ * with the userspace trying to use the CPU hotplug at the same time
+ */
+ cpumask_clear(frozen_cpus);
++
++ for_each_online_cpu(cpu) {
++ if (cpu == first_cpu)
++ continue;
++ set_cpu_active(cpu, false);
++ }
++
++ synchronize_sched();
++
+ printk("Disabling non-boot CPUs ...\n");
+ for_each_online_cpu(cpu) {
+ if (cpu == first_cpu)
+--- a/kernel/cpuset.c
++++ b/kernel/cpuset.c
+@@ -873,7 +873,7 @@ static int update_cpumask(struct cpuset
+ if (retval < 0)
+ return retval;
+
+- if (!cpumask_subset(trialcs->cpus_allowed, cpu_online_mask))
++ if (!cpumask_subset(trialcs->cpus_allowed, cpu_active_mask))
+ return -EINVAL;
+ }
+ retval = validate_change(cs, trialcs);
+@@ -2011,7 +2011,7 @@ static void scan_for_empty_cpusets(struc
+ }
+
+ /* Continue past cpusets with all cpus, mems online */
+- if (cpumask_subset(cp->cpus_allowed, cpu_online_mask) &&
++ if (cpumask_subset(cp->cpus_allowed, cpu_active_mask) &&
+ nodes_subset(cp->mems_allowed, node_states[N_HIGH_MEMORY]))
+ continue;
+
+@@ -2020,7 +2020,7 @@ static void scan_for_empty_cpusets(struc
+ /* Remove offline cpus and mems from this cpuset. */
+ mutex_lock(&callback_mutex);
+ cpumask_and(cp->cpus_allowed, cp->cpus_allowed,
+- cpu_online_mask);
++ cpu_active_mask);
+ nodes_and(cp->mems_allowed, cp->mems_allowed,
+ node_states[N_HIGH_MEMORY]);
+ mutex_unlock(&callback_mutex);
+@@ -2058,8 +2058,10 @@ static int cpuset_track_online_cpus(stru
+ switch (phase) {
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+- case CPU_DEAD:
+- case CPU_DEAD_FROZEN:
++ case CPU_DOWN_PREPARE:
++ case CPU_DOWN_PREPARE_FROZEN:
++ case CPU_DOWN_FAILED:
++ case CPU_DOWN_FAILED_FROZEN:
+ break;
+
+ default:
+@@ -2068,7 +2070,7 @@ static int cpuset_track_online_cpus(stru
+
+ cgroup_lock();
+ mutex_lock(&callback_mutex);
+- cpumask_copy(top_cpuset.cpus_allowed, cpu_online_mask);
++ cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask);
+ mutex_unlock(&callback_mutex);
+ scan_for_empty_cpusets(&top_cpuset);
+ ndoms = generate_sched_domains(&doms, &attr);
+@@ -2115,7 +2117,7 @@ static int cpuset_track_online_nodes(str
+
+ void __init cpuset_init_smp(void)
+ {
+- cpumask_copy(top_cpuset.cpus_allowed, cpu_online_mask);
++ cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask);
+ top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
+
+ hotcpu_notifier(cpuset_track_online_cpus, 0);
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -4139,7 +4139,7 @@ static int load_balance(int this_cpu, st
+ unsigned long flags;
+ struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
+
+- cpumask_copy(cpus, cpu_online_mask);
++ cpumask_copy(cpus, cpu_active_mask);
+
+ /*
+ * When power savings policy is enabled for the parent domain, idle
+@@ -4302,7 +4302,7 @@ load_balance_newidle(int this_cpu, struc
+ int all_pinned = 0;
+ struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
+
+- cpumask_copy(cpus, cpu_online_mask);
++ cpumask_copy(cpus, cpu_active_mask);
+
+ /*
+ * When power savings policy is enabled for the parent domain, idle
+@@ -4699,7 +4699,7 @@ int select_nohz_load_balancer(int stop_t
+ cpumask_set_cpu(cpu, nohz.cpu_mask);
+
+ /* time for ilb owner also to sleep */
+- if (cpumask_weight(nohz.cpu_mask) == num_online_cpus()) {
++ if (cpumask_weight(nohz.cpu_mask) == num_active_cpus()) {
+ if (atomic_read(&nohz.load_balancer) == cpu)
+ atomic_set(&nohz.load_balancer, -1);
+ return 0;
+@@ -7075,7 +7075,7 @@ int set_cpus_allowed_ptr(struct task_str
+ int ret = 0;
+
+ rq = task_rq_lock(p, &flags);
+- if (!cpumask_intersects(new_mask, cpu_online_mask)) {
++ if (!cpumask_intersects(new_mask, cpu_active_mask)) {
+ ret = -EINVAL;
+ goto out;
+ }
+@@ -7097,7 +7097,7 @@ int set_cpus_allowed_ptr(struct task_str
+ if (cpumask_test_cpu(task_cpu(p), new_mask))
+ goto out;
+
+- if (migrate_task(p, cpumask_any_and(cpu_online_mask, new_mask), &req)) {
++ if (migrate_task(p, cpumask_any_and(cpu_active_mask, new_mask), &req)) {
+ /* Need help from migration thread: drop lock and wait. */
+ struct task_struct *mt = rq->migration_thread;
+
+@@ -7251,19 +7251,19 @@ static void move_task_off_dead_cpu(int d
+
+ again:
+ /* Look for allowed, online CPU in same node. */
+- for_each_cpu_and(dest_cpu, nodemask, cpu_online_mask)
++ for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
+ if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
+ goto move;
+
+ /* Any allowed, online CPU? */
+- dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_online_mask);
++ dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
+ if (dest_cpu < nr_cpu_ids)
+ goto move;
+
+ /* No more Mr. Nice Guy. */
+ if (dest_cpu >= nr_cpu_ids) {
+ cpuset_cpus_allowed_locked(p, &p->cpus_allowed);
+- dest_cpu = cpumask_any_and(cpu_online_mask, &p->cpus_allowed);
++ dest_cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
+
+ /*
+ * Don't tell them about moving exiting tasks or
+@@ -7292,7 +7292,7 @@ move:
+ */
+ static void migrate_nr_uninterruptible(struct rq *rq_src)
+ {
+- struct rq *rq_dest = cpu_rq(cpumask_any(cpu_online_mask));
++ struct rq *rq_dest = cpu_rq(cpumask_any(cpu_active_mask));
+ unsigned long flags;
+
+ local_irq_save(flags);
+@@ -7546,7 +7546,7 @@ static ctl_table *sd_alloc_ctl_cpu_table
+ static struct ctl_table_header *sd_sysctl_header;
+ static void register_sched_domain_sysctl(void)
+ {
+- int i, cpu_num = num_online_cpus();
++ int i, cpu_num = num_possible_cpus();
+ struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
+ char buf[32];
+
+@@ -7556,7 +7556,7 @@ static void register_sched_domain_sysctl
+ if (entry == NULL)
+ return;
+
+- for_each_online_cpu(i) {
++ for_each_possible_cpu(i) {
+ snprintf(buf, 32, "cpu%d", i);
+ entry->procname = kstrdup(buf, GFP_KERNEL);
+ entry->mode = 0555;
+@@ -9042,7 +9042,7 @@ match1:
+ if (doms_new == NULL) {
+ ndoms_cur = 0;
+ doms_new = fallback_doms;
+- cpumask_andnot(&doms_new[0], cpu_online_mask, cpu_isolated_map);
++ cpumask_andnot(&doms_new[0], cpu_active_mask, cpu_isolated_map);
+ WARN_ON_ONCE(dattr_new);
+ }
+
+@@ -9173,8 +9173,10 @@ static int update_sched_domains(struct n
+ switch (action) {
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+- case CPU_DEAD:
+- case CPU_DEAD_FROZEN:
++ case CPU_DOWN_PREPARE:
++ case CPU_DOWN_PREPARE_FROZEN:
++ case CPU_DOWN_FAILED:
++ case CPU_DOWN_FAILED_FROZEN:
+ partition_sched_domains(1, NULL, NULL);
+ return NOTIFY_OK;
+
+@@ -9221,7 +9223,7 @@ void __init sched_init_smp(void)
+ #endif
+ get_online_cpus();
+ mutex_lock(&sched_domains_mutex);
+- arch_init_sched_domains(cpu_online_mask);
++ arch_init_sched_domains(cpu_active_mask);
+ cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
+ if (cpumask_empty(non_isolated_cpus))
+ cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
--- /dev/null
+From e6c8fba7771563b2f3dfb96a78f36ec17e15bdf0 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Date: Wed, 16 Dec 2009 18:04:33 +0100
+Subject: sched: Fix task_hot() test order
+
+From: Peter Zijlstra <a.p.zijlstra@chello.nl>
+
+commit e6c8fba7771563b2f3dfb96a78f36ec17e15bdf0 upstream.
+
+Make sure not to access sched_fair fields before verifying it is
+indeed a sched_fair task.
+
+Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Cc: Mike Galbraith <efault@gmx.de>
+LKML-Reference: <20091216170517.577998058@chello.nl>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ kernel/sched.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -2036,6 +2036,9 @@ task_hot(struct task_struct *p, u64 now,
+ {
+ s64 delta;
+
++ if (p->sched_class != &fair_sched_class)
++ return 0;
++
+ /*
+ * Buddy candidates are cache hot:
+ */
+@@ -2044,9 +2047,6 @@ task_hot(struct task_struct *p, u64 now,
+ &p->se == cfs_rq_of(&p->se)->last))
+ return 1;
+
+- if (p->sched_class != &fair_sched_class)
+- return 0;
+-
+ if (sysctl_sched_migration_cost == -1)
+ return 1;
+ if (sysctl_sched_migration_cost == 0)
--- /dev/null
+From 047106adcc85e3023da210143a6ab8a55df9e0fc Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Date: Mon, 16 Nov 2009 10:28:09 +0100
+Subject: sched: Sched_rt_periodic_timer vs cpu hotplug
+
+From: Peter Zijlstra <a.p.zijlstra@chello.nl>
+
+commit 047106adcc85e3023da210143a6ab8a55df9e0fc upstream.
+
+Heiko reported a case where a timer interrupt managed to
+reference a root_domain structure that was already freed by a
+concurrent hot-un-plug operation.
+
+Solve this like the regular sched_domain stuff is also
+synchronized, by adding a synchronize_sched() stmt to the free
+path, this ensures that a root_domain stays present for any
+atomic section that could have observed it.
+
+Reported-by: Heiko Carstens <heiko.carstens@de.ibm.com>
+Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Acked-by: Heiko Carstens <heiko.carstens@de.ibm.com>
+Cc: Gregory Haskins <ghaskins@novell.com>
+Cc: Siddha Suresh B <suresh.b.siddha@intel.com>
+Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
+LKML-Reference: <1258363873.26714.83.camel@laptop>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ kernel/sched.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -7925,6 +7925,8 @@ sd_parent_degenerate(struct sched_domain
+
+ static void free_rootdomain(struct root_domain *rd)
+ {
++ synchronize_sched();
++
+ cpupri_cleanup(&rd->cpupri);
+
+ free_cpumask_var(rd->rto_mask);
--- /dev/null
+From e4f4288842ee12747e10c354d72be7d424c0b627 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Date: Wed, 16 Dec 2009 18:04:34 +0100
+Subject: sched: Select_task_rq_fair() must honour SD_LOAD_BALANCE
+
+From: Peter Zijlstra <a.p.zijlstra@chello.nl>
+
+commit e4f4288842ee12747e10c354d72be7d424c0b627 upstream.
+
+We should skip !SD_LOAD_BALANCE domains.
+
+Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Cc: Mike Galbraith <efault@gmx.de>
+LKML-Reference: <20091216170517.653578430@chello.nl>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ kernel/sched_fair.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/kernel/sched_fair.c
++++ b/kernel/sched_fair.c
+@@ -1374,6 +1374,9 @@ static int select_task_rq_fair(struct ta
+
+ rcu_read_lock();
+ for_each_domain(cpu, tmp) {
++ if (!(tmp->flags & SD_LOAD_BALANCE))
++ continue;
++
+ /*
+ * If power savings logic is enabled for a domain, see if we
+ * are not overloaded, if so, don't balance wider.
--- /dev/null
+From 48de68a40aef032a2e198437f4781a83bfb938db Mon Sep 17 00:00:00 2001
+From: Mike Christie <michaelc@cs.wisc.edu>
+Date: Tue, 17 Nov 2009 21:25:16 -0600
+Subject: SCSI: fc class: fix fc_transport_init error handling
+
+From: Mike Christie <michaelc@cs.wisc.edu>
+
+commit 48de68a40aef032a2e198437f4781a83bfb938db upstream.
+
+If transport_class_register fails we should unregister any
+registered classes, or we will leak memory or other
+resources.
+
+I did a quick modprobe of scsi_transport_fc to test the
+patch.
+
+Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
+Signed-off-by: James Bottomley <James.Bottomley@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/scsi/scsi_transport_fc.c | 17 ++++++++++++++---
+ 1 file changed, 14 insertions(+), 3 deletions(-)
+
+--- a/drivers/scsi/scsi_transport_fc.c
++++ b/drivers/scsi/scsi_transport_fc.c
+@@ -648,11 +648,22 @@ static __init int fc_transport_init(void
+ return error;
+ error = transport_class_register(&fc_vport_class);
+ if (error)
+- return error;
++ goto unreg_host_class;
+ error = transport_class_register(&fc_rport_class);
+ if (error)
+- return error;
+- return transport_class_register(&fc_transport_class);
++ goto unreg_vport_class;
++ error = transport_class_register(&fc_transport_class);
++ if (error)
++ goto unreg_rport_class;
++ return 0;
++
++unreg_rport_class:
++ transport_class_unregister(&fc_rport_class);
++unreg_vport_class:
++ transport_class_unregister(&fc_vport_class);
++unreg_host_class:
++ transport_class_unregister(&fc_host_class);
++ return error;
+ }
+
+ static void __exit fc_transport_exit(void)
--- /dev/null
+From 99c965dd9ee1a004efc083c3d760ba982bb76adf Mon Sep 17 00:00:00 2001
+From: Kleber Sacilotto de Souza <klebers@linux.vnet.ibm.com>
+Date: Wed, 25 Nov 2009 20:13:43 -0200
+Subject: SCSI: ipr: fix EEH recovery
+
+From: Kleber Sacilotto de Souza <klebers@linux.vnet.ibm.com>
+
+commit 99c965dd9ee1a004efc083c3d760ba982bb76adf upstream.
+
+After commits c82f63e411f1b58427c103bd95af2863b1c96dd1 (PCI: check saved
+state before restore) and 4b77b0a2ba27d64f58f16d8d4d48d8319dda36ff (PCI:
+Clear saved_state after the state has been restored) PCI drivers are
+prevented from restoring the device standard configuration registers
+twice in a row. These changes introduced a regression on ipr EEH
+recovery.
+
+The ipr device driver saves the PCI state only during the device probe
+and restores it on ipr_reset_restore_cfg_space() during IOA resets. This
+behavior is causing the EEH recovery to fail after the second error
+detected, since the registers are not being restored.
+
+One possible solution would be saving the registers after restoring
+them. The problem with this approach is that while recovering from an
+EEH error if pci_save_state() results in an EEH error, the adapter/slot
+will be reset, and end up back in ipr_reset_restore_cfg_space(), but it
+won't have a valid saved state to restore, so pci_restore_state() will
+fail.
+
+The following patch introduces a workaround for this problem, hacking
+around the PCI API by setting pdev->state_saved = true before we do the
+restore. It fixes the EEH regression and prevents that we hit another
+EEH error during EEH recovery.
+
+
+[jejb: fix is a hack ... Jesse and Rafael will fix properly]
+Signed-off-by: Kleber Sacilotto de Souza <klebers@linux.vnet.ibm.com>
+Acked-by: Brian King <brking@linux.vnet.ibm.com>
+Cc: Jesse Barnes <jbarnes@virtuousgeek.org>
+Signed-off-by: James Bottomley <James.Bottomley@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/scsi/ipr.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/scsi/ipr.c
++++ b/drivers/scsi/ipr.c
+@@ -6516,6 +6516,7 @@ static int ipr_reset_restore_cfg_space(s
+ int rc;
+
+ ENTER;
++ ioa_cfg->pdev->state_saved = true;
+ rc = pci_restore_state(ioa_cfg->pdev);
+
+ if (rc != PCIBIOS_SUCCESSFUL) {
--- /dev/null
+From 1486400f7edd009d49550da968d5744e246dc7f8 Mon Sep 17 00:00:00 2001
+From: Michael Reed <mdr@sgi.com>
+Date: Wed, 2 Dec 2009 09:11:16 -0600
+Subject: SCSI: qla2xxx: dpc thread can execute before scsi host has been added
+
+From: Michael Reed <mdr@sgi.com>
+
+commit 1486400f7edd009d49550da968d5744e246dc7f8 upstream.
+
+Fix crash in qla2x00_fdmi_register() due to the dpc
+thread executing before the scsi host has been fully
+added.
+
+Unable to handle kernel NULL pointer dereference (address 00000000000001d0)
+qla2xxx_7_dpc[4140]: Oops 8813272891392 [1]
+
+Call Trace:
+ [<a000000100016910>] show_stack+0x50/0xa0
+ sp=e00000b07c59f930 bsp=e00000b07c591400
+ [<a000000100017180>] show_regs+0x820/0x860
+ sp=e00000b07c59fb00 bsp=e00000b07c5913a0
+ [<a00000010003bd60>] die+0x1a0/0x2e0
+ sp=e00000b07c59fb00 bsp=e00000b07c591360
+ [<a0000001000681a0>] ia64_do_page_fault+0x8c0/0x9e0
+ sp=e00000b07c59fb00 bsp=e00000b07c591310
+ [<a00000010000c8e0>] ia64_native_leave_kernel+0x0/0x270
+ sp=e00000b07c59fb90 bsp=e00000b07c591310
+ [<a000000207197350>] qla2x00_fdmi_register+0x850/0xbe0 [qla2xxx]
+ sp=e00000b07c59fd60 bsp=e00000b07c591290
+ [<a000000207171570>] qla2x00_configure_loop+0x1930/0x34c0 [qla2xxx]
+ sp=e00000b07c59fd60 bsp=e00000b07c591128
+ [<a0000002071732b0>] qla2x00_loop_resync+0x1b0/0x2e0 [qla2xxx]
+ sp=e00000b07c59fdf0 bsp=e00000b07c5910c0
+ [<a000000207166d40>] qla2x00_do_dpc+0x9a0/0xce0 [qla2xxx]
+ sp=e00000b07c59fdf0 bsp=e00000b07c590fa0
+ [<a0000001000d5bb0>] kthread+0x110/0x140
+ sp=e00000b07c59fe00 bsp=e00000b07c590f68
+ [<a000000100014a30>] kernel_thread_helper+0xd0/0x100
+ sp=e00000b07c59fe30 bsp=e00000b07c590f40
+ [<a00000010000a4c0>] start_kernel_thread+0x20/0x40
+ sp=e00000b07c59fe30 bsp=e00000b07c590f40
+
+crash> dis a000000207197350
+0xa000000207197350 <qla2x00_fdmi_register+2128>: [MMI] ld1 r45=[r14];;
+crash> scsi_qla_host.host 0xe00000b058c73ff8
+ host = 0xe00000b058c73be0,
+crash> Scsi_Host.shost_data 0xe00000b058c73be0
+ shost_data = 0x0, <<<<<<<<<<<
+
+The fc_transport fc_* workqueue threads have yet to be created.
+
+crash> ps | grep _7
+ 3891 2 2 e00000b075c80000 IN 0.0 0 0 [scsi_eh_7]
+ 4140 2 3 e00000b07c590000 RU 0.0 0 0 [qla2xxx_7_dpc]
+
+The thread creating and adding the Scsi_Host is blocked due to other
+activity in sysfs.
+
+crash> bt 3762
+PID: 3762 TASK: e00000b071e70000 CPU: 3 COMMAND: "modprobe"
+ #0 [BSP:e00000b071e71548] schedule at a000000100727e00
+ #1 [BSP:e00000b071e714c8] __mutex_lock_slowpath at a0000001007295a0
+ #2 [BSP:e00000b071e714a8] mutex_lock at a000000100729830
+ #3 [BSP:e00000b071e71478] sysfs_addrm_start at a0000001002584f0
+ #4 [BSP:e00000b071e71440] create_dir at a000000100259350
+ #5 [BSP:e00000b071e71410] sysfs_create_subdir at a000000100259510
+ #6 [BSP:e00000b071e713b0] internal_create_group at a00000010025c880
+ #7 [BSP:e00000b071e71388] sysfs_create_group at a00000010025cc50
+ #8 [BSP:e00000b071e71368] dpm_sysfs_add at a000000100425050
+ #9 [BSP:e00000b071e71310] device_add at a000000100417d90
+#10 [BSP:e00000b071e712d8] scsi_add_host at a00000010045a380
+#11 [BSP:e00000b071e71268] qla2x00_probe_one at a0000002071be950
+#12 [BSP:e00000b071e71248] local_pci_probe at a00000010032e490
+#13 [BSP:e00000b071e71218] pci_device_probe at a00000010032ecd0
+#14 [BSP:e00000b071e711d8] driver_probe_device at a00000010041d480
+#15 [BSP:e00000b071e711a8] __driver_attach at a00000010041d6e0
+#16 [BSP:e00000b071e71170] bus_for_each_dev at a00000010041c240
+#17 [BSP:e00000b071e71150] driver_attach at a00000010041d0a0
+#18 [BSP:e00000b071e71108] bus_add_driver at a00000010041b080
+#19 [BSP:e00000b071e710c0] driver_register at a00000010041dea0
+#20 [BSP:e00000b071e71088] __pci_register_driver at a00000010032f610
+#21 [BSP:e00000b071e71058] (unknown) at a000000207200270
+#22 [BSP:e00000b071e71018] do_one_initcall at a00000010000a9c0
+#23 [BSP:e00000b071e70f98] sys_init_module at a0000001000fef00
+#24 [BSP:e00000b071e70f98] ia64_ret_from_syscall at a00000010000c740
+
+So, it appears that the qla2xxx dpc thread is moving forward before the
+scsi host has been completely added.
+
+This patch moves the setting of the init_done (and online) flag to
+after the call to scsi_add_host() to hold off the dpc thread.
+
+Found via large lun count testing using 2.6.31.
+
+Signed-off-by: Michael Reed <mdr@sgi.com>
+Acked-by: Giridhar Malavali <giridhar.malavali@qlogic.com>
+Signed-off-by: James Bottomley <James.Bottomley@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/scsi/qla2xxx/qla_os.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -2016,13 +2016,13 @@ skip_dpc:
+ DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n",
+ base_vha->host_no, ha));
+
+- base_vha->flags.init_done = 1;
+- base_vha->flags.online = 1;
+-
+ ret = scsi_add_host(host, &pdev->dev);
+ if (ret)
+ goto probe_failed;
+
++ base_vha->flags.init_done = 1;
++ base_vha->flags.online = 1;
++
+ ha->isp_ops->enable_intrs(ha);
+
+ scsi_scan_host(host);
--- /dev/null
+From c982c368bb90adbd312faa05d0cfd842e9ab45a7 Mon Sep 17 00:00:00 2001
+From: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
+Date: Thu, 26 Nov 2009 09:24:13 +0900
+Subject: SCSI: st: fix mdata->page_order handling
+
+From: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
+
+commit c982c368bb90adbd312faa05d0cfd842e9ab45a7 upstream.
+
+dio transfer always resets mdata->page_order to zero. It breaks
+high-order pages previously allocated for non-dio transfer.
+
+This patches adds reserved_page_order to st_buffer structure to save
+page order for non-dio transfer.
+
+http://bugzilla.kernel.org/show_bug.cgi?id=14563
+
+When enlarge_buffer() allocates 524288 from 0, st uses six-order page
+allocation. So mdata->page_order is 6 and frp_seg is 2.
+
+After that, if st uses dio, sgl_map_user_pages() sets
+mdata->page_order to 0 for st_do_scsi(). After that, when we call
+normalize_buffer(), it frees only frp_seg * PAGE_SIZE (2 * 4096)
+though we should free frp_seg * PAGE_SIZE << 6 (2 * 4096 << 6). So we
+see buffer_size is set to 516096 (524288 - 8192).
+
+Reported-by: Joachim Breuer <linux-kernel@jmbreuer.net>
+Tested-by: Joachim Breuer <linux-kernel@jmbreuer.net>
+Acked-by: Kai Makisara <kai.makisara@kolumbus.fi>
+Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
+Signed-off-by: James Bottomley <James.Bottomley@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/scsi/st.c | 23 ++++++++++++-----------
+ drivers/scsi/st.h | 1 +
+ 2 files changed, 13 insertions(+), 11 deletions(-)
+
+--- a/drivers/scsi/st.c
++++ b/drivers/scsi/st.c
+@@ -552,13 +552,15 @@ st_do_scsi(struct st_request * SRpnt, st
+ SRpnt->waiting = waiting;
+
+ if (STp->buffer->do_dio) {
++ mdata->page_order = 0;
+ mdata->nr_entries = STp->buffer->sg_segs;
+ mdata->pages = STp->buffer->mapped_pages;
+ } else {
++ mdata->page_order = STp->buffer->reserved_page_order;
+ mdata->nr_entries =
+ DIV_ROUND_UP(bytes, PAGE_SIZE << mdata->page_order);
+- STp->buffer->map_data.pages = STp->buffer->reserved_pages;
+- STp->buffer->map_data.offset = 0;
++ mdata->pages = STp->buffer->reserved_pages;
++ mdata->offset = 0;
+ }
+
+ memcpy(SRpnt->cmd, cmd, sizeof(SRpnt->cmd));
+@@ -3718,7 +3720,7 @@ static int enlarge_buffer(struct st_buff
+ priority |= __GFP_ZERO;
+
+ if (STbuffer->frp_segs) {
+- order = STbuffer->map_data.page_order;
++ order = STbuffer->reserved_page_order;
+ b_size = PAGE_SIZE << order;
+ } else {
+ for (b_size = PAGE_SIZE, order = 0;
+@@ -3751,7 +3753,7 @@ static int enlarge_buffer(struct st_buff
+ segs++;
+ }
+ STbuffer->b_data = page_address(STbuffer->reserved_pages[0]);
+- STbuffer->map_data.page_order = order;
++ STbuffer->reserved_page_order = order;
+
+ return 1;
+ }
+@@ -3764,7 +3766,7 @@ static void clear_buffer(struct st_buffe
+
+ for (i=0; i < st_bp->frp_segs; i++)
+ memset(page_address(st_bp->reserved_pages[i]), 0,
+- PAGE_SIZE << st_bp->map_data.page_order);
++ PAGE_SIZE << st_bp->reserved_page_order);
+ st_bp->cleared = 1;
+ }
+
+@@ -3772,7 +3774,7 @@ static void clear_buffer(struct st_buffe
+ /* Release the extra buffer */
+ static void normalize_buffer(struct st_buffer * STbuffer)
+ {
+- int i, order = STbuffer->map_data.page_order;
++ int i, order = STbuffer->reserved_page_order;
+
+ for (i = 0; i < STbuffer->frp_segs; i++) {
+ __free_pages(STbuffer->reserved_pages[i], order);
+@@ -3780,7 +3782,7 @@ static void normalize_buffer(struct st_b
+ }
+ STbuffer->frp_segs = 0;
+ STbuffer->sg_segs = 0;
+- STbuffer->map_data.page_order = 0;
++ STbuffer->reserved_page_order = 0;
+ STbuffer->map_data.offset = 0;
+ }
+
+@@ -3790,7 +3792,7 @@ static void normalize_buffer(struct st_b
+ static int append_to_buffer(const char __user *ubp, struct st_buffer * st_bp, int do_count)
+ {
+ int i, cnt, res, offset;
+- int length = PAGE_SIZE << st_bp->map_data.page_order;
++ int length = PAGE_SIZE << st_bp->reserved_page_order;
+
+ for (i = 0, offset = st_bp->buffer_bytes;
+ i < st_bp->frp_segs && offset >= length; i++)
+@@ -3822,7 +3824,7 @@ static int append_to_buffer(const char _
+ static int from_buffer(struct st_buffer * st_bp, char __user *ubp, int do_count)
+ {
+ int i, cnt, res, offset;
+- int length = PAGE_SIZE << st_bp->map_data.page_order;
++ int length = PAGE_SIZE << st_bp->reserved_page_order;
+
+ for (i = 0, offset = st_bp->read_pointer;
+ i < st_bp->frp_segs && offset >= length; i++)
+@@ -3855,7 +3857,7 @@ static void move_buffer_data(struct st_b
+ {
+ int src_seg, dst_seg, src_offset = 0, dst_offset;
+ int count, total;
+- int length = PAGE_SIZE << st_bp->map_data.page_order;
++ int length = PAGE_SIZE << st_bp->reserved_page_order;
+
+ if (offset == 0)
+ return;
+@@ -4577,7 +4579,6 @@ static int sgl_map_user_pages(struct st_
+ }
+
+ mdata->offset = uaddr & ~PAGE_MASK;
+- mdata->page_order = 0;
+ STbp->mapped_pages = pages;
+
+ return nr_pages;
+--- a/drivers/scsi/st.h
++++ b/drivers/scsi/st.h
+@@ -46,6 +46,7 @@ struct st_buffer {
+ struct st_request *last_SRpnt;
+ struct st_cmdstatus cmdstat;
+ struct page **reserved_pages;
++ int reserved_page_order;
+ struct page **mapped_pages;
+ struct rq_map_data map_data;
+ unsigned char *b_data;
--- /dev/null
+scsi-ipr-fix-eeh-recovery.patch
+scsi-qla2xxx-dpc-thread-can-execute-before-scsi-host-has-been-added.patch
+scsi-st-fix-mdata-page_order-handling.patch
+scsi-fc-class-fix-fc_transport_init-error-handling.patch
+sched-fix-task_hot-test-order.patch
+x86-cpuid-add-volatile-to-asm-in-native_cpuid.patch
+sched-select_task_rq_fair-must-honour-sd_load_balance.patch
+clockevents-prevent-clockevent_devices-list-corruption-on-cpu-hotplug.patch
+pata_hpt3x2n-fix-clock-turnaround.patch
+pata_cmd64x-fix-overclocking-of-udma0-2-modes.patch
+asoc-wm8974-fix-a-wrong-bit-definition.patch
+sound-sgio2audio-pdaudiocf-usb-audio-initialize-pcm-buffer.patch
+alsa-hda-fix-missing-capsrc_nids-for-alc88x.patch
+acerhdf-limit-modalias-matching-to-supported.patch
+acpi-ec-fix-msi-dmi-detection.patch
+acpi-use-the-return-result-of-acpi-lid-notifier-chain-correctly.patch
+powerpc-handle-vsx-alignment-faults-correctly-in-little-endian-mode.patch
+asoc-do-not-write-to-invalid-registers-on-the-wm9712.patch
+drm-radeon-fix-build-on-64-bit-with-some-compilers.patch
+usb-emi62-fix-crash-when-trying-to-load-emi-6-2-firmware.patch
+usb-option-support-hi-speed-for-modem-haier-ce100.patch
+usb-fix-a-bug-on-appledisplay.c-regarding-signedness.patch
+usb-musb-gadget_ep0-avoid-setupend-interrupt.patch
+bluetooth-prevent-ill-timed-autosuspend-in-usb-driver.patch
+usb-rename-usb_configure_device.patch
+usb-fix-bugs-in-usb_-de-authorize_device.patch
+drivers-net-usb-correct-code-taking-the-size-of-a-pointer.patch
+x86-sgi-uv-fix-writes-to-led-registers-on-remote-uv-hubs.patch
+md-fix-unfortunate-interaction-with-evms.patch
+dma-at_hdmac-correct-incompatible-type-for-argument-1-of-spin_lock_bh.patch
+dma-debug-do-not-add-notifier-when-dma-debugging-is-disabled.patch
+dma-debug-fix-bug-causing-build-warning.patch
+cifs-null-out-tcon-psesinfo-and-srvtcp-pointers-when-chasing-dfs-referrals.patch
+x86-amd-iommu-fix-initialization-failure-panic.patch
+ioat3-fix-p-disabled-q-continuation.patch
+ioat2-3-put-channel-hardware-in-known-state-at-init.patch
+kvm-mmu-remove-prefault-from-invlpg-handler.patch
+kvm-lapic-make-sure-irr-bitmap-is-scanned-after-vm-load.patch
+libertas-fix-buffer-overflow-in-lbs_get_essid.patch
+iwmc3200wifi-fix-array-out-of-boundary-access.patch
+mac80211-fix-propagation-of-failed-hardware-reconfigurations.patch
+mac80211-fix-wmm-ap-settings-application.patch
+mac80211-fix-ibss-merge.patch
+cfg80211-fix-race-between-deauth-and-assoc-response.patch
+ath5k-fix-swi-calibration-interrupt-storm.patch
+ath9k-wake-hardware-for-interface-ibss-ap-mesh-removal.patch
+ath9k-fix-tx-queue-draining.patch
+ath9k-fix-missed-error-codes-in-the-tx-status-check.patch
+ath9k-wake-hardware-during-ampdu-tx-actions.patch
+ath9k-fix-suspend-by-waking-device-prior-to-stop.patch
+ath9k_hw-fix-possible-oob-array-indexing-in-gen_timer_index-on-64-bit.patch
+ath9k_hw-fix-ar_gpio_input_en_val_bt_priority_bb-and-its-shift-value-in-0x4054.patch
+iwl3945-disable-power-save.patch
+iwl3945-fix-panic-in-iwl3945-driver.patch
+iwlwifi-fix-eeprom-otp-reading-endian-annotations-and-a-bug.patch
+iwlwifi-fix-more-eeprom-endian-bugs.patch
+iwlwifi-fix-40mhz-operation-setting-on-cards-that-do-not-allow-it.patch
+mac80211-fix-race-with-suspend-and-dynamic_ps_disable_work.patch
+nommu-optimise-away-the-dac_-mmap_min_addr-tests.patch
+sysctl_max_map_count-should-be-non-negative.patch
+kernel-sysctl.c-fix-the-incomplete-part-of-sysctl_max_map_count-should-be-non-negative.patch.patch
+v4l-dvb-13596-ov511.c-typo-lock-unlock.patch
+x86-ptrace-make-genregs_get-set-more-robust.patch
+memcg-avoid-oom-killing-innocent-task-in-case-of-use_hierarchy.patch
+e100-fix-broken-cbs-accounting-due-to-missing-memset.patch
+ipv6-reassembly-use-seperate-reassembly-queues-for-conntrack-and-local-delivery.patch
+netfilter-fix-crashes-in-bridge-netfilter-caused-by-fragment-jumps.patch
+hwmon-sht15-off-by-one-error-in-array-index-incorrect-constants.patch
+b43-avoid-ppc-fault-during-resume.patch
+keys-keyctl_session_to_parent-needs-tif_notify_resume-architecture-support.patch
+sched-fix-balance-vs-hotplug-race.patch
+drm-radeon-kms-fix-crtc-vblank-update-for-r600.patch
+drm-disable-all-the-possible-outputs-crtcs-before-entering-kms-mode.patch
+s390-dasd-support-diag-access-for-read-only-devices.patch
+xen-fix-is_disconnected_device-exists_disconnected_device.patch
+xen-improvement-to-wait_for_devices.patch
+xen-wait-up-to-5-minutes-for-device-connetion.patch
+orinoco-fix-gfp_kernel-in-orinoco_set_key-with-interrupts-disabled.patch
+udf-try-harder-when-looking-for-vat-inode.patch
+add-unlocked-version-of-inode_add_bytes-function.patch
+quota-decouple-fs-reserved-space-from-quota-reservation.patch
+ext4-convert-to-generic-reserved-quota-s-space-management.patch
+ext4-fix-potential-quota-deadlock.patch
+ext4-fix-sleep-inside-spinlock-issue-with-quota-and-dealloc-14739.patch
+x86-msr-unify-rdmsr_on_cpus-wrmsr_on_cpus.patch
+cpumask-use-modern-cpumask-style-in-drivers-edac-amd64_edac.c.patch
+amd64_edac-unify-mcgctl-ecc-switching.patch
+x86-msr-add-support-for-non-contiguous-cpumasks.patch
+x86-msr-msrs_alloc-free-for-config_smp-n.patch
+amd64_edac-fix-driver-instance-freeing.patch
+amd64_edac-make-driver-loading-more-robust.patch
+amd64_edac-fix-forcing-module-load-unload.patch
+sched-sched_rt_periodic_timer-vs-cpu-hotplug.patch
+ext4-update-documentation-to-correct-the-inode_readahead_blks-option-name.patch
+lguest-fix-bug-in-setting-guest-gdt-entry.patch
+vmscan-do-not-evict-inactive-pages-when-skipping-an-active-list-scan.patch
+ksm-fix-mlockfreed-to-munlocked.patch
+rt2x00-disable-powersaving-for-rt61pci-and-rt2800pci.patch
+generic_permission-may_open-is-not-write-access.patch
--- /dev/null
+From 3e85fd614c7b6bb7f33bb04a0dcb5a3bfca4c0fe Mon Sep 17 00:00:00 2001
+From: Clemens Ladisch <clemens@ladisch.de>
+Date: Fri, 18 Dec 2009 09:27:24 +0100
+Subject: sound: sgio2audio/pdaudiocf/usb-audio: initialize PCM buffer
+
+From: Clemens Ladisch <clemens@ladisch.de>
+
+commit 3e85fd614c7b6bb7f33bb04a0dcb5a3bfca4c0fe upstream.
+
+When allocating the PCM buffer, use vmalloc_user() instead of vmalloc().
+Otherwise, it would be possible for applications to play the previous
+contents of the kernel memory to the speakers, or to read it directly if
+the buffer is exported to userspace.
+
+Signed-off-by: Clemens Ladisch <clemens@ladisch.de>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ sound/mips/sgio2audio.c | 2 +-
+ sound/pcmcia/pdaudiocf/pdaudiocf_pcm.c | 2 +-
+ sound/usb/usbaudio.c | 2 +-
+ 3 files changed, 3 insertions(+), 3 deletions(-)
+
+--- a/sound/mips/sgio2audio.c
++++ b/sound/mips/sgio2audio.c
+@@ -609,7 +609,7 @@ static int snd_sgio2audio_pcm_hw_params(
+ /* alloc virtual 'dma' area */
+ if (runtime->dma_area)
+ vfree(runtime->dma_area);
+- runtime->dma_area = vmalloc(size);
++ runtime->dma_area = vmalloc_user(size);
+ if (runtime->dma_area == NULL)
+ return -ENOMEM;
+ runtime->dma_bytes = size;
+--- a/sound/pcmcia/pdaudiocf/pdaudiocf_pcm.c
++++ b/sound/pcmcia/pdaudiocf/pdaudiocf_pcm.c
+@@ -51,7 +51,7 @@ static int snd_pcm_alloc_vmalloc_buffer(
+ return 0; /* already enough large */
+ vfree(runtime->dma_area);
+ }
+- runtime->dma_area = vmalloc_32(size);
++ runtime->dma_area = vmalloc_32_user(size);
+ if (! runtime->dma_area)
+ return -ENOMEM;
+ runtime->dma_bytes = size;
+--- a/sound/usb/usbaudio.c
++++ b/sound/usb/usbaudio.c
+@@ -752,7 +752,7 @@ static int snd_pcm_alloc_vmalloc_buffer(
+ return 0; /* already large enough */
+ vfree(runtime->dma_area);
+ }
+- runtime->dma_area = vmalloc(size);
++ runtime->dma_area = vmalloc_user(size);
+ if (!runtime->dma_area)
+ return -ENOMEM;
+ runtime->dma_bytes = size;
--- /dev/null
+From 70da2340fbc68e91e701762f785479ab495a0869 Mon Sep 17 00:00:00 2001
+From: Amerigo Wang <amwang@redhat.com>
+Date: Mon, 14 Dec 2009 17:59:52 -0800
+Subject: 'sysctl_max_map_count' should be non-negative
+
+From: Amerigo Wang <amwang@redhat.com>
+
+commit 70da2340fbc68e91e701762f785479ab495a0869 upstream.
+
+Jan Engelhardt reported we have this problem:
+
+setting max_map_count to a value large enough results in programs dying at
+first try. This is on 2.6.31.6:
+
+15:59 borg:/proc/sys/vm # echo $[1<<31-1] >max_map_count
+15:59 borg:/proc/sys/vm # cat max_map_count
+1073741824
+15:59 borg:/proc/sys/vm # echo $[1<<31] >max_map_count
+15:59 borg:/proc/sys/vm # cat max_map_count
+Killed
+
+This is because we have a chance to make 'max_map_count' negative. but
+it's meaningless. Make it only accept non-negative values.
+
+Reported-by: Jan Engelhardt <jengelh@medozas.de>
+Signed-off-by: WANG Cong <amwang@redhat.com>
+Cc: Ingo Molnar <mingo@elte.hu>
+Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Cc: James Morris <jmorris@namei.org>
+Cc: Alexey Dobriyan <adobriyan@gmail.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ kernel/sysctl.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -1607,7 +1607,8 @@ static struct ctl_table debug_table[] =
+ .data = &show_unhandled_signals,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+- .proc_handler = proc_dointvec
++ .proc_handler = proc_dointvec,
++ .extra1 = &zero,
+ },
+ #endif
+ { .ctl_name = 0 }
--- /dev/null
+From e971b0b9e0dd50d9ceecb67a6a6ab80a80906033 Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Mon, 30 Nov 2009 19:47:55 +0100
+Subject: udf: Try harder when looking for VAT inode
+
+From: Jan Kara <jack@suse.cz>
+
+commit e971b0b9e0dd50d9ceecb67a6a6ab80a80906033 upstream.
+
+Some disks do not contain the VAT inode in the last recorded block as required
+by the standard but a few blocks earlier (or the number of recorded blocks
+is wrong). So look for the VAT inode a bit before the end of the media.
+
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/udf/super.c | 32 ++++++++++++++++++++++++--------
+ 1 file changed, 24 insertions(+), 8 deletions(-)
+
+--- a/fs/udf/super.c
++++ b/fs/udf/super.c
+@@ -1078,21 +1078,39 @@ static int udf_fill_partdesc_info(struct
+ return 0;
+ }
+
+-static int udf_load_vat(struct super_block *sb, int p_index, int type1_index)
++static void udf_find_vat_block(struct super_block *sb, int p_index,
++ int type1_index, sector_t start_block)
+ {
+ struct udf_sb_info *sbi = UDF_SB(sb);
+ struct udf_part_map *map = &sbi->s_partmaps[p_index];
++ sector_t vat_block;
+ struct kernel_lb_addr ino;
++
++ /*
++ * VAT file entry is in the last recorded block. Some broken disks have
++ * it a few blocks before so try a bit harder...
++ */
++ ino.partitionReferenceNum = type1_index;
++ for (vat_block = start_block;
++ vat_block >= map->s_partition_root &&
++ vat_block >= start_block - 3 &&
++ !sbi->s_vat_inode; vat_block--) {
++ ino.logicalBlockNum = vat_block - map->s_partition_root;
++ sbi->s_vat_inode = udf_iget(sb, &ino);
++ }
++}
++
++static int udf_load_vat(struct super_block *sb, int p_index, int type1_index)
++{
++ struct udf_sb_info *sbi = UDF_SB(sb);
++ struct udf_part_map *map = &sbi->s_partmaps[p_index];
+ struct buffer_head *bh = NULL;
+ struct udf_inode_info *vati;
+ uint32_t pos;
+ struct virtualAllocationTable20 *vat20;
+ sector_t blocks = sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits;
+
+- /* VAT file entry is in the last recorded block */
+- ino.partitionReferenceNum = type1_index;
+- ino.logicalBlockNum = sbi->s_last_block - map->s_partition_root;
+- sbi->s_vat_inode = udf_iget(sb, &ino);
++ udf_find_vat_block(sb, p_index, type1_index, sbi->s_last_block);
+ if (!sbi->s_vat_inode &&
+ sbi->s_last_block != blocks - 1) {
+ printk(KERN_NOTICE "UDF-fs: Failed to read VAT inode from the"
+@@ -1100,9 +1118,7 @@ static int udf_load_vat(struct super_blo
+ "block of the device (%lu).\n",
+ (unsigned long)sbi->s_last_block,
+ (unsigned long)blocks - 1);
+- ino.partitionReferenceNum = type1_index;
+- ino.logicalBlockNum = blocks - 1 - map->s_partition_root;
+- sbi->s_vat_inode = udf_iget(sb, &ino);
++ udf_find_vat_block(sb, p_index, type1_index, blocks - 1);
+ }
+ if (!sbi->s_vat_inode)
+ return 1;
--- /dev/null
+From ac06c06770bb8761b1f1f9bdf2f5420fa6d3e9fa Mon Sep 17 00:00:00 2001
+From: Clemens Ladisch <clemens@ladisch.de>
+Date: Mon, 21 Dec 2009 15:36:44 -0800
+Subject: USB: emi62: fix crash when trying to load EMI 6|2 firmware
+
+From: Clemens Ladisch <clemens@ladisch.de>
+
+commit ac06c06770bb8761b1f1f9bdf2f5420fa6d3e9fa upstream.
+
+While converting emi62 to use request_firmware(), the driver was also
+changed to use the ihex helper functions. However, this broke the loading
+of the FPGA firmware because the code tries to access the addr field of
+the EOF record which works with a plain array that has an empty last
+record but not with the ihex helper functions where the end of the data is
+signaled with a NULL record pointer, resulting in:
+
+BUG: unable to handle kernel NULL pointer dereference at (null)
+IP: [<f80d248c>] emi62_load_firmware+0x33c/0x740 [emi62]
+
+This can be fixed by changing the loop condition to test the return value
+of ihex_next_binrec() directly (like in emi26.c).
+
+Signed-off-by: Clemens Ladisch <clemens@ladisch.de>
+Reported-and-tested-by: Der Mickster <retroeffective@gmail.com>
+Acked-by: David Woodhouse <David.Woodhouse@intel.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/usb/misc/emi62.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/usb/misc/emi62.c
++++ b/drivers/usb/misc/emi62.c
+@@ -167,7 +167,7 @@ static int emi62_load_firmware (struct u
+ err("%s - error loading firmware: error = %d", __func__, err);
+ goto wraperr;
+ }
+- } while (i > 0);
++ } while (rec);
+
+ /* Assert reset (stop the CPU in the EMI) */
+ err = emi62_set_reset(dev,1);
--- /dev/null
+From 37e9066b2f85480d99d3795373f5ef0b00ac1189 Mon Sep 17 00:00:00 2001
+From: pancho horrillo <pancho@pancho.name>
+Date: Wed, 23 Dec 2009 11:09:13 +0100
+Subject: USB: Fix a bug on appledisplay.c regarding signedness
+
+From: pancho horrillo <pancho@pancho.name>
+
+commit 37e9066b2f85480d99d3795373f5ef0b00ac1189 upstream.
+
+brightness status is reported by the Apple Cinema Displays as an
+'unsigned char' (u8) value, but the code used 'char' instead.
+
+Note that the driver was developed on the PowerPC architecture,
+where the two types are synonymous, which is not always the case.
+
+Fixed that. Otherwise the driver will interpret brightness
+levels > 127 as negative, and fail to load.
+
+Signed-off-by: pancho horrillo <pancho@pancho.name>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/usb/misc/appledisplay.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/usb/misc/appledisplay.c
++++ b/drivers/usb/misc/appledisplay.c
+@@ -72,8 +72,8 @@ struct appledisplay {
+ struct usb_device *udev; /* usb device */
+ struct urb *urb; /* usb request block */
+ struct backlight_device *bd; /* backlight device */
+- char *urbdata; /* interrupt URB data buffer */
+- char *msgdata; /* control message data buffer */
++ u8 *urbdata; /* interrupt URB data buffer */
++ u8 *msgdata; /* control message data buffer */
+
+ struct delayed_work work;
+ int button_pressed;
--- /dev/null
+From da307123c621b01cce147a4be313d8a754674f63 Mon Sep 17 00:00:00 2001
+From: Alan Stern <stern@rowland.harvard.edu>
+Date: Tue, 8 Dec 2009 15:54:44 -0500
+Subject: USB: fix bugs in usb_(de)authorize_device
+
+From: Alan Stern <stern@rowland.harvard.edu>
+
+commit da307123c621b01cce147a4be313d8a754674f63 upstream.
+
+This patch (as1315) fixes some bugs in the USB core authorization
+code:
+
+ usb_deauthorize_device() should deallocate the device strings
+ instead of leaking them, and it should invoke
+ usb_destroy_configuration() (which does proper reference
+ counting) instead of freeing the config information directly.
+
+ usb_authorize_device() shouldn't change the device strings
+ until it knows that the authorization will succeed, and it should
+ autosuspend the device at the end (having autoresumed the
+ device at the start).
+
+ Because the device strings can be changed, the sysfs routines
+ to display the strings must protect the string pointers by
+ locking the device.
+
+Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
+CC: Inaky Perez-Gonzalez <inaky@linux.intel.com>
+Acked-by: David Vrabel <david.vrabel@csr.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/usb/core/hub.c | 32 ++++++++++++++++++++------------
+ drivers/usb/core/sysfs.c | 6 +++++-
+ 2 files changed, 25 insertions(+), 13 deletions(-)
+
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -1803,21 +1803,23 @@ fail:
+ */
+ int usb_deauthorize_device(struct usb_device *usb_dev)
+ {
+- unsigned cnt;
+ usb_lock_device(usb_dev);
+ if (usb_dev->authorized == 0)
+ goto out_unauthorized;
++
+ usb_dev->authorized = 0;
+ usb_set_configuration(usb_dev, -1);
++
++ kfree(usb_dev->product);
+ usb_dev->product = kstrdup("n/a (unauthorized)", GFP_KERNEL);
++ kfree(usb_dev->manufacturer);
+ usb_dev->manufacturer = kstrdup("n/a (unauthorized)", GFP_KERNEL);
++ kfree(usb_dev->serial);
+ usb_dev->serial = kstrdup("n/a (unauthorized)", GFP_KERNEL);
+- kfree(usb_dev->config);
+- usb_dev->config = NULL;
+- for (cnt = 0; cnt < usb_dev->descriptor.bNumConfigurations; cnt++)
+- kfree(usb_dev->rawdescriptors[cnt]);
++
++ usb_destroy_configuration(usb_dev);
+ usb_dev->descriptor.bNumConfigurations = 0;
+- kfree(usb_dev->rawdescriptors);
++
+ out_unauthorized:
+ usb_unlock_device(usb_dev);
+ return 0;
+@@ -1827,15 +1829,11 @@ out_unauthorized:
+ int usb_authorize_device(struct usb_device *usb_dev)
+ {
+ int result = 0, c;
++
+ usb_lock_device(usb_dev);
+ if (usb_dev->authorized == 1)
+ goto out_authorized;
+- kfree(usb_dev->product);
+- usb_dev->product = NULL;
+- kfree(usb_dev->manufacturer);
+- usb_dev->manufacturer = NULL;
+- kfree(usb_dev->serial);
+- usb_dev->serial = NULL;
++
+ result = usb_autoresume_device(usb_dev);
+ if (result < 0) {
+ dev_err(&usb_dev->dev,
+@@ -1848,6 +1846,14 @@ int usb_authorize_device(struct usb_devi
+ "authorization: %d\n", result);
+ goto error_device_descriptor;
+ }
++
++ kfree(usb_dev->product);
++ usb_dev->product = NULL;
++ kfree(usb_dev->manufacturer);
++ usb_dev->manufacturer = NULL;
++ kfree(usb_dev->serial);
++ usb_dev->serial = NULL;
++
+ usb_dev->authorized = 1;
+ result = usb_enumerate_device(usb_dev);
+ if (result < 0)
+@@ -1866,8 +1872,10 @@ int usb_authorize_device(struct usb_devi
+ }
+ }
+ dev_info(&usb_dev->dev, "authorized to connect\n");
++
+ error_enumerate:
+ error_device_descriptor:
++ usb_autosuspend_device(usb_dev);
+ error_autoresume:
+ out_authorized:
+ usb_unlock_device(usb_dev); // complements locktree
+--- a/drivers/usb/core/sysfs.c
++++ b/drivers/usb/core/sysfs.c
+@@ -82,9 +82,13 @@ static ssize_t show_##name(struct devic
+ struct device_attribute *attr, char *buf) \
+ { \
+ struct usb_device *udev; \
++ int retval; \
+ \
+ udev = to_usb_device(dev); \
+- return sprintf(buf, "%s\n", udev->name); \
++ usb_lock_device(udev); \
++ retval = sprintf(buf, "%s\n", udev->name); \
++ usb_unlock_device(udev); \
++ return retval; \
+ } \
+ static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL);
+
--- /dev/null
+From 17be5c5f5ef99c94374e07f71effa78e93a20eda Mon Sep 17 00:00:00 2001
+From: Sergei Shtylyov <sshtylyov@ru.mvista.com>
+Date: Tue, 15 Dec 2009 13:30:01 +0200
+Subject: USB: musb: gadget_ep0: avoid SetupEnd interrupt
+
+From: Sergei Shtylyov <sshtylyov@ru.mvista.com>
+
+commit 17be5c5f5ef99c94374e07f71effa78e93a20eda upstream.
+
+Gadget stalling a zero-length SETUP request results in this error message:
+
+SetupEnd came in a wrong ep0stage idle
+
+In order to avoid it, always set the CSR0.DataEnd bit after detecting a zero-
+length request. Add the missing '\n' to the error message itself as well...
+
+Signed-off-by: Sergei Shtylyov <sshtylyov@ru.mvista.com>
+Acked-by: Anand Gadiyar <gadiyar@ti.com>
+Signed-off-by: Felipe Balbi <felipe.balbi@nokia.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/usb/musb/musb_gadget_ep0.c | 14 ++++++++++----
+ 1 file changed, 10 insertions(+), 4 deletions(-)
+
+--- a/drivers/usb/musb/musb_gadget_ep0.c
++++ b/drivers/usb/musb/musb_gadget_ep0.c
+@@ -647,7 +647,7 @@ irqreturn_t musb_g_ep0_irq(struct musb *
+ musb->ep0_state = MUSB_EP0_STAGE_STATUSIN;
+ break;
+ default:
+- ERR("SetupEnd came in a wrong ep0stage %s",
++ ERR("SetupEnd came in a wrong ep0stage %s\n",
+ decode_ep0stage(musb->ep0_state));
+ }
+ csr = musb_readw(regs, MUSB_CSR0);
+@@ -770,12 +770,18 @@ setup:
+ handled = service_zero_data_request(
+ musb, &setup);
+
++ /*
++ * We're expecting no data in any case, so
++ * always set the DATAEND bit -- doing this
++ * here helps avoid SetupEnd interrupt coming
++ * in the idle stage when we're stalling...
++ */
++ musb->ackpend |= MUSB_CSR0_P_DATAEND;
++
+ /* status stage might be immediate */
+- if (handled > 0) {
+- musb->ackpend |= MUSB_CSR0_P_DATAEND;
++ if (handled > 0)
+ musb->ep0_state =
+ MUSB_EP0_STAGE_STATUSIN;
+- }
+ break;
+
+ /* sequence #1 (IN to host), includes GET_STATUS
--- /dev/null
+From c983202bd03eb82394ef1dce5906702fcbc7bb80 Mon Sep 17 00:00:00 2001
+From: Donny Kurnia <donnykurnia@gmail.com>
+Date: Wed, 23 Dec 2009 19:03:12 +0700
+Subject: USB: option: support hi speed for modem Haier CE100
+
+From: Donny Kurnia <donnykurnia@gmail.com>
+
+commit c983202bd03eb82394ef1dce5906702fcbc7bb80 upstream.
+
+I made this patch for usbserial driver to add the support for EVDO modem
+Haier CE100. The bugs report for this is here:
+https://bugs.launchpad.net/ubuntu/+source/linux/+bug/490068
+
+This patch based on these post:
+http://blankblondtank.wordpress.com/2009/09/04/mengoptimalkan-koneksi-modem-haier-ce-100-cdma-di-linux/
+http://tantos.web.id/blogs/how-to-internet-connection-using-cdma-evdo-modem-and-karmic-koala-ubuntu-9-10
+
+I hope this patch can help others who have the Haier CE100 modem, mostly in my country, Indonesia.
+
+Signed-off-by: Donny Kurnia <donnykurnia@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/usb/serial/option.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -340,6 +340,10 @@ static int option_resume(struct usb_ser
+ #define FOUR_G_SYSTEMS_VENDOR_ID 0x1c9e
+ #define FOUR_G_SYSTEMS_PRODUCT_W14 0x9603
+
++/* Haier products */
++#define HAIER_VENDOR_ID 0x201e
++#define HAIER_PRODUCT_CE100 0x2009
++
+ static struct usb_device_id option_ids[] = {
+ { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
+ { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
+@@ -641,6 +645,7 @@ static struct usb_device_id option_ids[]
+ { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) },
+ { USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) },
+ { USB_DEVICE(FOUR_G_SYSTEMS_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14) },
++ { USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
+ { } /* Terminating entry */
+ };
+ MODULE_DEVICE_TABLE(usb, option_ids);
--- /dev/null
+From 8d8558d10806b7e805cb80df867ebb0a453d4765 Mon Sep 17 00:00:00 2001
+From: Alan Stern <stern@rowland.harvard.edu>
+Date: Tue, 8 Dec 2009 15:50:41 -0500
+Subject: USB: rename usb_configure_device
+
+From: Alan Stern <stern@rowland.harvard.edu>
+
+commit 8d8558d10806b7e805cb80df867ebb0a453d4765 upstream.
+
+This patch (as1314) renames usb_configure_device() and
+usb_configure_device_otg() in the hub driver. Neither name is
+appropriate because these routines enumerate devices, they don't
+configure them. That's handled by usb_choose_configuration() and
+usb_set_configuration().
+
+Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/usb/core/hub.c | 26 +++++++++++++-------------
+ 1 file changed, 13 insertions(+), 13 deletions(-)
+
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -1612,12 +1612,12 @@ static inline void announce_device(struc
+ #endif
+
+ /**
+- * usb_configure_device_otg - FIXME (usbcore-internal)
++ * usb_enumerate_device_otg - FIXME (usbcore-internal)
+ * @udev: newly addressed device (in ADDRESS state)
+ *
+- * Do configuration for On-The-Go devices
++ * Finish enumeration for On-The-Go devices
+ */
+-static int usb_configure_device_otg(struct usb_device *udev)
++static int usb_enumerate_device_otg(struct usb_device *udev)
+ {
+ int err = 0;
+
+@@ -1688,7 +1688,7 @@ fail:
+
+
+ /**
+- * usb_configure_device - Detect and probe device intfs/otg (usbcore-internal)
++ * usb_enumerate_device - Read device configs/intfs/otg (usbcore-internal)
+ * @udev: newly addressed device (in ADDRESS state)
+ *
+ * This is only called by usb_new_device() and usb_authorize_device()
+@@ -1699,7 +1699,7 @@ fail:
+ * the string descriptors, as they will be errored out by the device
+ * until it has been authorized.
+ */
+-static int usb_configure_device(struct usb_device *udev)
++static int usb_enumerate_device(struct usb_device *udev)
+ {
+ int err;
+
+@@ -1723,7 +1723,7 @@ static int usb_configure_device(struct u
+ udev->descriptor.iManufacturer);
+ udev->serial = usb_cache_string(udev, udev->descriptor.iSerialNumber);
+ }
+- err = usb_configure_device_otg(udev);
++ err = usb_enumerate_device_otg(udev);
+ fail:
+ return err;
+ }
+@@ -1733,8 +1733,8 @@ fail:
+ * usb_new_device - perform initial device setup (usbcore-internal)
+ * @udev: newly addressed device (in ADDRESS state)
+ *
+- * This is called with devices which have been enumerated, but not yet
+- * configured. The device descriptor is available, but not descriptors
++ * This is called with devices which have been detected but not fully
++ * enumerated. The device descriptor is available, but not descriptors
+ * for any device configuration. The caller must have locked either
+ * the parent hub (if udev is a normal device) or else the
+ * usb_bus_list_lock (if udev is a root hub). The parent's pointer to
+@@ -1757,8 +1757,8 @@ int usb_new_device(struct usb_device *ud
+ if (udev->parent)
+ usb_autoresume_device(udev->parent);
+
+- usb_detect_quirks(udev); /* Determine quirks */
+- err = usb_configure_device(udev); /* detect & probe dev/intfs */
++ usb_detect_quirks(udev);
++ err = usb_enumerate_device(udev); /* Read descriptors */
+ if (err < 0)
+ goto fail;
+ dev_dbg(&udev->dev, "udev %d, busnum %d, minor = %d\n",
+@@ -1849,9 +1849,9 @@ int usb_authorize_device(struct usb_devi
+ goto error_device_descriptor;
+ }
+ usb_dev->authorized = 1;
+- result = usb_configure_device(usb_dev);
++ result = usb_enumerate_device(usb_dev);
+ if (result < 0)
+- goto error_configure;
++ goto error_enumerate;
+ /* Choose and set the configuration. This registers the interfaces
+ * with the driver core and lets interface drivers bind to them.
+ */
+@@ -1866,7 +1866,7 @@ int usb_authorize_device(struct usb_devi
+ }
+ }
+ dev_info(&usb_dev->dev, "authorized to connect\n");
+-error_configure:
++error_enumerate:
+ error_device_descriptor:
+ error_autoresume:
+ out_authorized:
--- /dev/null
+From 50e9d31183ed61c787b870cb3ee8f6c3db8c8a1e Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <error27@gmail.com>
+Date: Thu, 10 Dec 2009 16:44:51 -0300
+Subject: V4L/DVB (13596): ov511.c typo: lock => unlock
+
+From: Dan Carpenter <error27@gmail.com>
+
+commit 50e9d31183ed61c787b870cb3ee8f6c3db8c8a1e upstream.
+
+This was found with a static checker and has not been tested, but it seems
+pretty clear that the mutex_lock() was supposed to be mutex_unlock()
+
+Signed-off-by: Dan Carpenter <error27@gmail.com>
+Signed-off-by: Douglas Schilling Landgraf <dougsland@redhat.com>
+Signed-off-by: Mauro Carvalho Chehab <mchehab@redhat.com>
+Cc: Brandon Philips <brandon@ifup.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/media/video/ov511.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/media/video/ov511.c
++++ b/drivers/media/video/ov511.c
+@@ -5878,7 +5878,7 @@ ov51x_probe(struct usb_interface *intf,
+ goto error;
+ }
+
+- mutex_lock(&ov->lock);
++ mutex_unlock(&ov->lock);
+
+ return 0;
+
--- /dev/null
+From b39415b2731d7dec5e612d2d12595da82399eedf Mon Sep 17 00:00:00 2001
+From: Rik van Riel <riel@redhat.com>
+Date: Mon, 14 Dec 2009 17:59:48 -0800
+Subject: vmscan: do not evict inactive pages when skipping an active list scan
+
+From: Rik van Riel <riel@redhat.com>
+
+commit b39415b2731d7dec5e612d2d12595da82399eedf upstream.
+
+In AIM7 runs, recent kernels start swapping out anonymous pages well
+before they should. This is due to shrink_list falling through to
+shrink_inactive_list if !inactive_anon_is_low(zone, sc), when all we
+really wanted to do is pre-age some anonymous pages to give them extra
+time to be referenced while on the inactive list.
+
+The obvious fix is to make sure that shrink_list does not fall through to
+scanning/reclaiming inactive pages when we called it to scan one of the
+active lists.
+
+This change should be safe because the loop in shrink_zone ensures that we
+will still shrink the anon and file inactive lists whenever we should.
+
+[kosaki.motohiro@jp.fujitsu.com: inactive_file_is_low() should be inactive_anon_is_low()]
+Reported-by: Larry Woodman <lwoodman@redhat.com>
+Signed-off-by: Rik van Riel <riel@redhat.com>
+Acked-by: Johannes Weiner <hannes@cmpxchg.org>
+Cc: Tomasz Chmielewski <mangoo@wpkg.org>
+Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Rik Theys <rik.theys@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ mm/vmscan.c | 18 ++++++++++++------
+ 1 file changed, 12 insertions(+), 6 deletions(-)
+
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -1464,20 +1464,26 @@ static int inactive_file_is_low(struct z
+ return low;
+ }
+
++static int inactive_list_is_low(struct zone *zone, struct scan_control *sc,
++ int file)
++{
++ if (file)
++ return inactive_file_is_low(zone, sc);
++ else
++ return inactive_anon_is_low(zone, sc);
++}
++
+ static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
+ struct zone *zone, struct scan_control *sc, int priority)
+ {
+ int file = is_file_lru(lru);
+
+- if (lru == LRU_ACTIVE_FILE && inactive_file_is_low(zone, sc)) {
+- shrink_active_list(nr_to_scan, zone, sc, priority, file);
++ if (is_active_lru(lru)) {
++ if (inactive_list_is_low(zone, sc, file))
++ shrink_active_list(nr_to_scan, zone, sc, priority, file);
+ return 0;
+ }
+
+- if (lru == LRU_ACTIVE_ANON && inactive_anon_is_low(zone, sc)) {
+- shrink_active_list(nr_to_scan, zone, sc, priority, file);
+- return 0;
+- }
+ return shrink_inactive_list(nr_to_scan, zone, sc, priority, file);
+ }
+
--- /dev/null
+From 0f764806438d5576ac58898332e5dcf30bb8a679 Mon Sep 17 00:00:00 2001
+From: Joerg Roedel <joerg.roedel@amd.com>
+Date: Mon, 21 Dec 2009 15:51:23 +0100
+Subject: x86/amd-iommu: Fix initialization failure panic
+
+From: Joerg Roedel <joerg.roedel@amd.com>
+
+commit 0f764806438d5576ac58898332e5dcf30bb8a679 upstream.
+
+The assumption that acpi_table_parse passes the return value
+of the hanlder function to the caller proved wrong
+recently. The return value of the handler function is
+totally ignored. This makes the initialization code for AMD
+IOMMU buggy in a way that could cause a kernel panic on
+initialization. This patch fixes the issue in the AMD IOMMU
+driver.
+
+Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kernel/amd_iommu_init.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/arch/x86/kernel/amd_iommu_init.c
++++ b/arch/x86/kernel/amd_iommu_init.c
+@@ -136,6 +136,11 @@ LIST_HEAD(amd_iommu_list); /* list of a
+ system */
+
+ /*
++ * Set to true if ACPI table parsing and hardware intialization went properly
++ */
++static bool amd_iommu_initialized;
++
++/*
+ * Pointer to the device table which is shared by all AMD IOMMUs
+ * it is indexed by the PCI device id or the HT unit id and contains
+ * information about the domain the device belongs to as well as the
+@@ -913,6 +918,8 @@ static int __init init_iommu_all(struct
+ }
+ WARN_ON(p != end);
+
++ amd_iommu_initialized = true;
++
+ return 0;
+ }
+
+@@ -1263,6 +1270,9 @@ int __init amd_iommu_init(void)
+ if (acpi_table_parse("IVRS", init_iommu_all) != 0)
+ goto free;
+
++ if (!amd_iommu_initialized)
++ goto free;
++
+ if (acpi_table_parse("IVRS", init_memory_definitions) != 0)
+ goto free;
+
--- /dev/null
+From 45a94d7cd45ed991914011919e7d40eb6d2546d1 Mon Sep 17 00:00:00 2001
+From: Suresh Siddha <suresh.b.siddha@intel.com>
+Date: Wed, 16 Dec 2009 16:25:42 -0800
+Subject: x86, cpuid: Add "volatile" to asm in native_cpuid()
+
+From: Suresh Siddha <suresh.b.siddha@intel.com>
+
+commit 45a94d7cd45ed991914011919e7d40eb6d2546d1 upstream.
+
+xsave_cntxt_init() does something like:
+
+ cpuid(0xd, ..); // find out what features FP/SSE/.. etc are supported
+
+ xsetbv(); // enable the features known to OS
+
+ cpuid(0xd, ..); // find out the size of the context for features enabled
+
+Depending on what features get enabled in xsetbv(), value of the
+cpuid.eax=0xd.ecx=0.ebx changes correspondingly (representing the
+size of the context that is enabled).
+
+As we don't have volatile keyword for native_cpuid(), gcc 4.1.2
+optimizes away the second cpuid and the kernel continues to use
+the cpuid information obtained before xsetbv(), ultimately leading to kernel
+crash on processors supporting more state than the legacy FP/SSE.
+
+Add "volatile" for native_cpuid().
+
+Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
+LKML-Reference: <1261009542.2745.55.camel@sbs-t61.sc.intel.com>
+Signed-off-by: H. Peter Anvin <hpa@zytor.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/include/asm/processor.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/include/asm/processor.h
++++ b/arch/x86/include/asm/processor.h
+@@ -180,7 +180,7 @@ static inline void native_cpuid(unsigned
+ unsigned int *ecx, unsigned int *edx)
+ {
+ /* ecx is often an input as well as an output. */
+- asm("cpuid"
++ asm volatile("cpuid"
+ : "=a" (*eax),
+ "=b" (*ebx),
+ "=c" (*ecx),
--- /dev/null
+From 505422517d3f126bb939439e9d15dece94e11d2c Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <borislav.petkov@amd.com>
+Date: Fri, 11 Dec 2009 18:14:40 +0100
+Subject: x86, msr: Add support for non-contiguous cpumasks
+
+From: Borislav Petkov <borislav.petkov@amd.com>
+
+commit 505422517d3f126bb939439e9d15dece94e11d2c upstream.
+
+The current rd/wrmsr_on_cpus helpers assume that the supplied
+cpumasks are contiguous. However, there are machines out there
+like some K8 multinode Opterons which have a non-contiguous core
+enumeration on each node (e.g. cores 0,2 on node 0 instead of 0,1), see
+http://www.gossamer-threads.com/lists/linux/kernel/1160268.
+
+This patch fixes out-of-bounds writes (see URL above) by adding per-CPU
+msr structs which are used on the respective cores.
+
+Additionally, two helpers, msrs_{alloc,free}, are provided for use by
+the callers of the MSR accessors.
+
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Mauro Carvalho Chehab <mchehab@redhat.com>
+Cc: Aristeu Rozanski <aris@redhat.com>
+Cc: Randy Dunlap <randy.dunlap@oracle.com>
+Cc: Doug Thompson <dougthompson@xmission.com>
+Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
+LKML-Reference: <20091211171440.GD31998@aftab>
+Signed-off-by: H. Peter Anvin <hpa@zytor.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/include/asm/msr.h | 3 ++
+ arch/x86/lib/msr.c | 26 +++++++++++++++++++++----
+ drivers/edac/amd64_edac.c | 46 ++++++++++++++++-----------------------------
+ 3 files changed, 42 insertions(+), 33 deletions(-)
+
+--- a/arch/x86/include/asm/msr.h
++++ b/arch/x86/include/asm/msr.h
+@@ -244,6 +244,9 @@ do {
+
+ #define write_rdtscp_aux(val) wrmsr(0xc0000103, (val), 0)
+
++struct msr *msrs_alloc(void);
++void msrs_free(struct msr *msrs);
++
+ #ifdef CONFIG_SMP
+ int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
+ int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
+--- a/arch/x86/lib/msr.c
++++ b/arch/x86/lib/msr.c
+@@ -7,7 +7,6 @@ struct msr_info {
+ u32 msr_no;
+ struct msr reg;
+ struct msr *msrs;
+- int off;
+ int err;
+ };
+
+@@ -18,7 +17,7 @@ static void __rdmsr_on_cpu(void *info)
+ int this_cpu = raw_smp_processor_id();
+
+ if (rv->msrs)
+- reg = &rv->msrs[this_cpu - rv->off];
++ reg = per_cpu_ptr(rv->msrs, this_cpu);
+ else
+ reg = &rv->reg;
+
+@@ -32,7 +31,7 @@ static void __wrmsr_on_cpu(void *info)
+ int this_cpu = raw_smp_processor_id();
+
+ if (rv->msrs)
+- reg = &rv->msrs[this_cpu - rv->off];
++ reg = per_cpu_ptr(rv->msrs, this_cpu);
+ else
+ reg = &rv->reg;
+
+@@ -80,7 +79,6 @@ static void __rwmsr_on_cpus(const struct
+
+ memset(&rv, 0, sizeof(rv));
+
+- rv.off = cpumask_first(mask);
+ rv.msrs = msrs;
+ rv.msr_no = msr_no;
+
+@@ -120,6 +118,26 @@ void wrmsr_on_cpus(const struct cpumask
+ }
+ EXPORT_SYMBOL(wrmsr_on_cpus);
+
++struct msr *msrs_alloc(void)
++{
++ struct msr *msrs = NULL;
++
++ msrs = alloc_percpu(struct msr);
++ if (!msrs) {
++ pr_warning("%s: error allocating msrs\n", __func__);
++ return NULL;
++ }
++
++ return msrs;
++}
++EXPORT_SYMBOL(msrs_alloc);
++
++void msrs_free(struct msr *msrs)
++{
++ free_percpu(msrs);
++}
++EXPORT_SYMBOL(msrs_free);
++
+ /* These "safe" variants are slower and should be used when the target MSR
+ may not actually exist. */
+ static void __rdmsr_safe_on_cpu(void *info)
+--- a/drivers/edac/amd64_edac.c
++++ b/drivers/edac/amd64_edac.c
+@@ -13,6 +13,8 @@ module_param(report_gart_errors, int, 06
+ static int ecc_enable_override;
+ module_param(ecc_enable_override, int, 0644);
+
++static struct msr *msrs;
++
+ /* Lookup table for all possible MC control instances */
+ struct amd64_pvt;
+ static struct mem_ctl_info *mci_lookup[EDAC_MAX_NUMNODES];
+@@ -2632,8 +2634,7 @@ static void get_cpus_on_this_dct_cpumask
+ static bool amd64_nb_mce_bank_enabled_on_node(int nid)
+ {
+ cpumask_var_t mask;
+- struct msr *msrs;
+- int cpu, nbe, idx = 0;
++ int cpu, nbe;
+ bool ret = false;
+
+ if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
+@@ -2644,32 +2645,22 @@ static bool amd64_nb_mce_bank_enabled_on
+
+ get_cpus_on_this_dct_cpumask(mask, nid);
+
+- msrs = kzalloc(sizeof(struct msr) * cpumask_weight(mask), GFP_KERNEL);
+- if (!msrs) {
+- amd64_printk(KERN_WARNING, "%s: error allocating msrs\n",
+- __func__);
+- free_cpumask_var(mask);
+- return false;
+- }
+-
+ rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
+
+ for_each_cpu(cpu, mask) {
+- nbe = msrs[idx].l & K8_MSR_MCGCTL_NBE;
++ struct msr *reg = per_cpu_ptr(msrs, cpu);
++ nbe = reg->l & K8_MSR_MCGCTL_NBE;
+
+ debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
+- cpu, msrs[idx].q,
++ cpu, reg->q,
+ (nbe ? "enabled" : "disabled"));
+
+ if (!nbe)
+ goto out;
+-
+- idx++;
+ }
+ ret = true;
+
+ out:
+- kfree(msrs);
+ free_cpumask_var(mask);
+ return ret;
+ }
+@@ -2677,8 +2668,7 @@ out:
+ static int amd64_toggle_ecc_err_reporting(struct amd64_pvt *pvt, bool on)
+ {
+ cpumask_var_t cmask;
+- struct msr *msrs = NULL;
+- int cpu, idx = 0;
++ int cpu;
+
+ if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
+ amd64_printk(KERN_WARNING, "%s: error allocating mask\n",
+@@ -2688,34 +2678,27 @@ static int amd64_toggle_ecc_err_reportin
+
+ get_cpus_on_this_dct_cpumask(cmask, pvt->mc_node_id);
+
+- msrs = kzalloc(sizeof(struct msr) * cpumask_weight(cmask), GFP_KERNEL);
+- if (!msrs) {
+- amd64_printk(KERN_WARNING, "%s: error allocating msrs\n",
+- __func__);
+- return -ENOMEM;
+- }
+-
+ rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
+
+ for_each_cpu(cpu, cmask) {
+
++ struct msr *reg = per_cpu_ptr(msrs, cpu);
++
+ if (on) {
+- if (msrs[idx].l & K8_MSR_MCGCTL_NBE)
++ if (reg->l & K8_MSR_MCGCTL_NBE)
+ pvt->flags.ecc_report = 1;
+
+- msrs[idx].l |= K8_MSR_MCGCTL_NBE;
++ reg->l |= K8_MSR_MCGCTL_NBE;
+ } else {
+ /*
+ * Turn off ECC reporting only when it was off before
+ */
+ if (!pvt->flags.ecc_report)
+- msrs[idx].l &= ~K8_MSR_MCGCTL_NBE;
++ reg->l &= ~K8_MSR_MCGCTL_NBE;
+ }
+- idx++;
+ }
+ wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
+
+- kfree(msrs);
+ free_cpumask_var(cmask);
+
+ return 0;
+@@ -3193,6 +3176,8 @@ static int __init amd64_edac_init(void)
+ if (cache_k8_northbridges() < 0)
+ return err;
+
++ msrs = msrs_alloc();
++
+ err = pci_register_driver(&amd64_pci_driver);
+ if (err)
+ return err;
+@@ -3228,6 +3213,9 @@ static void __exit amd64_edac_exit(void)
+ edac_pci_release_generic_ctl(amd64_ctl_pci);
+
+ pci_unregister_driver(&amd64_pci_driver);
++
++ msrs_free(msrs);
++ msrs = NULL;
+ }
+
+ module_init(amd64_edac_init);
--- /dev/null
+From 6ede31e03084ee084bcee073ef3d1136f68d0906 Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <petkovbb@googlemail.com>
+Date: Thu, 17 Dec 2009 00:16:25 +0100
+Subject: x86, msr: msrs_alloc/free for CONFIG_SMP=n
+
+From: Borislav Petkov <petkovbb@googlemail.com>
+
+commit 6ede31e03084ee084bcee073ef3d1136f68d0906 upstream.
+
+Randy Dunlap reported the following build error:
+
+"When CONFIG_SMP=n, CONFIG_X86_MSR=m:
+
+ERROR: "msrs_free" [drivers/edac/amd64_edac_mod.ko] undefined!
+ERROR: "msrs_alloc" [drivers/edac/amd64_edac_mod.ko] undefined!"
+
+This is due to the fact that <arch/x86/lib/msr.c> is conditioned on
+CONFIG_SMP and in the UP case we have only the stubs in the header.
+Fork off SMP functionality into a new file (msr-smp.c) and build
+msrs_{alloc,free} unconditionally.
+
+Reported-by: Randy Dunlap <randy.dunlap@oracle.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Signed-off-by: Borislav Petkov <petkovbb@gmail.com>
+LKML-Reference: <20091216231625.GD27228@liondog.tnic>
+Signed-off-by: H. Peter Anvin <hpa@zytor.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/include/asm/msr.h | 12 ++
+ arch/x86/lib/Makefile | 4
+ arch/x86/lib/msr-smp.c | 204 +++++++++++++++++++++++++++++++++++++++++++
+ arch/x86/lib/msr.c | 213 ---------------------------------------------
+ 4 files changed, 218 insertions(+), 215 deletions(-)
+
+--- a/arch/x86/include/asm/msr.h
++++ b/arch/x86/include/asm/msr.h
+@@ -27,6 +27,18 @@ struct msr {
+ };
+ };
+
++struct msr_info {
++ u32 msr_no;
++ struct msr reg;
++ struct msr *msrs;
++ int err;
++};
++
++struct msr_regs_info {
++ u32 *regs;
++ int err;
++};
++
+ static inline unsigned long long native_read_tscp(unsigned int *aux)
+ {
+ unsigned long low, high;
+--- a/arch/x86/lib/Makefile
++++ b/arch/x86/lib/Makefile
+@@ -2,14 +2,14 @@
+ # Makefile for x86 specific library files.
+ #
+
+-obj-$(CONFIG_SMP) := msr.o
++obj-$(CONFIG_SMP) += msr-smp.o
+
+ lib-y := delay.o
+ lib-y += thunk_$(BITS).o
+ lib-y += usercopy_$(BITS).o getuser.o putuser.o
+ lib-y += memcpy_$(BITS).o
+
+-obj-y += msr-reg.o msr-reg-export.o
++obj-y += msr.o msr-reg.o msr-reg-export.o
+
+ ifeq ($(CONFIG_X86_32),y)
+ obj-y += atomic64_32.o
+--- a/arch/x86/lib/msr.c
++++ b/arch/x86/lib/msr.c
+@@ -1,123 +1,7 @@
+ #include <linux/module.h>
+ #include <linux/preempt.h>
+-#include <linux/smp.h>
+ #include <asm/msr.h>
+
+-struct msr_info {
+- u32 msr_no;
+- struct msr reg;
+- struct msr *msrs;
+- int err;
+-};
+-
+-static void __rdmsr_on_cpu(void *info)
+-{
+- struct msr_info *rv = info;
+- struct msr *reg;
+- int this_cpu = raw_smp_processor_id();
+-
+- if (rv->msrs)
+- reg = per_cpu_ptr(rv->msrs, this_cpu);
+- else
+- reg = &rv->reg;
+-
+- rdmsr(rv->msr_no, reg->l, reg->h);
+-}
+-
+-static void __wrmsr_on_cpu(void *info)
+-{
+- struct msr_info *rv = info;
+- struct msr *reg;
+- int this_cpu = raw_smp_processor_id();
+-
+- if (rv->msrs)
+- reg = per_cpu_ptr(rv->msrs, this_cpu);
+- else
+- reg = &rv->reg;
+-
+- wrmsr(rv->msr_no, reg->l, reg->h);
+-}
+-
+-int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
+-{
+- int err;
+- struct msr_info rv;
+-
+- memset(&rv, 0, sizeof(rv));
+-
+- rv.msr_no = msr_no;
+- err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
+- *l = rv.reg.l;
+- *h = rv.reg.h;
+-
+- return err;
+-}
+-EXPORT_SYMBOL(rdmsr_on_cpu);
+-
+-int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
+-{
+- int err;
+- struct msr_info rv;
+-
+- memset(&rv, 0, sizeof(rv));
+-
+- rv.msr_no = msr_no;
+- rv.reg.l = l;
+- rv.reg.h = h;
+- err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
+-
+- return err;
+-}
+-EXPORT_SYMBOL(wrmsr_on_cpu);
+-
+-static void __rwmsr_on_cpus(const struct cpumask *mask, u32 msr_no,
+- struct msr *msrs,
+- void (*msr_func) (void *info))
+-{
+- struct msr_info rv;
+- int this_cpu;
+-
+- memset(&rv, 0, sizeof(rv));
+-
+- rv.msrs = msrs;
+- rv.msr_no = msr_no;
+-
+- this_cpu = get_cpu();
+-
+- if (cpumask_test_cpu(this_cpu, mask))
+- msr_func(&rv);
+-
+- smp_call_function_many(mask, msr_func, &rv, 1);
+- put_cpu();
+-}
+-
+-/* rdmsr on a bunch of CPUs
+- *
+- * @mask: which CPUs
+- * @msr_no: which MSR
+- * @msrs: array of MSR values
+- *
+- */
+-void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
+-{
+- __rwmsr_on_cpus(mask, msr_no, msrs, __rdmsr_on_cpu);
+-}
+-EXPORT_SYMBOL(rdmsr_on_cpus);
+-
+-/*
+- * wrmsr on a bunch of CPUs
+- *
+- * @mask: which CPUs
+- * @msr_no: which MSR
+- * @msrs: array of MSR values
+- *
+- */
+-void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
+-{
+- __rwmsr_on_cpus(mask, msr_no, msrs, __wrmsr_on_cpu);
+-}
+-EXPORT_SYMBOL(wrmsr_on_cpus);
+-
+ struct msr *msrs_alloc(void)
+ {
+ struct msr *msrs = NULL;
+@@ -137,100 +21,3 @@ void msrs_free(struct msr *msrs)
+ free_percpu(msrs);
+ }
+ EXPORT_SYMBOL(msrs_free);
+-
+-/* These "safe" variants are slower and should be used when the target MSR
+- may not actually exist. */
+-static void __rdmsr_safe_on_cpu(void *info)
+-{
+- struct msr_info *rv = info;
+-
+- rv->err = rdmsr_safe(rv->msr_no, &rv->reg.l, &rv->reg.h);
+-}
+-
+-static void __wrmsr_safe_on_cpu(void *info)
+-{
+- struct msr_info *rv = info;
+-
+- rv->err = wrmsr_safe(rv->msr_no, rv->reg.l, rv->reg.h);
+-}
+-
+-int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
+-{
+- int err;
+- struct msr_info rv;
+-
+- memset(&rv, 0, sizeof(rv));
+-
+- rv.msr_no = msr_no;
+- err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1);
+- *l = rv.reg.l;
+- *h = rv.reg.h;
+-
+- return err ? err : rv.err;
+-}
+-EXPORT_SYMBOL(rdmsr_safe_on_cpu);
+-
+-int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
+-{
+- int err;
+- struct msr_info rv;
+-
+- memset(&rv, 0, sizeof(rv));
+-
+- rv.msr_no = msr_no;
+- rv.reg.l = l;
+- rv.reg.h = h;
+- err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);
+-
+- return err ? err : rv.err;
+-}
+-EXPORT_SYMBOL(wrmsr_safe_on_cpu);
+-
+-/*
+- * These variants are significantly slower, but allows control over
+- * the entire 32-bit GPR set.
+- */
+-struct msr_regs_info {
+- u32 *regs;
+- int err;
+-};
+-
+-static void __rdmsr_safe_regs_on_cpu(void *info)
+-{
+- struct msr_regs_info *rv = info;
+-
+- rv->err = rdmsr_safe_regs(rv->regs);
+-}
+-
+-static void __wrmsr_safe_regs_on_cpu(void *info)
+-{
+- struct msr_regs_info *rv = info;
+-
+- rv->err = wrmsr_safe_regs(rv->regs);
+-}
+-
+-int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
+-{
+- int err;
+- struct msr_regs_info rv;
+-
+- rv.regs = regs;
+- rv.err = -EIO;
+- err = smp_call_function_single(cpu, __rdmsr_safe_regs_on_cpu, &rv, 1);
+-
+- return err ? err : rv.err;
+-}
+-EXPORT_SYMBOL(rdmsr_safe_regs_on_cpu);
+-
+-int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
+-{
+- int err;
+- struct msr_regs_info rv;
+-
+- rv.regs = regs;
+- rv.err = -EIO;
+- err = smp_call_function_single(cpu, __wrmsr_safe_regs_on_cpu, &rv, 1);
+-
+- return err ? err : rv.err;
+-}
+-EXPORT_SYMBOL(wrmsr_safe_regs_on_cpu);
+--- /dev/null
++++ b/arch/x86/lib/msr-smp.c
+@@ -0,0 +1,204 @@
++#include <linux/module.h>
++#include <linux/preempt.h>
++#include <linux/smp.h>
++#include <asm/msr.h>
++
++static void __rdmsr_on_cpu(void *info)
++{
++ struct msr_info *rv = info;
++ struct msr *reg;
++ int this_cpu = raw_smp_processor_id();
++
++ if (rv->msrs)
++ reg = per_cpu_ptr(rv->msrs, this_cpu);
++ else
++ reg = &rv->reg;
++
++ rdmsr(rv->msr_no, reg->l, reg->h);
++}
++
++static void __wrmsr_on_cpu(void *info)
++{
++ struct msr_info *rv = info;
++ struct msr *reg;
++ int this_cpu = raw_smp_processor_id();
++
++ if (rv->msrs)
++ reg = per_cpu_ptr(rv->msrs, this_cpu);
++ else
++ reg = &rv->reg;
++
++ wrmsr(rv->msr_no, reg->l, reg->h);
++}
++
++int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
++{
++ int err;
++ struct msr_info rv;
++
++ memset(&rv, 0, sizeof(rv));
++
++ rv.msr_no = msr_no;
++ err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
++ *l = rv.reg.l;
++ *h = rv.reg.h;
++
++ return err;
++}
++EXPORT_SYMBOL(rdmsr_on_cpu);
++
++int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
++{
++ int err;
++ struct msr_info rv;
++
++ memset(&rv, 0, sizeof(rv));
++
++ rv.msr_no = msr_no;
++ rv.reg.l = l;
++ rv.reg.h = h;
++ err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
++
++ return err;
++}
++EXPORT_SYMBOL(wrmsr_on_cpu);
++
++static void __rwmsr_on_cpus(const struct cpumask *mask, u32 msr_no,
++ struct msr *msrs,
++ void (*msr_func) (void *info))
++{
++ struct msr_info rv;
++ int this_cpu;
++
++ memset(&rv, 0, sizeof(rv));
++
++ rv.msrs = msrs;
++ rv.msr_no = msr_no;
++
++ this_cpu = get_cpu();
++
++ if (cpumask_test_cpu(this_cpu, mask))
++ msr_func(&rv);
++
++ smp_call_function_many(mask, msr_func, &rv, 1);
++ put_cpu();
++}
++
++/* rdmsr on a bunch of CPUs
++ *
++ * @mask: which CPUs
++ * @msr_no: which MSR
++ * @msrs: array of MSR values
++ *
++ */
++void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
++{
++ __rwmsr_on_cpus(mask, msr_no, msrs, __rdmsr_on_cpu);
++}
++EXPORT_SYMBOL(rdmsr_on_cpus);
++
++/*
++ * wrmsr on a bunch of CPUs
++ *
++ * @mask: which CPUs
++ * @msr_no: which MSR
++ * @msrs: array of MSR values
++ *
++ */
++void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
++{
++ __rwmsr_on_cpus(mask, msr_no, msrs, __wrmsr_on_cpu);
++}
++EXPORT_SYMBOL(wrmsr_on_cpus);
++
++/* These "safe" variants are slower and should be used when the target MSR
++ may not actually exist. */
++static void __rdmsr_safe_on_cpu(void *info)
++{
++ struct msr_info *rv = info;
++
++ rv->err = rdmsr_safe(rv->msr_no, &rv->reg.l, &rv->reg.h);
++}
++
++static void __wrmsr_safe_on_cpu(void *info)
++{
++ struct msr_info *rv = info;
++
++ rv->err = wrmsr_safe(rv->msr_no, rv->reg.l, rv->reg.h);
++}
++
++int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
++{
++ int err;
++ struct msr_info rv;
++
++ memset(&rv, 0, sizeof(rv));
++
++ rv.msr_no = msr_no;
++ err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1);
++ *l = rv.reg.l;
++ *h = rv.reg.h;
++
++ return err ? err : rv.err;
++}
++EXPORT_SYMBOL(rdmsr_safe_on_cpu);
++
++int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
++{
++ int err;
++ struct msr_info rv;
++
++ memset(&rv, 0, sizeof(rv));
++
++ rv.msr_no = msr_no;
++ rv.reg.l = l;
++ rv.reg.h = h;
++ err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);
++
++ return err ? err : rv.err;
++}
++EXPORT_SYMBOL(wrmsr_safe_on_cpu);
++
++/*
++ * These variants are significantly slower, but allows control over
++ * the entire 32-bit GPR set.
++ */
++static void __rdmsr_safe_regs_on_cpu(void *info)
++{
++ struct msr_regs_info *rv = info;
++
++ rv->err = rdmsr_safe_regs(rv->regs);
++}
++
++static void __wrmsr_safe_regs_on_cpu(void *info)
++{
++ struct msr_regs_info *rv = info;
++
++ rv->err = wrmsr_safe_regs(rv->regs);
++}
++
++int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
++{
++ int err;
++ struct msr_regs_info rv;
++
++ rv.regs = regs;
++ rv.err = -EIO;
++ err = smp_call_function_single(cpu, __rdmsr_safe_regs_on_cpu, &rv, 1);
++
++ return err ? err : rv.err;
++}
++EXPORT_SYMBOL(rdmsr_safe_regs_on_cpu);
++
++int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
++{
++ int err;
++ struct msr_regs_info rv;
++
++ rv.regs = regs;
++ rv.err = -EIO;
++ err = smp_call_function_single(cpu, __wrmsr_safe_regs_on_cpu, &rv, 1);
++
++ return err ? err : rv.err;
++}
++EXPORT_SYMBOL(wrmsr_safe_regs_on_cpu);
--- /dev/null
+From b8a4754147d61f5359a765a3afd3eb03012aa052 Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <borislav.petkov@amd.com>
+Date: Thu, 30 Jul 2009 11:10:02 +0200
+Subject: x86, msr: Unify rdmsr_on_cpus/wrmsr_on_cpus
+
+From: Borislav Petkov <borislav.petkov@amd.com>
+
+commit b8a4754147d61f5359a765a3afd3eb03012aa052 upstream.
+
+Since rdmsr_on_cpus and wrmsr_on_cpus are almost identical, unify them
+into a common __rwmsr_on_cpus helper thus avoiding code duplication.
+
+While at it, convert cpumask_t's to const struct cpumask *.
+
+Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
+Signed-off-by: H. Peter Anvin <hpa@zytor.com>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/include/asm/msr.h | 4 +--
+ arch/x86/lib/msr.c | 46 ++++++++++++++++++---------------------------
+ 2 files changed, 21 insertions(+), 29 deletions(-)
+
+--- a/arch/x86/include/asm/msr.h
++++ b/arch/x86/include/asm/msr.h
+@@ -247,8 +247,8 @@ do {
+ #ifdef CONFIG_SMP
+ int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
+ int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
+-void rdmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs);
+-void wrmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs);
++void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
++void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
+ int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
+ int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
+ int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
+--- a/arch/x86/lib/msr.c
++++ b/arch/x86/lib/msr.c
+@@ -71,14 +71,9 @@ int wrmsr_on_cpu(unsigned int cpu, u32 m
+ }
+ EXPORT_SYMBOL(wrmsr_on_cpu);
+
+-/* rdmsr on a bunch of CPUs
+- *
+- * @mask: which CPUs
+- * @msr_no: which MSR
+- * @msrs: array of MSR values
+- *
+- */
+-void rdmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs)
++static void __rwmsr_on_cpus(const struct cpumask *mask, u32 msr_no,
++ struct msr *msrs,
++ void (*msr_func) (void *info))
+ {
+ struct msr_info rv;
+ int this_cpu;
+@@ -92,11 +87,23 @@ void rdmsr_on_cpus(const cpumask_t *mask
+ this_cpu = get_cpu();
+
+ if (cpumask_test_cpu(this_cpu, mask))
+- __rdmsr_on_cpu(&rv);
++ msr_func(&rv);
+
+- smp_call_function_many(mask, __rdmsr_on_cpu, &rv, 1);
++ smp_call_function_many(mask, msr_func, &rv, 1);
+ put_cpu();
+ }
++
++/* rdmsr on a bunch of CPUs
++ *
++ * @mask: which CPUs
++ * @msr_no: which MSR
++ * @msrs: array of MSR values
++ *
++ */
++void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
++{
++ __rwmsr_on_cpus(mask, msr_no, msrs, __rdmsr_on_cpu);
++}
+ EXPORT_SYMBOL(rdmsr_on_cpus);
+
+ /*
+@@ -107,24 +114,9 @@ EXPORT_SYMBOL(rdmsr_on_cpus);
+ * @msrs: array of MSR values
+ *
+ */
+-void wrmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs)
++void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
+ {
+- struct msr_info rv;
+- int this_cpu;
+-
+- memset(&rv, 0, sizeof(rv));
+-
+- rv.off = cpumask_first(mask);
+- rv.msrs = msrs;
+- rv.msr_no = msr_no;
+-
+- this_cpu = get_cpu();
+-
+- if (cpumask_test_cpu(this_cpu, mask))
+- __wrmsr_on_cpu(&rv);
+-
+- smp_call_function_many(mask, __wrmsr_on_cpu, &rv, 1);
+- put_cpu();
++ __rwmsr_on_cpus(mask, msr_no, msrs, __wrmsr_on_cpu);
+ }
+ EXPORT_SYMBOL(wrmsr_on_cpus);
+
--- /dev/null
+From 04a1e62c2cec820501f93526ad1e46073b802dc4 Mon Sep 17 00:00:00 2001
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Thu, 17 Dec 2009 07:04:56 -0800
+Subject: x86/ptrace: make genregs[32]_get/set more robust
+
+From: Linus Torvalds <torvalds@linux-foundation.org>
+
+commit 04a1e62c2cec820501f93526ad1e46073b802dc4 upstream.
+
+The loop condition is fragile: we compare an unsigned value to zero, and
+then decrement it by something larger than one in the loop. All the
+callers should be passing in appropriately aligned buffer lengths, but
+it's better to just not rely on it, and have some appropriate defensive
+loop limits.
+
+Acked-by: Roland McGrath <roland@redhat.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kernel/ptrace.c | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+--- a/arch/x86/kernel/ptrace.c
++++ b/arch/x86/kernel/ptrace.c
+@@ -408,14 +408,14 @@ static int genregs_get(struct task_struc
+ {
+ if (kbuf) {
+ unsigned long *k = kbuf;
+- while (count > 0) {
++ while (count >= sizeof(*k)) {
+ *k++ = getreg(target, pos);
+ count -= sizeof(*k);
+ pos += sizeof(*k);
+ }
+ } else {
+ unsigned long __user *u = ubuf;
+- while (count > 0) {
++ while (count >= sizeof(*u)) {
+ if (__put_user(getreg(target, pos), u++))
+ return -EFAULT;
+ count -= sizeof(*u);
+@@ -434,14 +434,14 @@ static int genregs_set(struct task_struc
+ int ret = 0;
+ if (kbuf) {
+ const unsigned long *k = kbuf;
+- while (count > 0 && !ret) {
++ while (count >= sizeof(*k) && !ret) {
+ ret = putreg(target, pos, *k++);
+ count -= sizeof(*k);
+ pos += sizeof(*k);
+ }
+ } else {
+ const unsigned long __user *u = ubuf;
+- while (count > 0 && !ret) {
++ while (count >= sizeof(*u) && !ret) {
+ unsigned long word;
+ ret = __get_user(word, u++);
+ if (ret)
+@@ -1219,14 +1219,14 @@ static int genregs32_get(struct task_str
+ {
+ if (kbuf) {
+ compat_ulong_t *k = kbuf;
+- while (count > 0) {
++ while (count >= sizeof(*k)) {
+ getreg32(target, pos, k++);
+ count -= sizeof(*k);
+ pos += sizeof(*k);
+ }
+ } else {
+ compat_ulong_t __user *u = ubuf;
+- while (count > 0) {
++ while (count >= sizeof(*u)) {
+ compat_ulong_t word;
+ getreg32(target, pos, &word);
+ if (__put_user(word, u++))
+@@ -1247,14 +1247,14 @@ static int genregs32_set(struct task_str
+ int ret = 0;
+ if (kbuf) {
+ const compat_ulong_t *k = kbuf;
+- while (count > 0 && !ret) {
++ while (count >= sizeof(*k) && !ret) {
+ ret = putreg32(target, pos, *k++);
+ count -= sizeof(*k);
+ pos += sizeof(*k);
+ }
+ } else {
+ const compat_ulong_t __user *u = ubuf;
+- while (count > 0 && !ret) {
++ while (count >= sizeof(*u) && !ret) {
+ compat_ulong_t word;
+ ret = __get_user(word, u++);
+ if (ret)
--- /dev/null
+From 39d30770992895d55789de64bad2349510af68d0 Mon Sep 17 00:00:00 2001
+From: Mike Travis <travis@sgi.com>
+Date: Mon, 28 Dec 2009 13:28:25 -0800
+Subject: x86: SGI UV: Fix writes to led registers on remote uv hubs
+
+From: Mike Travis <travis@sgi.com>
+
+commit 39d30770992895d55789de64bad2349510af68d0 upstream.
+
+The wrong address was being used to write the SCIR led regs on
+remote hubs. Also, there was an inconsistency between how BIOS
+and the kernel indexed these regs. Standardize on using the
+lower 6 bits of the APIC ID as the index.
+
+This patch fixes the problem of writing to an errant address to
+a cpu # >= 64.
+
+Signed-off-by: Mike Travis <travis@sgi.com>
+Reviewed-by: Jack Steiner <steiner@sgi.com>
+Cc: Robin Holt <holt@sgi.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+LKML-Reference: <4B3922F9.3060905@sgi.com>
+[ v2: fix a number of annoying checkpatch artifacts and whitespace noise ]
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/include/asm/uv/uv_hub.h | 86 ++++++++++++++++++++-----------------
+ arch/x86/kernel/apic/x2apic_uv_x.c | 12 ++---
+ 2 files changed, 54 insertions(+), 44 deletions(-)
+
+--- a/arch/x86/include/asm/uv/uv_hub.h
++++ b/arch/x86/include/asm/uv/uv_hub.h
+@@ -31,20 +31,20 @@
+ * contiguous (although various IO spaces may punch holes in
+ * it)..
+ *
+- * N - Number of bits in the node portion of a socket physical
+- * address.
++ * N - Number of bits in the node portion of a socket physical
++ * address.
+ *
+- * NASID - network ID of a router, Mbrick or Cbrick. Nasid values of
+- * routers always have low bit of 1, C/MBricks have low bit
+- * equal to 0. Most addressing macros that target UV hub chips
+- * right shift the NASID by 1 to exclude the always-zero bit.
+- * NASIDs contain up to 15 bits.
++ * NASID - network ID of a router, Mbrick or Cbrick. Nasid values of
++ * routers always have low bit of 1, C/MBricks have low bit
++ * equal to 0. Most addressing macros that target UV hub chips
++ * right shift the NASID by 1 to exclude the always-zero bit.
++ * NASIDs contain up to 15 bits.
+ *
+ * GNODE - NASID right shifted by 1 bit. Most mmrs contain gnodes instead
+ * of nasids.
+ *
+- * PNODE - the low N bits of the GNODE. The PNODE is the most useful variant
+- * of the nasid for socket usage.
++ * PNODE - the low N bits of the GNODE. The PNODE is the most useful variant
++ * of the nasid for socket usage.
+ *
+ *
+ * NumaLink Global Physical Address Format:
+@@ -71,12 +71,12 @@
+ *
+ *
+ * APICID format
+- * NOTE!!!!!! This is the current format of the APICID. However, code
+- * should assume that this will change in the future. Use functions
+- * in this file for all APICID bit manipulations and conversion.
++ * NOTE!!!!!! This is the current format of the APICID. However, code
++ * should assume that this will change in the future. Use functions
++ * in this file for all APICID bit manipulations and conversion.
+ *
+- * 1111110000000000
+- * 5432109876543210
++ * 1111110000000000
++ * 5432109876543210
+ * pppppppppplc0cch
+ * sssssssssss
+ *
+@@ -89,9 +89,9 @@
+ * Note: Processor only supports 12 bits in the APICID register. The ACPI
+ * tables hold all 16 bits. Software needs to be aware of this.
+ *
+- * Unless otherwise specified, all references to APICID refer to
+- * the FULL value contained in ACPI tables, not the subset in the
+- * processor APICID register.
++ * Unless otherwise specified, all references to APICID refer to
++ * the FULL value contained in ACPI tables, not the subset in the
++ * processor APICID register.
+ */
+
+
+@@ -151,16 +151,16 @@ struct uv_hub_info_s {
+ };
+
+ DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
+-#define uv_hub_info (&__get_cpu_var(__uv_hub_info))
++#define uv_hub_info (&__get_cpu_var(__uv_hub_info))
+ #define uv_cpu_hub_info(cpu) (&per_cpu(__uv_hub_info, cpu))
+
+ /*
+ * Local & Global MMR space macros.
+- * Note: macros are intended to be used ONLY by inline functions
+- * in this file - not by other kernel code.
+- * n - NASID (full 15-bit global nasid)
+- * g - GNODE (full 15-bit global nasid, right shifted 1)
+- * p - PNODE (local part of nsids, right shifted 1)
++ * Note: macros are intended to be used ONLY by inline functions
++ * in this file - not by other kernel code.
++ * n - NASID (full 15-bit global nasid)
++ * g - GNODE (full 15-bit global nasid, right shifted 1)
++ * p - PNODE (local part of nsids, right shifted 1)
+ */
+ #define UV_NASID_TO_PNODE(n) (((n) >> 1) & uv_hub_info->pnode_mask)
+ #define UV_PNODE_TO_GNODE(p) ((p) |uv_hub_info->gnode_extra)
+@@ -213,8 +213,8 @@ DECLARE_PER_CPU(struct uv_hub_info_s, __
+ /*
+ * Macros for converting between kernel virtual addresses, socket local physical
+ * addresses, and UV global physical addresses.
+- * Note: use the standard __pa() & __va() macros for converting
+- * between socket virtual and socket physical addresses.
++ * Note: use the standard __pa() & __va() macros for converting
++ * between socket virtual and socket physical addresses.
+ */
+
+ /* socket phys RAM --> UV global physical address */
+@@ -265,21 +265,18 @@ static inline int uv_apicid_to_pnode(int
+ * Access global MMRs using the low memory MMR32 space. This region supports
+ * faster MMR access but not all MMRs are accessible in this space.
+ */
+-static inline unsigned long *uv_global_mmr32_address(int pnode,
+- unsigned long offset)
++static inline unsigned long *uv_global_mmr32_address(int pnode, unsigned long offset)
+ {
+ return __va(UV_GLOBAL_MMR32_BASE |
+ UV_GLOBAL_MMR32_PNODE_BITS(pnode) | offset);
+ }
+
+-static inline void uv_write_global_mmr32(int pnode, unsigned long offset,
+- unsigned long val)
++static inline void uv_write_global_mmr32(int pnode, unsigned long offset, unsigned long val)
+ {
+ writeq(val, uv_global_mmr32_address(pnode, offset));
+ }
+
+-static inline unsigned long uv_read_global_mmr32(int pnode,
+- unsigned long offset)
++static inline unsigned long uv_read_global_mmr32(int pnode, unsigned long offset)
+ {
+ return readq(uv_global_mmr32_address(pnode, offset));
+ }
+@@ -288,25 +285,32 @@ static inline unsigned long uv_read_glob
+ * Access Global MMR space using the MMR space located at the top of physical
+ * memory.
+ */
+-static inline unsigned long *uv_global_mmr64_address(int pnode,
+- unsigned long offset)
++static inline unsigned long *uv_global_mmr64_address(int pnode, unsigned long offset)
+ {
+ return __va(UV_GLOBAL_MMR64_BASE |
+ UV_GLOBAL_MMR64_PNODE_BITS(pnode) | offset);
+ }
+
+-static inline void uv_write_global_mmr64(int pnode, unsigned long offset,
+- unsigned long val)
++static inline void uv_write_global_mmr64(int pnode, unsigned long offset, unsigned long val)
+ {
+ writeq(val, uv_global_mmr64_address(pnode, offset));
+ }
+
+-static inline unsigned long uv_read_global_mmr64(int pnode,
+- unsigned long offset)
++static inline unsigned long uv_read_global_mmr64(int pnode, unsigned long offset)
+ {
+ return readq(uv_global_mmr64_address(pnode, offset));
+ }
+
++static inline void uv_write_global_mmr8(int pnode, unsigned long offset, unsigned char val)
++{
++ writeb(val, uv_global_mmr64_address(pnode, offset));
++}
++
++static inline unsigned char uv_read_global_mmr8(int pnode, unsigned long offset)
++{
++ return readb(uv_global_mmr64_address(pnode, offset));
++}
++
+ /*
+ * Access hub local MMRs. Faster than using global space but only local MMRs
+ * are accessible.
+@@ -426,11 +430,17 @@ static inline void uv_set_scir_bits(unsi
+ }
+ }
+
++static inline unsigned long uv_scir_offset(int apicid)
++{
++ return SCIR_LOCAL_MMR_BASE | (apicid & 0x3f);
++}
++
+ static inline void uv_set_cpu_scir_bits(int cpu, unsigned char value)
+ {
+ if (uv_cpu_hub_info(cpu)->scir.state != value) {
++ uv_write_global_mmr8(uv_cpu_to_pnode(cpu),
++ uv_cpu_hub_info(cpu)->scir.offset, value);
+ uv_cpu_hub_info(cpu)->scir.state = value;
+- uv_write_local_mmr8(uv_cpu_hub_info(cpu)->scir.offset, value);
+ }
+ }
+
+--- a/arch/x86/kernel/apic/x2apic_uv_x.c
++++ b/arch/x86/kernel/apic/x2apic_uv_x.c
+@@ -607,8 +607,10 @@ void __init uv_system_init(void)
+ uv_rtc_init();
+
+ for_each_present_cpu(cpu) {
++ int apicid = per_cpu(x86_cpu_to_apicid, cpu);
++
+ nid = cpu_to_node(cpu);
+- pnode = uv_apicid_to_pnode(per_cpu(x86_cpu_to_apicid, cpu));
++ pnode = uv_apicid_to_pnode(apicid);
+ blade = boot_pnode_to_blade(pnode);
+ lcpu = uv_blade_info[blade].nr_possible_cpus;
+ uv_blade_info[blade].nr_possible_cpus++;
+@@ -629,15 +631,13 @@ void __init uv_system_init(void)
+ uv_cpu_hub_info(cpu)->gnode_extra = gnode_extra;
+ uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base;
+ uv_cpu_hub_info(cpu)->coherency_domain_number = sn_coherency_id;
+- uv_cpu_hub_info(cpu)->scir.offset = SCIR_LOCAL_MMR_BASE + lcpu;
++ uv_cpu_hub_info(cpu)->scir.offset = uv_scir_offset(apicid);
+ uv_node_to_blade[nid] = blade;
+ uv_cpu_to_blade[cpu] = blade;
+ max_pnode = max(pnode, max_pnode);
+
+- printk(KERN_DEBUG "UV: cpu %d, apicid 0x%x, pnode %d, nid %d, "
+- "lcpu %d, blade %d\n",
+- cpu, per_cpu(x86_cpu_to_apicid, cpu), pnode, nid,
+- lcpu, blade);
++ printk(KERN_DEBUG "UV: cpu %d, apicid 0x%x, pnode %d, nid %d, lcpu %d, blade %d\n",
++ cpu, apicid, pnode, nid, lcpu, blade);
+ }
+
+ /* Add blade/pnode info for nodes without cpus */
--- /dev/null
+From c6e1971139be1342902873181f3b80a979bfb33b Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Wed, 8 Jul 2009 12:27:37 +0200
+Subject: xen: fix is_disconnected_device/exists_disconnected_device
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit c6e1971139be1342902873181f3b80a979bfb33b upstream.
+
+The logic of is_disconnected_device/exists_disconnected_device is wrong
+in that they are used to test whether a device is trying to connect (i.e.
+connecting). For this reason the patch fixes them to not consider a
+Closing or Closed device to be connecting. At the same time the patch
+also renames the functions according to what they really do; you could
+say a closed device is "disconnected" (the old name), but not "connecting"
+(the new name).
+
+This patch is a backport of changeset 909 from the Xenbits tree.
+
+Cc: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/xen/xenbus/xenbus_probe.c | 13 +++++++------
+ 1 file changed, 7 insertions(+), 6 deletions(-)
+
+--- a/drivers/xen/xenbus/xenbus_probe.c
++++ b/drivers/xen/xenbus/xenbus_probe.c
+@@ -843,7 +843,7 @@ postcore_initcall(xenbus_probe_init);
+
+ MODULE_LICENSE("GPL");
+
+-static int is_disconnected_device(struct device *dev, void *data)
++static int is_device_connecting(struct device *dev, void *data)
+ {
+ struct xenbus_device *xendev = to_xenbus_device(dev);
+ struct device_driver *drv = data;
+@@ -861,14 +861,15 @@ static int is_disconnected_device(struct
+ return 0;
+
+ xendrv = to_xenbus_driver(dev->driver);
+- return (xendev->state != XenbusStateConnected ||
+- (xendrv->is_ready && !xendrv->is_ready(xendev)));
++ return (xendev->state < XenbusStateConnected ||
++ (xendev->state == XenbusStateConnected &&
++ xendrv->is_ready && !xendrv->is_ready(xendev)));
+ }
+
+-static int exists_disconnected_device(struct device_driver *drv)
++static int exists_connecting_device(struct device_driver *drv)
+ {
+ return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
+- is_disconnected_device);
++ is_device_connecting);
+ }
+
+ static int print_device_status(struct device *dev, void *data)
+@@ -918,7 +919,7 @@ static void wait_for_devices(struct xenb
+ if (!ready_to_wait_for_devices || !xen_domain())
+ return;
+
+- while (exists_disconnected_device(drv)) {
++ while (exists_connecting_device(drv)) {
+ if (time_after(jiffies, timeout))
+ break;
+ schedule_timeout_interruptible(HZ/10);
--- /dev/null
+From f8dc33088febc63286b7a60e6b678de8e064de8e Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Wed, 8 Jul 2009 12:27:38 +0200
+Subject: xen: improvement to wait_for_devices()
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit f8dc33088febc63286b7a60e6b678de8e064de8e upstream.
+
+When printing a warning about a timed-out device, print the
+current state of both ends of the device connection (i.e., backend as
+well as frontend). This backports half of changeset 146 from the
+Xenbits tree.
+
+Cc: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/xen/xenbus/xenbus_probe.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/drivers/xen/xenbus/xenbus_probe.c
++++ b/drivers/xen/xenbus/xenbus_probe.c
+@@ -885,10 +885,13 @@ static int print_device_status(struct de
+ /* Information only: is this too noisy? */
+ printk(KERN_INFO "XENBUS: Device with no driver: %s\n",
+ xendev->nodename);
+- } else if (xendev->state != XenbusStateConnected) {
++ } else if (xendev->state < XenbusStateConnected) {
++ enum xenbus_state rstate = XenbusStateUnknown;
++ if (xendev->otherend)
++ rstate = xenbus_read_driver_state(xendev->otherend);
+ printk(KERN_WARNING "XENBUS: Timeout connecting "
+- "to device: %s (state %d)\n",
+- xendev->nodename, xendev->state);
++ "to device: %s (local state %d, remote state %d)\n",
++ xendev->nodename, xendev->state, rstate);
+ }
+
+ return 0;
--- /dev/null
+From ae7888012969355a548372e99b066d9e31153b62 Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Wed, 8 Jul 2009 12:27:39 +0200
+Subject: xen: wait up to 5 minutes for device connection
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit ae7888012969355a548372e99b066d9e31153b62 upstream.
+
+Increases the device timeout from 10s to 5 minutes, giving the user a
+visual indication during that time in case there are problems. The patch
+is a backport of changesets 144 and 150 in the Xenbits tree.
+
+Cc: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/xen/xenbus/xenbus_probe.c | 20 ++++++++++++++++----
+ 1 file changed, 16 insertions(+), 4 deletions(-)
+
+--- a/drivers/xen/xenbus/xenbus_probe.c
++++ b/drivers/xen/xenbus/xenbus_probe.c
+@@ -901,7 +901,7 @@ static int print_device_status(struct de
+ static int ready_to_wait_for_devices;
+
+ /*
+- * On a 10 second timeout, wait for all devices currently configured. We need
++ * On a 5-minute timeout, wait for all devices currently configured. We need
+ * to do this to guarantee that the filesystems and / or network devices
+ * needed for boot are available, before we can allow the boot to proceed.
+ *
+@@ -916,18 +916,30 @@ static int ready_to_wait_for_devices;
+ */
+ static void wait_for_devices(struct xenbus_driver *xendrv)
+ {
+- unsigned long timeout = jiffies + 10*HZ;
++ unsigned long start = jiffies;
+ struct device_driver *drv = xendrv ? &xendrv->driver : NULL;
++ unsigned int seconds_waited = 0;
+
+ if (!ready_to_wait_for_devices || !xen_domain())
+ return;
+
+ while (exists_connecting_device(drv)) {
+- if (time_after(jiffies, timeout))
+- break;
++ if (time_after(jiffies, start + (seconds_waited+5)*HZ)) {
++ if (!seconds_waited)
++ printk(KERN_WARNING "XENBUS: Waiting for "
++ "devices to initialise: ");
++ seconds_waited += 5;
++ printk("%us...", 300 - seconds_waited);
++ if (seconds_waited == 300)
++ break;
++ }
++
+ schedule_timeout_interruptible(HZ/10);
+ }
+
++ if (seconds_waited)
++ printk("\n");
++
+ bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
+ print_device_status);
+ }