more .32 patches
author    Greg Kroah-Hartman <gregkh@suse.de>
          Mon, 4 Jan 2010 23:30:23 +0000 (15:30 -0800)
committer Greg Kroah-Hartman <gregkh@suse.de>
          Mon, 4 Jan 2010 23:30:23 +0000 (15:30 -0800)
40 files changed:
queue-2.6.32/add-unlocked-version-of-inode_add_bytes-function.patch [new file with mode: 0644]
queue-2.6.32/amd64_edac-fix-driver-instance-freeing.patch [new file with mode: 0644]
queue-2.6.32/amd64_edac-fix-forcing-module-load-unload.patch [new file with mode: 0644]
queue-2.6.32/amd64_edac-make-driver-loading-more-robust.patch [new file with mode: 0644]
queue-2.6.32/amd64_edac-unify-mcgctl-ecc-switching.patch [new file with mode: 0644]
queue-2.6.32/b43-avoid-ppc-fault-during-resume.patch [new file with mode: 0644]
queue-2.6.32/cpumask-use-modern-cpumask-style-in-drivers-edac-amd64_edac.c.patch [new file with mode: 0644]
queue-2.6.32/drm-disable-all-the-possible-outputs-crtcs-before-entering-kms-mode.patch [new file with mode: 0644]
queue-2.6.32/drm-radeon-kms-fix-crtc-vblank-update-for-r600.patch [new file with mode: 0644]
queue-2.6.32/e100-fix-broken-cbs-accounting-due-to-missing-memset.patch [new file with mode: 0644]
queue-2.6.32/ext4-convert-to-generic-reserved-quota-s-space-management.patch [new file with mode: 0644]
queue-2.6.32/ext4-fix-potential-quota-deadlock.patch [new file with mode: 0644]
queue-2.6.32/ext4-fix-sleep-inside-spinlock-issue-with-quota-and-dealloc-14739.patch [new file with mode: 0644]
queue-2.6.32/ext4-update-documentation-to-correct-the-inode_readahead_blks-option-name.patch [new file with mode: 0644]
queue-2.6.32/hwmon-sht15-off-by-one-error-in-array-index-incorrect-constants.patch [new file with mode: 0644]
queue-2.6.32/ipv6-reassembly-use-seperate-reassembly-queues-for-conntrack-and-local-delivery.patch [new file with mode: 0644]
queue-2.6.32/kernel-sysctl.c-fix-the-incomplete-part-of-sysctl_max_map_count-should-be-non-negative.patch.patch [new file with mode: 0644]
queue-2.6.32/keys-keyctl_session_to_parent-needs-tif_notify_resume-architecture-support.patch [new file with mode: 0644]
queue-2.6.32/lguest-fix-bug-in-setting-guest-gdt-entry.patch [new file with mode: 0644]
queue-2.6.32/mac80211-fix-race-with-suspend-and-dynamic_ps_disable_work.patch [new file with mode: 0644]
queue-2.6.32/memcg-avoid-oom-killing-innocent-task-in-case-of-use_hierarchy.patch [new file with mode: 0644]
queue-2.6.32/netfilter-fix-crashes-in-bridge-netfilter-caused-by-fragment-jumps.patch [new file with mode: 0644]
queue-2.6.32/nommu-optimise-away-the-dac_-mmap_min_addr-tests.patch [new file with mode: 0644]
queue-2.6.32/orinoco-fix-gfp_kernel-in-orinoco_set_key-with-interrupts-disabled.patch [new file with mode: 0644]
queue-2.6.32/quota-decouple-fs-reserved-space-from-quota-reservation.patch [new file with mode: 0644]
queue-2.6.32/s390-dasd-support-diag-access-for-read-only-devices.patch [new file with mode: 0644]
queue-2.6.32/sched-fix-balance-vs-hotplug-race.patch [new file with mode: 0644]
queue-2.6.32/sched-sched_rt_periodic_timer-vs-cpu-hotplug.patch [new file with mode: 0644]
queue-2.6.32/series
queue-2.6.32/sysctl_max_map_count-should-be-non-negative.patch [new file with mode: 0644]
queue-2.6.32/udf-try-harder-when-looking-for-vat-inode.patch [new file with mode: 0644]
queue-2.6.32/v4l-dvb-13596-ov511.c-typo-lock-unlock.patch [new file with mode: 0644]
queue-2.6.32/vmscan-do-not-evict-inactive-pages-when-skipping-an-active-list-scan.patch [new file with mode: 0644]
queue-2.6.32/x86-msr-add-support-for-non-contiguous-cpumasks.patch [new file with mode: 0644]
queue-2.6.32/x86-msr-msrs_alloc-free-for-config_smp-n.patch [new file with mode: 0644]
queue-2.6.32/x86-msr-unify-rdmsr_on_cpus-wrmsr_on_cpus.patch [new file with mode: 0644]
queue-2.6.32/x86-ptrace-make-genregs_get-set-more-robust.patch [new file with mode: 0644]
queue-2.6.32/xen-fix-is_disconnected_device-exists_disconnected_device.patch [new file with mode: 0644]
queue-2.6.32/xen-improvement-to-wait_for_devices.patch [new file with mode: 0644]
queue-2.6.32/xen-wait-up-to-5-minutes-for-device-connetion.patch [new file with mode: 0644]

diff --git a/queue-2.6.32/add-unlocked-version-of-inode_add_bytes-function.patch b/queue-2.6.32/add-unlocked-version-of-inode_add_bytes-function.patch
new file mode 100644 (file)
index 0000000..965fe82
--- /dev/null
@@ -0,0 +1,58 @@
+From b462707e7ccad058ae151e5c5b06eb5cadcb737f Mon Sep 17 00:00:00 2001
+From: Dmitry Monakhov <dmonakhov@openvz.org>
+Date: Mon, 14 Dec 2009 15:21:12 +0300
+Subject: Add unlocked version of inode_add_bytes() function
+
+From: Dmitry Monakhov <dmonakhov@openvz.org>
+
+commit b462707e7ccad058ae151e5c5b06eb5cadcb737f upstream.
+
+Quota code requires an unlocked version of this function. Of course
+we could just copy-paste the code, but copy-pasting is always evil.
+
+Signed-off-by: Dmitry Monakhov <dmonakhov@openvz.org>
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/stat.c          |   10 ++++++++--
+ include/linux/fs.h |    1 +
+ 2 files changed, 9 insertions(+), 2 deletions(-)
+
+--- a/fs/stat.c
++++ b/fs/stat.c
+@@ -401,9 +401,9 @@ SYSCALL_DEFINE4(fstatat64, int, dfd, cha
+ }
+ #endif /* __ARCH_WANT_STAT64 */
+-void inode_add_bytes(struct inode *inode, loff_t bytes)
++/* Caller is here responsible for sufficient locking (ie. inode->i_lock) */
++void __inode_add_bytes(struct inode *inode, loff_t bytes)
+ {
+-      spin_lock(&inode->i_lock);
+       inode->i_blocks += bytes >> 9;
+       bytes &= 511;
+       inode->i_bytes += bytes;
+@@ -411,6 +411,12 @@ void inode_add_bytes(struct inode *inode
+               inode->i_blocks++;
+               inode->i_bytes -= 512;
+       }
++}
++
++void inode_add_bytes(struct inode *inode, loff_t bytes)
++{
++      spin_lock(&inode->i_lock);
++      __inode_add_bytes(inode, bytes);
+       spin_unlock(&inode->i_lock);
+ }
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -2314,6 +2314,7 @@ extern const struct inode_operations pag
+ extern int generic_readlink(struct dentry *, char __user *, int);
+ extern void generic_fillattr(struct inode *, struct kstat *);
+ extern int vfs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
++void __inode_add_bytes(struct inode *inode, loff_t bytes);
+ void inode_add_bytes(struct inode *inode, loff_t bytes);
+ void inode_sub_bytes(struct inode *inode, loff_t bytes);
+ loff_t inode_get_bytes(struct inode *inode);
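
The split above is a common kernel idiom: the double-underscore helper does the work and assumes the caller already holds the lock, while the public function is a thin locking wrapper. A minimal userspace sketch of the same idiom, with a pthread mutex standing in for inode->i_lock and illustrative names:

#include <pthread.h>

struct byte_counter {
    pthread_mutex_t lock;
    long long blocks;   /* 512-byte units, like inode->i_blocks */
    int bytes;          /* remainder, like inode->i_bytes */
};

/* Caller must hold c->lock (mirrors __inode_add_bytes()). */
static void __counter_add_bytes(struct byte_counter *c, long long bytes)
{
    c->blocks += bytes >> 9;
    bytes &= 511;
    c->bytes += bytes;
    if (c->bytes >= 512) {
        c->blocks++;
        c->bytes -= 512;
    }
}

/* Locking wrapper (mirrors inode_add_bytes()). */
static void counter_add_bytes(struct byte_counter *c, long long bytes)
{
    pthread_mutex_lock(&c->lock);
    __counter_add_bytes(c, bytes);
    pthread_mutex_unlock(&c->lock);
}
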
diff --git a/queue-2.6.32/amd64_edac-fix-driver-instance-freeing.patch b/queue-2.6.32/amd64_edac-fix-driver-instance-freeing.patch
new file mode 100644 (file)
index 0000000..c9b0cdc
--- /dev/null
@@ -0,0 +1,44 @@
+From 8f68ed9728193b1f2fb53ba06031b06bd8b3d1b4 Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <borislav.petkov@amd.com>
+Date: Mon, 21 Dec 2009 15:15:59 +0100
+Subject: amd64_edac: fix driver instance freeing
+
+From: Borislav Petkov <borislav.petkov@amd.com>
+
+commit 8f68ed9728193b1f2fb53ba06031b06bd8b3d1b4 upstream.
+
+Fix use-after-free errors by pushing all memory-freeing calls to the end
+of amd64_remove_one_instance().
+
+Reported-by: Darren Jenkins <darrenrjenkins@gmail.com>
+LKML-Reference: <1261370306.11354.52.camel@ICE-BOX>
+Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/edac/amd64_edac.c |    9 ++++-----
+ 1 file changed, 4 insertions(+), 5 deletions(-)
+
+--- a/drivers/edac/amd64_edac.c
++++ b/drivers/edac/amd64_edac.c
+@@ -3082,16 +3082,15 @@ static void __devexit amd64_remove_one_i
+       amd64_free_mc_sibling_devices(pvt);
+-      kfree(pvt);
+-      mci->pvt_info = NULL;
+-
+-      mci_lookup[pvt->mc_node_id] = NULL;
+-
+       /* unregister from EDAC MCE */
+       amd_report_gart_errors(false);
+       amd_unregister_ecc_decoder(amd64_decode_bus_error);
+       /* Free the EDAC CORE resources */
++      mci->pvt_info = NULL;
++      mci_lookup[pvt->mc_node_id] = NULL;
++
++      kfree(pvt);
+       edac_mc_free(mci);
+ }
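
The bug being fixed is one of ordering: pvt was freed and then dereferenced (pvt->mc_node_id) a few lines later. A reduced, hypothetical illustration of the broken and fixed teardown order:

#include <stdlib.h>

struct instance { int node_id; };
static struct instance *lookup[8];

/* Buggy order: inst is dereferenced after it has been freed. */
static void teardown_buggy(struct instance *inst)
{
    free(inst);
    lookup[inst->node_id] = NULL;   /* use-after-free */
}

/* Fixed order, as in the patch: every use precedes the free. */
static void teardown_fixed(struct instance *inst)
{
    lookup[inst->node_id] = NULL;
    free(inst);
}
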
diff --git a/queue-2.6.32/amd64_edac-fix-forcing-module-load-unload.patch b/queue-2.6.32/amd64_edac-fix-forcing-module-load-unload.patch
new file mode 100644 (file)
index 0000000..f3c95f2
--- /dev/null
@@ -0,0 +1,31 @@
+From 43f5e68733cfe8bed3c30b5c14c4993dffb29766 Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <borislav.petkov@amd.com>
+Date: Mon, 21 Dec 2009 18:55:18 +0100
+Subject: amd64_edac: fix forcing module load/unload
+
+From: Borislav Petkov <borislav.petkov@amd.com>
+
+commit 43f5e68733cfe8bed3c30b5c14c4993dffb29766 upstream.
+
+Clear the override flag after force-loading the module.
+
+Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/edac/amd64_edac.c |    3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/drivers/edac/amd64_edac.c
++++ b/drivers/edac/amd64_edac.c
+@@ -2836,9 +2836,8 @@ static int amd64_check_ecc_enabled(struc
+                       amd64_printk(KERN_WARNING, "%s", ecc_warning);
+                       return -ENODEV;
+               }
+-      } else
+-              /* CLEAR the override, since BIOS controlled it */
+               ecc_enable_override = 0;
++      }
+       return 0;
+ }
diff --git a/queue-2.6.32/amd64_edac-make-driver-loading-more-robust.patch b/queue-2.6.32/amd64_edac-make-driver-loading-more-robust.patch
new file mode 100644 (file)
index 0000000..dbf917a
--- /dev/null
@@ -0,0 +1,83 @@
+From 56b34b91e22313294154cee0c16e294cf8a45b61 Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <borislav.petkov@amd.com>
+Date: Mon, 21 Dec 2009 18:13:01 +0100
+Subject: amd64_edac: make driver loading more robust
+
+From: Borislav Petkov <borislav.petkov@amd.com>
+
+commit 56b34b91e22313294154cee0c16e294cf8a45b61 upstream.
+
+Currently, the module does not initialize fully when the DIMMs aren't
+ECC, but it still remains loaded. Propagate the error when no instance
+of the driver is properly initialized and prevent further loading.
+
+Reorganize and polish error handling in amd64_edac_init() while at it.
+
+Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/edac/amd64_edac.c |   23 ++++++++++++++++-------
+ 1 file changed, 16 insertions(+), 7 deletions(-)
+
+--- a/drivers/edac/amd64_edac.c
++++ b/drivers/edac/amd64_edac.c
+@@ -3167,25 +3167,29 @@ static void amd64_setup_pci_device(void)
+ static int __init amd64_edac_init(void)
+ {
+       int nb, err = -ENODEV;
++      bool load_ok = false;
+       edac_printk(KERN_INFO, EDAC_MOD_STR, EDAC_AMD64_VERSION "\n");
+       opstate_init();
+       if (cache_k8_northbridges() < 0)
+-              return err;
++              goto err_ret;
+       msrs = msrs_alloc();
++      if (!msrs)
++              goto err_ret;
+       err = pci_register_driver(&amd64_pci_driver);
+       if (err)
+-              return err;
++              goto err_pci;
+       /*
+        * At this point, the array 'pvt_lookup[]' contains pointers to alloc'd
+        * amd64_pvt structs. These will be used in the 2nd stage init function
+        * to finish initialization of the MC instances.
+        */
++      err = -ENODEV;
+       for (nb = 0; nb < num_k8_northbridges; nb++) {
+               if (!pvt_lookup[nb])
+                       continue;
+@@ -3193,16 +3197,21 @@ static int __init amd64_edac_init(void)
+               err = amd64_init_2nd_stage(pvt_lookup[nb]);
+               if (err)
+                       goto err_2nd_stage;
+-      }
+-      amd64_setup_pci_device();
++              load_ok = true;
++      }
+-      return 0;
++      if (load_ok) {
++              amd64_setup_pci_device();
++              return 0;
++      }
+ err_2nd_stage:
+-      debugf0("2nd stage failed\n");
+       pci_unregister_driver(&amd64_pci_driver);
+-
++err_pci:
++      msrs_free(msrs);
++      msrs = NULL;
++err_ret:
+       return err;
+ }
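
The reworked init function follows the usual goto-unwind idiom: acquire resources in order and, on failure, jump to a label that releases exactly what was acquired so far. A self-contained sketch of that idiom; the resource helpers are hypothetical stand-ins for cache_k8_northbridges(), msrs_alloc() and pci_register_driver():

#include <errno.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the driver's real acquisition steps. */
static int acquire_a(void) { return 0; }
static void *alloc_b(void) { return malloc(16); }
static int register_c(void) { return 0; }
static void free_b(void *b) { free(b); }

static int init_example(void)
{
    int err = -ENODEV;
    void *b;

    if (acquire_a() < 0)
        goto err_ret;           /* nothing to undo yet */

    b = alloc_b();
    if (!b)
        goto err_ret;

    err = register_c();
    if (err)
        goto err_b;             /* undo only the allocation */

    return 0;

err_b:
    free_b(b);
err_ret:
    return err;
}
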
diff --git a/queue-2.6.32/amd64_edac-unify-mcgctl-ecc-switching.patch b/queue-2.6.32/amd64_edac-unify-mcgctl-ecc-switching.patch
new file mode 100644 (file)
index 0000000..99c3054
--- /dev/null
@@ -0,0 +1,318 @@
+From f6d6ae965760906d79ab29bc38507608c5971549 Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <borislav.petkov@amd.com>
+Date: Tue, 3 Nov 2009 15:29:26 +0100
+Subject: amd64_edac: unify MCGCTL ECC switching
+
+From: Borislav Petkov <borislav.petkov@amd.com>
+
+commit f6d6ae965760906d79ab29bc38507608c5971549 upstream.
+
+Unify almost identical code into one function and remove NUMA-specific
+usage (specifically cpumask_of_node()) in favor of generic topology
+methods.
+
+Remove unused defines, while at it.
+
+Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/edac/amd64_edac.c |  204 +++++++++++++++++++++++++---------------------
+ drivers/edac/amd64_edac.h |    9 --
+ 2 files changed, 117 insertions(+), 96 deletions(-)
+
+--- a/drivers/edac/amd64_edac.c
++++ b/drivers/edac/amd64_edac.c
+@@ -2618,6 +2618,109 @@ static int amd64_init_csrows(struct mem_
+       return empty;
+ }
++/* get all cores on this DCT */
++static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, int nid)
++{
++      int cpu;
++
++      for_each_online_cpu(cpu)
++              if (amd_get_nb_id(cpu) == nid)
++                      cpumask_set_cpu(cpu, mask);
++}
++
++/* check MCG_CTL on all the cpus on this node */
++static bool amd64_nb_mce_bank_enabled_on_node(int nid)
++{
++      cpumask_var_t mask;
++      struct msr *msrs;
++      int cpu, nbe, idx = 0;
++      bool ret = false;
++
++      if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
++              amd64_printk(KERN_WARNING, "%s: error allocating mask\n",
++                           __func__);
++              return false;
++      }
++
++      get_cpus_on_this_dct_cpumask(mask, nid);
++
++      msrs = kzalloc(sizeof(struct msr) * cpumask_weight(mask), GFP_KERNEL);
++      if (!msrs) {
++              amd64_printk(KERN_WARNING, "%s: error allocating msrs\n",
++                            __func__);
++              free_cpumask_var(mask);
++               return false;
++      }
++
++      rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
++
++      for_each_cpu(cpu, mask) {
++              nbe = msrs[idx].l & K8_MSR_MCGCTL_NBE;
++
++              debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
++                      cpu, msrs[idx].q,
++                      (nbe ? "enabled" : "disabled"));
++
++              if (!nbe)
++                      goto out;
++
++              idx++;
++      }
++      ret = true;
++
++out:
++      kfree(msrs);
++      free_cpumask_var(mask);
++      return ret;
++}
++
++static int amd64_toggle_ecc_err_reporting(struct amd64_pvt *pvt, bool on)
++{
++      cpumask_var_t cmask;
++      struct msr *msrs = NULL;
++      int cpu, idx = 0;
++
++      if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
++              amd64_printk(KERN_WARNING, "%s: error allocating mask\n",
++                           __func__);
++              return false;
++      }
++
++      get_cpus_on_this_dct_cpumask(cmask, pvt->mc_node_id);
++
++      msrs = kzalloc(sizeof(struct msr) * cpumask_weight(cmask), GFP_KERNEL);
++      if (!msrs) {
++              amd64_printk(KERN_WARNING, "%s: error allocating msrs\n",
++                           __func__);
++              return -ENOMEM;
++      }
++
++      rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
++
++      for_each_cpu(cpu, cmask) {
++
++              if (on) {
++                      if (msrs[idx].l & K8_MSR_MCGCTL_NBE)
++                              pvt->flags.ecc_report = 1;
++
++                      msrs[idx].l |= K8_MSR_MCGCTL_NBE;
++              } else {
++                      /*
++                       * Turn off ECC reporting only when it was off before
++                       */
++                      if (!pvt->flags.ecc_report)
++                              msrs[idx].l &= ~K8_MSR_MCGCTL_NBE;
++              }
++              idx++;
++      }
++      wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
++
++      kfree(msrs);
++      free_cpumask_var(cmask);
++
++      return 0;
++}
++
+ /*
+  * Only if 'ecc_enable_override' is set AND BIOS had ECC disabled, do "we"
+  * enable it.
+@@ -2625,17 +2728,12 @@ static int amd64_init_csrows(struct mem_
+ static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
+ {
+       struct amd64_pvt *pvt = mci->pvt_info;
+-      const struct cpumask *cpumask = cpumask_of_node(pvt->mc_node_id);
+-      int cpu, idx = 0, err = 0;
+-      struct msr msrs[cpumask_weight(cpumask)];
+-      u32 value;
+-      u32 mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
++      int err = 0;
++      u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
+       if (!ecc_enable_override)
+               return;
+-      memset(msrs, 0, sizeof(msrs));
+-
+       amd64_printk(KERN_WARNING,
+               "'ecc_enable_override' parameter is active, "
+               "Enabling AMD ECC hardware now: CAUTION\n");
+@@ -2651,16 +2749,9 @@ static void amd64_enable_ecc_error_repor
+       value |= mask;
+       pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value);
+-      rdmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);
+-
+-      for_each_cpu(cpu, cpumask) {
+-              if (msrs[idx].l & K8_MSR_MCGCTL_NBE)
+-                      set_bit(idx, &pvt->old_mcgctl);
+-
+-              msrs[idx].l |= K8_MSR_MCGCTL_NBE;
+-              idx++;
+-      }
+-      wrmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);
++      if (amd64_toggle_ecc_err_reporting(pvt, ON))
++              amd64_printk(KERN_WARNING, "Error enabling ECC reporting over "
++                                         "MCGCTL!\n");
+       err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCFG, &value);
+       if (err)
+@@ -2701,17 +2792,12 @@ static void amd64_enable_ecc_error_repor
+ static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt)
+ {
+-      const struct cpumask *cpumask = cpumask_of_node(pvt->mc_node_id);
+-      int cpu, idx = 0, err = 0;
+-      struct msr msrs[cpumask_weight(cpumask)];
+-      u32 value;
+-      u32 mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
++      int err = 0;
++      u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
+       if (!pvt->nbctl_mcgctl_saved)
+               return;
+-      memset(msrs, 0, sizeof(msrs));
+-
+       err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCTL, &value);
+       if (err)
+               debugf0("Reading K8_NBCTL failed\n");
+@@ -2721,72 +2807,9 @@ static void amd64_restore_ecc_error_repo
+       /* restore the NB Enable MCGCTL bit */
+       pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value);
+-      rdmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);
+-
+-      for_each_cpu(cpu, cpumask) {
+-              msrs[idx].l &= ~K8_MSR_MCGCTL_NBE;
+-              msrs[idx].l |=
+-                      test_bit(idx, &pvt->old_mcgctl) << K8_MSR_MCGCTL_NBE;
+-              idx++;
+-      }
+-
+-      wrmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);
+-}
+-
+-/* get all cores on this DCT */
+-static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, int nid)
+-{
+-      int cpu;
+-
+-      for_each_online_cpu(cpu)
+-              if (amd_get_nb_id(cpu) == nid)
+-                      cpumask_set_cpu(cpu, mask);
+-}
+-
+-/* check MCG_CTL on all the cpus on this node */
+-static bool amd64_nb_mce_bank_enabled_on_node(int nid)
+-{
+-      cpumask_var_t mask;
+-      struct msr *msrs;
+-      int cpu, nbe, idx = 0;
+-      bool ret = false;
+-
+-      if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
+-              amd64_printk(KERN_WARNING, "%s: error allocating mask\n",
+-                           __func__);
+-              return false;
+-      }
+-
+-      get_cpus_on_this_dct_cpumask(mask, nid);
+-
+-      msrs = kzalloc(sizeof(struct msr) * cpumask_weight(mask), GFP_KERNEL);
+-      if (!msrs) {
+-              amd64_printk(KERN_WARNING, "%s: error allocating msrs\n",
+-                            __func__);
+-              free_cpumask_var(mask);
+-               return false;
+-      }
+-
+-      rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
+-
+-      for_each_cpu(cpu, mask) {
+-              nbe = msrs[idx].l & K8_MSR_MCGCTL_NBE;
+-
+-              debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
+-                      cpu, msrs[idx].q,
+-                      (nbe ? "enabled" : "disabled"));
+-
+-              if (!nbe)
+-                      goto out;
+-
+-              idx++;
+-      }
+-      ret = true;
+-
+-out:
+-      kfree(msrs);
+-      free_cpumask_var(mask);
+-      return ret;
++      if (amd64_toggle_ecc_err_reporting(pvt, OFF))
++              amd64_printk(KERN_WARNING, "Error restoring ECC reporting over "
++                                         "MCGCTL!\n");
+ }
+ /*
+@@ -2915,7 +2938,6 @@ static int amd64_probe_one_instance(stru
+       pvt->ext_model          = boot_cpu_data.x86_model >> 4;
+       pvt->mc_type_index      = mc_type_index;
+       pvt->ops                = family_ops(mc_type_index);
+-      pvt->old_mcgctl         = 0;
+       /*
+        * We have the dram_f2_ctl device as an argument, now go reserve its
+--- a/drivers/edac/amd64_edac.h
++++ b/drivers/edac/amd64_edac.h
+@@ -147,6 +147,8 @@
+ #define MAX_CS_COUNT                  8
+ #define DRAM_REG_COUNT                        8
++#define ON true
++#define OFF false
+ /*
+  * PCI-defined configuration space registers
+@@ -386,10 +388,7 @@ enum {
+ #define K8_NBCAP_DUAL_NODE            BIT(1)
+ #define K8_NBCAP_DCT_DUAL             BIT(0)
+-/*
+- * MSR Regs
+- */
+-#define K8_MSR_MCGCTL                 0x017b
++/* MSRs */
+ #define K8_MSR_MCGCTL_NBE             BIT(4)
+ #define K8_MSR_MC4CTL                 0x0410
+@@ -487,7 +486,6 @@ struct amd64_pvt {
+       /* Save old hw registers' values before we modified them */
+       u32 nbctl_mcgctl_saved;         /* When true, following 2 are valid */
+       u32 old_nbctl;
+-      unsigned long old_mcgctl;       /* per core on this node */
+       /* MC Type Index value: socket F vs Family 10h */
+       u32 mc_type_index;
+@@ -495,6 +493,7 @@ struct amd64_pvt {
+       /* misc settings */
+       struct flags {
+               unsigned long cf8_extcfg:1;
++              unsigned long ecc_report:1;
+       } flags;
+ };
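
The unification collapses two nearly identical read-modify-write loops into one helper taking a bool, saving prior state on enable so that disable only clears bits that were clear before. A reduced sketch of that shape; the bit value and names are illustrative, not the driver's:

#include <stdbool.h>
#include <stdint.h>

#define NBE (1u << 4)   /* illustrative stand-in for K8_MSR_MCGCTL_NBE */

/* One helper replaces separate enable/disable loops, as
 * amd64_toggle_ecc_err_reporting() does in the patch. */
static void toggle_nbe(uint32_t *regs, bool *was_set, int n, bool on)
{
    for (int i = 0; i < n; i++) {
        if (on) {
            was_set[i] = regs[i] & NBE;   /* remember prior state */
            regs[i] |= NBE;
        } else if (!was_set[i]) {
            regs[i] &= ~NBE;   /* only clear if it was off before */
        }
    }
}
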
diff --git a/queue-2.6.32/b43-avoid-ppc-fault-during-resume.patch b/queue-2.6.32/b43-avoid-ppc-fault-during-resume.patch
new file mode 100644 (file)
index 0000000..2f25847
--- /dev/null
@@ -0,0 +1,53 @@
+From c2ff581acab16c6af56d9e8c1a579bf041ec00b1 Mon Sep 17 00:00:00 2001
+From: Larry Finger <Larry.Finger@lwfinger.net>
+Date: Mon, 23 Nov 2009 18:40:45 -0600
+Subject: b43: avoid PPC fault during resume
+
+From: Larry Finger <Larry.Finger@lwfinger.net>
+
+commit c2ff581acab16c6af56d9e8c1a579bf041ec00b1 upstream.
+
+The routine b43_is_hw_radio_enabled() has long been a problem.
+For PPC architecture with PHY Revision < 3, a read of the register
+B43_MMIO_HWENABLED_LO will cause a CPU fault unless b43_status()
+returns a value of 2 (B43_STAT_STARTED) (BUG 14181). Fixing that
+results in Bug 14538 in which the driver is unable to reassociate
+after resuming from hibernation because b43_status() returns 0.
+
+The correct fix would be to determine why the status is 0; however,
+I have not yet found why that happens. The correct value is found for
+my device, which has PHY revision >= 3.
+
+Returning TRUE when the PHY revision < 3 and b43_status() returns 0 fixes
+the regression for 2.6.32.
+
+This patch fixes the problem in Red Hat Bugzilla #538523.
+
+Signed-off-by: Larry Finger <Larry.Finger@lwfinger.net>
+Tested-by: Christian Casteyde <casteyde.christian@free.fr>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/wireless/b43/rfkill.c |   10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/wireless/b43/rfkill.c
++++ b/drivers/net/wireless/b43/rfkill.c
+@@ -33,8 +33,14 @@ bool b43_is_hw_radio_enabled(struct b43_
+                     & B43_MMIO_RADIO_HWENABLED_HI_MASK))
+                       return 1;
+       } else {
+-              if (b43_status(dev) >= B43_STAT_STARTED &&
+-                  b43_read16(dev, B43_MMIO_RADIO_HWENABLED_LO)
++              /* To prevent CPU fault on PPC, do not read a register
++               * unless the interface is started; however, on resume
++               * for hibernation, this routine is entered early. When
++               * that happens, unconditionally return TRUE.
++               */
++              if (b43_status(dev) < B43_STAT_STARTED)
++                      return 1;
++              if (b43_read16(dev, B43_MMIO_RADIO_HWENABLED_LO)
+                   & B43_MMIO_RADIO_HWENABLED_LO_MASK)
+                       return 1;
+       }
diff --git a/queue-2.6.32/cpumask-use-modern-cpumask-style-in-drivers-edac-amd64_edac.c.patch b/queue-2.6.32/cpumask-use-modern-cpumask-style-in-drivers-edac-amd64_edac.c.patch
new file mode 100644 (file)
index 0000000..b0c10a9
--- /dev/null
@@ -0,0 +1,94 @@
+From ba578cb34a71fb08fff14ac0796b934a8c9991e1 Mon Sep 17 00:00:00 2001
+From: Rusty Russell <rusty@rustcorp.com.au>
+Date: Tue, 3 Nov 2009 14:56:35 +1030
+Subject: cpumask: use modern cpumask style in drivers/edac/amd64_edac.c
+
+From: Rusty Russell <rusty@rustcorp.com.au>
+
+commit ba578cb34a71fb08fff14ac0796b934a8c9991e1 upstream.
+
+cpumask_t -> struct cpumask, and don't put one on the stack.  (Note: this
+is actually on the stack unless CONFIG_CPUMASK_OFFSTACK=y).
+
+Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
+Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/edac/amd64_edac.c |   24 +++++++++++++++---------
+ 1 file changed, 15 insertions(+), 9 deletions(-)
+
+--- a/drivers/edac/amd64_edac.c
++++ b/drivers/edac/amd64_edac.c
+@@ -2625,7 +2625,7 @@ static int amd64_init_csrows(struct mem_
+ static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
+ {
+       struct amd64_pvt *pvt = mci->pvt_info;
+-      const cpumask_t *cpumask = cpumask_of_node(pvt->mc_node_id);
++      const struct cpumask *cpumask = cpumask_of_node(pvt->mc_node_id);
+       int cpu, idx = 0, err = 0;
+       struct msr msrs[cpumask_weight(cpumask)];
+       u32 value;
+@@ -2701,7 +2701,7 @@ static void amd64_enable_ecc_error_repor
+ static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt)
+ {
+-      const cpumask_t *cpumask = cpumask_of_node(pvt->mc_node_id);
++      const struct cpumask *cpumask = cpumask_of_node(pvt->mc_node_id);
+       int cpu, idx = 0, err = 0;
+       struct msr msrs[cpumask_weight(cpumask)];
+       u32 value;
+@@ -2734,7 +2734,7 @@ static void amd64_restore_ecc_error_repo
+ }
+ /* get all cores on this DCT */
+-static void get_cpus_on_this_dct_cpumask(cpumask_t *mask, int nid)
++static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, int nid)
+ {
+       int cpu;
+@@ -2746,25 +2746,30 @@ static void get_cpus_on_this_dct_cpumask
+ /* check MCG_CTL on all the cpus on this node */
+ static bool amd64_nb_mce_bank_enabled_on_node(int nid)
+ {
+-      cpumask_t mask;
++      cpumask_var_t mask;
+       struct msr *msrs;
+       int cpu, nbe, idx = 0;
+       bool ret = false;
+-      cpumask_clear(&mask);
++      if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
++              amd64_printk(KERN_WARNING, "%s: error allocating mask\n",
++                           __func__);
++              return false;
++      }
+-      get_cpus_on_this_dct_cpumask(&mask, nid);
++      get_cpus_on_this_dct_cpumask(mask, nid);
+-      msrs = kzalloc(sizeof(struct msr) * cpumask_weight(&mask), GFP_KERNEL);
++      msrs = kzalloc(sizeof(struct msr) * cpumask_weight(mask), GFP_KERNEL);
+       if (!msrs) {
+               amd64_printk(KERN_WARNING, "%s: error allocating msrs\n",
+                             __func__);
++              free_cpumask_var(mask);
+                return false;
+       }
+-      rdmsr_on_cpus(&mask, MSR_IA32_MCG_CTL, msrs);
++      rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
+-      for_each_cpu(cpu, &mask) {
++      for_each_cpu(cpu, mask) {
+               nbe = msrs[idx].l & K8_MSR_MCGCTL_NBE;
+               debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
+@@ -2780,6 +2785,7 @@ static bool amd64_nb_mce_bank_enabled_on
+ out:
+       kfree(msrs);
++      free_cpumask_var(mask);
+       return ret;
+ }
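
The point of the conversion is to keep a potentially large bitmap off the stack: allocate it zeroed and free it on every exit path. A userspace sketch of that allocation discipline, with calloc()/free() standing in for zalloc_cpumask_var()/free_cpumask_var():

#include <stdbool.h>
#include <stdlib.h>

/* Returns true if bit 'want' is set in every one of the n values. */
static bool all_have_bit(const unsigned *vals, int n, unsigned want)
{
    /* Heap-allocated and zeroed, like zalloc_cpumask_var(..., GFP_KERNEL). */
    unsigned *scratch = calloc(n, sizeof(*scratch));
    bool ret = false;

    if (!scratch)
        return false;

    for (int i = 0; i < n; i++) {
        scratch[i] = vals[i] & want;
        if (!scratch[i])
            goto out;   /* early exit still reaches the free below */
    }
    ret = true;
out:
    free(scratch);      /* freed on every path, as the patch adds */
    return ret;
}
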
diff --git a/queue-2.6.32/drm-disable-all-the-possible-outputs-crtcs-before-entering-kms-mode.patch b/queue-2.6.32/drm-disable-all-the-possible-outputs-crtcs-before-entering-kms-mode.patch
new file mode 100644 (file)
index 0000000..01ef5d2
--- /dev/null
@@ -0,0 +1,42 @@
+From b16d9acbdb97452d1418420e069acf7381ef10bb Mon Sep 17 00:00:00 2001
+From: Zhao Yakui <yakui.zhao@intel.com>
+Date: Wed, 9 Dec 2009 11:23:42 +0800
+Subject: drm: disable all the possible outputs/crtcs before entering KMS mode
+
+From: Zhao Yakui <yakui.zhao@intel.com>
+
+commit b16d9acbdb97452d1418420e069acf7381ef10bb upstream.
+
+Sometimes we will use a crtc for the integrated LVDS that is different from
+the one assigned by the BIOS. If we want flicker-free transitions,
+then we could read out the current state for it and set our current state
+accordingly.
+
+But it is true that if we aren't reading the current state out, we do need
+to turn everything off before modesetting. Otherwise the clocks can get
+very angry and we get something worse than a flicker at boot.
+In fact we do a similar thing in UMS mode: we disable all the possible
+outputs/crtcs before the first modeset.
+
+So we disable all the possible outputs/crtcs before entering KMS mode.
+Before we configure a connector/encoder/crtc, the function
+drm_helper_disable_unused_functions() can disable all the possible outputs/crtcs.
+
+Signed-off-by: Zhao Yakui <yakui.zhao@intel.com>
+Reviewed-by: Eric Anholt <eric@anholt.net>
+Reviewed-by: Rafal Milecki <zajec5@gmail.com>
+Signed-off-by: Dave Airlie <airlied@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+--- a/drivers/gpu/drm/drm_crtc_helper.c
++++ b/drivers/gpu/drm/drm_crtc_helper.c
+@@ -1020,6 +1020,9 @@ bool drm_helper_initial_config(struct drm_device *dev)
+ {
+       int count = 0;
++      /* disable all the possible outputs/crtcs before entering KMS mode */
++      drm_helper_disable_unused_functions(dev);
++
+       drm_fb_helper_parse_command_line(dev);
+       count = drm_helper_probe_connector_modes(dev,
diff --git a/queue-2.6.32/drm-radeon-kms-fix-crtc-vblank-update-for-r600.patch b/queue-2.6.32/drm-radeon-kms-fix-crtc-vblank-update-for-r600.patch
new file mode 100644 (file)
index 0000000..3242272
--- /dev/null
@@ -0,0 +1,45 @@
+From airlied@gmail.com  Mon Jan  4 14:45:25 2010
+From: Dave Airlie <airlied@gmail.com>
+Date: Mon, 21 Dec 2009 14:33:52 +1000
+Subject: drm/radeon/kms: fix crtc vblank update for r600
+To: stable@kernel.org
+Cc: Dave Airlie <airlied@redhat.com>, linux-kernel@vger.kernel.org, dri-devel@lists.sf.net
+Message-ID: <1261370032-15420-1-git-send-email-airlied@gmail.com>
+
+From: Dave Airlie <airlied@redhat.com>
+
+In 2.6.32.2, r600 had no IRQ support; however, the patch in
+500b758725314ab1b5316eb0caa5b0fa26740e6b to fix vblanks on avivo
+cards needs IRQs.
+
+So check for an R600 card and avoid this path if so.
+
+This is a stable-only patch for 2.6.32.2, as 2.6.33 has IRQs for r600.
+
+Signed-off-by: Dave Airlie <airlied@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/gpu/drm/radeon/atombios_crtc.c |    6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/radeon/atombios_crtc.c
++++ b/drivers/gpu/drm/radeon/atombios_crtc.c
+@@ -249,13 +249,15 @@ void atombios_crtc_dpms(struct drm_crtc 
+               if (ASIC_IS_DCE3(rdev))
+                       atombios_enable_crtc_memreq(crtc, 1);
+               atombios_blank_crtc(crtc, 0);
+-              drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
++              if (rdev->family < CHIP_R600)
++                      drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
+               radeon_crtc_load_lut(crtc);
+               break;
+       case DRM_MODE_DPMS_STANDBY:
+       case DRM_MODE_DPMS_SUSPEND:
+       case DRM_MODE_DPMS_OFF:
+-              drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
++              if (rdev->family < CHIP_R600)
++                      drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
+               atombios_blank_crtc(crtc, 1);
+               if (ASIC_IS_DCE3(rdev))
+                       atombios_enable_crtc_memreq(crtc, 0);
diff --git a/queue-2.6.32/e100-fix-broken-cbs-accounting-due-to-missing-memset.patch b/queue-2.6.32/e100-fix-broken-cbs-accounting-due-to-missing-memset.patch
new file mode 100644 (file)
index 0000000..dd4a355
--- /dev/null
@@ -0,0 +1,46 @@
+From 70abc8cb90e679d8519721e2761d8366a18212a6 Mon Sep 17 00:00:00 2001
+From: Roger Oksanen <roger.oksanen@cs.helsinki.fi>
+Date: Fri, 18 Dec 2009 20:18:21 -0800
+Subject: e100: Fix broken cbs accounting due to missing memset.
+
+From: Roger Oksanen <roger.oksanen@cs.helsinki.fi>
+
+commit 70abc8cb90e679d8519721e2761d8366a18212a6 upstream.
+
+Alan Stern noticed that e100 caused slab corruption.
+Commit 98468efddb101f8a29af974101c17ba513b07be1 changed the allocation
+of cbs to use dma pools, which don't return zeroed memory. In particular
+the cb->status field, used to track which cb to clean, was left
+uninitialized, causing (the visible) double freeing of skbs and a wrong
+free cbs count.
+
+Now the cbs are explicitly zeroed at allocation time.
+
+Reported-by: Alan Stern <stern@rowland.harvard.edu>
+Tested-by: Alan Stern <stern@rowland.harvard.edu>
+Signed-off-by: Roger Oksanen <roger.oksanen@cs.helsinki.fi>
+Acked-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/e100.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/e100.c
++++ b/drivers/net/e100.c
+@@ -1817,6 +1817,7 @@ static int e100_alloc_cbs(struct nic *ni
+                                 &nic->cbs_dma_addr);
+       if (!nic->cbs)
+               return -ENOMEM;
++      memset(nic->cbs, 0, count * sizeof(struct cb));
+       for (cb = nic->cbs, i = 0; i < count; cb++, i++) {
+               cb->next = (i + 1 < count) ? cb + 1 : nic->cbs;
+@@ -1825,7 +1826,6 @@ static int e100_alloc_cbs(struct nic *ni
+               cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb);
+               cb->link = cpu_to_le32(nic->cbs_dma_addr +
+                       ((i+1) % count) * sizeof(struct cb));
+-              cb->skb = NULL;
+       }
+       nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs;
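
The lesson generalizes: pool allocators return uninitialized memory, so any field the code reads before its first write (here cb->status) must be zeroed explicitly. A hypothetical userspace sketch, with malloc() standing in for the dma pool allocation:

#include <stdlib.h>
#include <string.h>

struct cb {
    unsigned short status;   /* read by the cleanup path before first write */
    struct cb *next;
};

static struct cb *alloc_cbs(int count)
{
    /* malloc(), like a dma pool, does not return zeroed memory... */
    struct cb *cbs = malloc(count * sizeof(*cbs));
    if (!cbs)
        return NULL;

    /* ...so zero it explicitly: the memset the patch adds. */
    memset(cbs, 0, count * sizeof(*cbs));

    for (int i = 0; i < count; i++)
        cbs[i].next = &cbs[(i + 1) % count];
    return cbs;
}
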
diff --git a/queue-2.6.32/ext4-convert-to-generic-reserved-quota-s-space-management.patch b/queue-2.6.32/ext4-convert-to-generic-reserved-quota-s-space-management.patch
new file mode 100644 (file)
index 0000000..846dfb0
--- /dev/null
@@ -0,0 +1,100 @@
+From a9e7f4472075fb6937c545af3f6329e9946bbe66 Mon Sep 17 00:00:00 2001
+From: Dmitry Monakhov <dmonakhov@openvz.org>
+Date: Mon, 14 Dec 2009 15:21:14 +0300
+Subject: ext4: Convert to generic reserved quota's space management.
+
+From: Dmitry Monakhov <dmonakhov@openvz.org>
+
+commit a9e7f4472075fb6937c545af3f6329e9946bbe66 upstream.
+
+This patch also fixes a write vs. chown race condition.
+
+Acked-by: "Theodore Ts'o" <tytso@mit.edu>
+Signed-off-by: Dmitry Monakhov <dmonakhov@openvz.org>
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/ext4/ext4.h  |    6 +++++-
+ fs/ext4/inode.c |   16 +++++++---------
+ fs/ext4/super.c |    5 +++++
+ 3 files changed, 17 insertions(+), 10 deletions(-)
+
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -698,6 +698,10 @@ struct ext4_inode_info {
+       __u16 i_extra_isize;
+       spinlock_t i_block_reservation_lock;
++#ifdef CONFIG_QUOTA
++      /* quota space reservation, managed internally by quota code */
++      qsize_t i_reserved_quota;
++#endif
+       /* completed async DIOs that might need unwritten extents handling */
+       struct list_head i_aio_dio_complete_list;
+@@ -1432,7 +1436,7 @@ extern int ext4_chunk_trans_blocks(struc
+ extern int ext4_block_truncate_page(handle_t *handle,
+               struct address_space *mapping, loff_t from);
+ extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
+-extern qsize_t ext4_get_reserved_space(struct inode *inode);
++extern qsize_t *ext4_get_reserved_space(struct inode *inode);
+ extern int flush_aio_dio_completed_IO(struct inode *inode);
+ /* ioctl.c */
+ extern long ext4_ioctl(struct file *, unsigned int, unsigned long);
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -1045,17 +1045,12 @@ out:
+       return err;
+ }
+-qsize_t ext4_get_reserved_space(struct inode *inode)
++#ifdef CONFIG_QUOTA
++qsize_t *ext4_get_reserved_space(struct inode *inode)
+ {
+-      unsigned long long total;
+-
+-      spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
+-      total = EXT4_I(inode)->i_reserved_data_blocks +
+-              EXT4_I(inode)->i_reserved_meta_blocks;
+-      spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
+-
+-      return (total << inode->i_blkbits);
++      return &EXT4_I(inode)->i_reserved_quota;
+ }
++#endif
+ /*
+  * Calculate the number of metadata blocks need to reserve
+  * to allocate @blocks for non extent file based file
+@@ -4850,6 +4845,9 @@ struct inode *ext4_iget(struct super_blo
+                       ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
+       inode->i_size = ext4_isize(raw_inode);
+       ei->i_disksize = inode->i_size;
++#ifdef CONFIG_QUOTA
++      ei->i_reserved_quota = 0;
++#endif
+       inode->i_generation = le32_to_cpu(raw_inode->i_generation);
+       ei->i_block_group = iloc.block_group;
+       ei->i_last_alloc_group = ~0;
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -704,6 +704,9 @@ static struct inode *ext4_alloc_inode(st
+       ei->i_allocated_meta_blocks = 0;
+       ei->i_delalloc_reserved_flag = 0;
+       spin_lock_init(&(ei->i_block_reservation_lock));
++#ifdef CONFIG_QUOTA
++      ei->i_reserved_quota = 0;
++#endif
+       INIT_LIST_HEAD(&ei->i_aio_dio_complete_list);
+       ei->cur_aio_dio = NULL;
+       ei->i_sync_tid = 0;
+@@ -1001,7 +1004,9 @@ static const struct dquot_operations ext
+       .reserve_space  = dquot_reserve_space,
+       .claim_space    = dquot_claim_space,
+       .release_rsv    = dquot_release_reserved_space,
++#ifdef CONFIG_QUOTA
+       .get_reserved_space = ext4_get_reserved_space,
++#endif
+       .alloc_inode    = dquot_alloc_inode,
+       .free_space     = dquot_free_space,
+       .free_inode     = dquot_free_inode,
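
Note the signature change: ext4_get_reserved_space() used to compute a snapshot under a lock and now simply exposes a pointer to per-inode storage that the generic quota code manages itself. A sketch of that delegation pattern, with illustrative names:

#include <pthread.h>

struct inode_info {
    pthread_mutex_t lock;
    long long reserved_data;
    long long reserved_meta;
    long long reserved_quota;   /* managed by generic quota code */
};

/* Before: compute a snapshot under the lock and return it by value. */
static long long get_reserved_old(struct inode_info *ei)
{
    long long total;

    pthread_mutex_lock(&ei->lock);
    total = ei->reserved_data + ei->reserved_meta;
    pthread_mutex_unlock(&ei->lock);
    return total;
}

/* After: hand back a pointer so the generic code owns the bookkeeping,
 * as ext4_get_reserved_space() now returns &ei->i_reserved_quota. */
static long long *get_reserved_new(struct inode_info *ei)
{
    return &ei->reserved_quota;
}
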
diff --git a/queue-2.6.32/ext4-fix-potential-quota-deadlock.patch b/queue-2.6.32/ext4-fix-potential-quota-deadlock.patch
new file mode 100644 (file)
index 0000000..781d4f1
--- /dev/null
@@ -0,0 +1,245 @@
+From d21cd8f163ac44b15c465aab7306db931c606908 Mon Sep 17 00:00:00 2001
+From: Dmitry Monakhov <dmonakhov@openvz.org>
+Date: Thu, 10 Dec 2009 03:31:45 +0000
+Subject: ext4: Fix potential quota deadlock
+
+From: Dmitry Monakhov <dmonakhov@openvz.org>
+
+commit d21cd8f163ac44b15c465aab7306db931c606908 upstream.
+
+We have to delay vfs_dq_claim_space() until allocation context destruction.
+Currently we have following call-trace:
+ext4_mb_new_blocks()
+  /* task is already holding ac->alloc_semp */
+ ->ext4_mb_mark_diskspace_used
+    ->vfs_dq_claim_space()  /*  acquire dqptr_sem here. Possible deadlock */
+ ->ext4_mb_release_context() /* drop ac->alloc_semp here */
+
+Let's move quota claiming to ext4_da_update_reserve_space()
+
+ =======================================================
+ [ INFO: possible circular locking dependency detected ]
+ 2.6.32-rc7 #18
+ -------------------------------------------------------
+ write-truncate-/3465 is trying to acquire lock:
+  (&s->s_dquot.dqptr_sem){++++..}, at: [<c025e73b>] dquot_claim_space+0x3b/0x1b0
+
+ but task is already holding lock:
+  (&meta_group_info[i]->alloc_sem){++++..}, at: [<c02ce962>] ext4_mb_load_buddy+0xb2/0x370
+
+ which lock already depends on the new lock.
+
+ the existing dependency chain (in reverse order) is:
+
+ -> #3 (&meta_group_info[i]->alloc_sem){++++..}:
+        [<c017d04b>] __lock_acquire+0xd7b/0x1260
+        [<c017d5ea>] lock_acquire+0xba/0xd0
+        [<c0527191>] down_read+0x51/0x90
+        [<c02ce962>] ext4_mb_load_buddy+0xb2/0x370
+        [<c02d0c1c>] ext4_mb_free_blocks+0x46c/0x870
+        [<c029c9d3>] ext4_free_blocks+0x73/0x130
+        [<c02c8cfc>] ext4_ext_truncate+0x76c/0x8d0
+        [<c02a8087>] ext4_truncate+0x187/0x5e0
+        [<c01e0f7b>] vmtruncate+0x6b/0x70
+        [<c022ec02>] inode_setattr+0x62/0x190
+        [<c02a2d7a>] ext4_setattr+0x25a/0x370
+        [<c022ee81>] notify_change+0x151/0x340
+        [<c021349d>] do_truncate+0x6d/0xa0
+        [<c0221034>] may_open+0x1d4/0x200
+        [<c022412b>] do_filp_open+0x1eb/0x910
+        [<c021244d>] do_sys_open+0x6d/0x140
+        [<c021258e>] sys_open+0x2e/0x40
+        [<c0103100>] sysenter_do_call+0x12/0x32
+
+ -> #2 (&ei->i_data_sem){++++..}:
+        [<c017d04b>] __lock_acquire+0xd7b/0x1260
+        [<c017d5ea>] lock_acquire+0xba/0xd0
+        [<c0527191>] down_read+0x51/0x90
+        [<c02a5787>] ext4_get_blocks+0x47/0x450
+        [<c02a74c1>] ext4_getblk+0x61/0x1d0
+        [<c02a7a7f>] ext4_bread+0x1f/0xa0
+        [<c02bcddc>] ext4_quota_write+0x12c/0x310
+        [<c0262d23>] qtree_write_dquot+0x93/0x120
+        [<c0261708>] v2_write_dquot+0x28/0x30
+        [<c025d3fb>] dquot_commit+0xab/0xf0
+        [<c02be977>] ext4_write_dquot+0x77/0x90
+        [<c02be9bf>] ext4_mark_dquot_dirty+0x2f/0x50
+        [<c025e321>] dquot_alloc_inode+0x101/0x180
+        [<c029fec2>] ext4_new_inode+0x602/0xf00
+        [<c02ad789>] ext4_create+0x89/0x150
+        [<c0221ff2>] vfs_create+0xa2/0xc0
+        [<c02246e7>] do_filp_open+0x7a7/0x910
+        [<c021244d>] do_sys_open+0x6d/0x140
+        [<c021258e>] sys_open+0x2e/0x40
+        [<c0103100>] sysenter_do_call+0x12/0x32
+
+ -> #1 (&sb->s_type->i_mutex_key#7/4){+.+...}:
+        [<c017d04b>] __lock_acquire+0xd7b/0x1260
+        [<c017d5ea>] lock_acquire+0xba/0xd0
+        [<c0526505>] mutex_lock_nested+0x65/0x2d0
+        [<c0260c9d>] vfs_load_quota_inode+0x4bd/0x5a0
+        [<c02610af>] vfs_quota_on_path+0x5f/0x70
+        [<c02bc812>] ext4_quota_on+0x112/0x190
+        [<c026345a>] sys_quotactl+0x44a/0x8a0
+        [<c0103100>] sysenter_do_call+0x12/0x32
+
+ -> #0 (&s->s_dquot.dqptr_sem){++++..}:
+        [<c017d361>] __lock_acquire+0x1091/0x1260
+        [<c017d5ea>] lock_acquire+0xba/0xd0
+        [<c0527191>] down_read+0x51/0x90
+        [<c025e73b>] dquot_claim_space+0x3b/0x1b0
+        [<c02cb95f>] ext4_mb_mark_diskspace_used+0x36f/0x380
+        [<c02d210a>] ext4_mb_new_blocks+0x34a/0x530
+        [<c02c83fb>] ext4_ext_get_blocks+0x122b/0x13c0
+        [<c02a5966>] ext4_get_blocks+0x226/0x450
+        [<c02a5ff3>] mpage_da_map_blocks+0xc3/0xaa0
+        [<c02a6ed6>] ext4_da_writepages+0x506/0x790
+        [<c01de272>] do_writepages+0x22/0x50
+        [<c01d766d>] __filemap_fdatawrite_range+0x6d/0x80
+        [<c01d7b9b>] filemap_flush+0x2b/0x30
+        [<c02a40ac>] ext4_alloc_da_blocks+0x5c/0x60
+        [<c029e595>] ext4_release_file+0x75/0xb0
+        [<c0216b59>] __fput+0xf9/0x210
+        [<c0216c97>] fput+0x27/0x30
+        [<c02122dc>] filp_close+0x4c/0x80
+        [<c014510e>] put_files_struct+0x6e/0xd0
+        [<c01451b7>] exit_files+0x47/0x60
+        [<c0146a24>] do_exit+0x144/0x710
+        [<c0147028>] do_group_exit+0x38/0xa0
+        [<c0159abc>] get_signal_to_deliver+0x2ac/0x410
+        [<c0102849>] do_notify_resume+0xb9/0x890
+        [<c01032d2>] work_notifysig+0x13/0x21
+
+ other info that might help us debug this:
+
+ 3 locks held by write-truncate-/3465:
+  #0:  (jbd2_handle){+.+...}, at: [<c02e1f8f>] start_this_handle+0x38f/0x5c0
+  #1:  (&ei->i_data_sem){++++..}, at: [<c02a57f6>] ext4_get_blocks+0xb6/0x450
+  #2:  (&meta_group_info[i]->alloc_sem){++++..}, at: [<c02ce962>] ext4_mb_load_buddy+0xb2/0x370
+
+ stack backtrace:
+ Pid: 3465, comm: write-truncate- Not tainted 2.6.32-rc7 #18
+ Call Trace:
+  [<c0524cb3>] ? printk+0x1d/0x22
+  [<c017ac9a>] print_circular_bug+0xca/0xd0
+  [<c017d361>] __lock_acquire+0x1091/0x1260
+  [<c016bca2>] ? sched_clock_local+0xd2/0x170
+  [<c0178fd0>] ? trace_hardirqs_off_caller+0x20/0xd0
+  [<c017d5ea>] lock_acquire+0xba/0xd0
+  [<c025e73b>] ? dquot_claim_space+0x3b/0x1b0
+  [<c0527191>] down_read+0x51/0x90
+  [<c025e73b>] ? dquot_claim_space+0x3b/0x1b0
+  [<c025e73b>] dquot_claim_space+0x3b/0x1b0
+  [<c02cb95f>] ext4_mb_mark_diskspace_used+0x36f/0x380
+  [<c02d210a>] ext4_mb_new_blocks+0x34a/0x530
+  [<c02c601d>] ? ext4_ext_find_extent+0x25d/0x280
+  [<c02c83fb>] ext4_ext_get_blocks+0x122b/0x13c0
+  [<c016bca2>] ? sched_clock_local+0xd2/0x170
+  [<c016be60>] ? sched_clock_cpu+0x120/0x160
+  [<c016beef>] ? cpu_clock+0x4f/0x60
+  [<c0178fd0>] ? trace_hardirqs_off_caller+0x20/0xd0
+  [<c052712c>] ? down_write+0x8c/0xa0
+  [<c02a5966>] ext4_get_blocks+0x226/0x450
+  [<c016be60>] ? sched_clock_cpu+0x120/0x160
+  [<c016beef>] ? cpu_clock+0x4f/0x60
+  [<c017908b>] ? trace_hardirqs_off+0xb/0x10
+  [<c02a5ff3>] mpage_da_map_blocks+0xc3/0xaa0
+  [<c01d69cc>] ? find_get_pages_tag+0x16c/0x180
+  [<c01d6860>] ? find_get_pages_tag+0x0/0x180
+  [<c02a73bd>] ? __mpage_da_writepage+0x16d/0x1a0
+  [<c01dfc4e>] ? pagevec_lookup_tag+0x2e/0x40
+  [<c01ddf1b>] ? write_cache_pages+0xdb/0x3d0
+  [<c02a7250>] ? __mpage_da_writepage+0x0/0x1a0
+  [<c02a6ed6>] ext4_da_writepages+0x506/0x790
+  [<c016beef>] ? cpu_clock+0x4f/0x60
+  [<c016bca2>] ? sched_clock_local+0xd2/0x170
+  [<c016be60>] ? sched_clock_cpu+0x120/0x160
+  [<c016be60>] ? sched_clock_cpu+0x120/0x160
+  [<c02a69d0>] ? ext4_da_writepages+0x0/0x790
+  [<c01de272>] do_writepages+0x22/0x50
+  [<c01d766d>] __filemap_fdatawrite_range+0x6d/0x80
+  [<c01d7b9b>] filemap_flush+0x2b/0x30
+  [<c02a40ac>] ext4_alloc_da_blocks+0x5c/0x60
+  [<c029e595>] ext4_release_file+0x75/0xb0
+  [<c0216b59>] __fput+0xf9/0x210
+  [<c0216c97>] fput+0x27/0x30
+  [<c02122dc>] filp_close+0x4c/0x80
+  [<c014510e>] put_files_struct+0x6e/0xd0
+  [<c01451b7>] exit_files+0x47/0x60
+  [<c0146a24>] do_exit+0x144/0x710
+  [<c017b163>] ? lock_release_holdtime+0x33/0x210
+  [<c0528137>] ? _spin_unlock_irq+0x27/0x30
+  [<c0147028>] do_group_exit+0x38/0xa0
+  [<c017babb>] ? trace_hardirqs_on+0xb/0x10
+  [<c0159abc>] get_signal_to_deliver+0x2ac/0x410
+  [<c0102849>] do_notify_resume+0xb9/0x890
+  [<c0178fd0>] ? trace_hardirqs_off_caller+0x20/0xd0
+  [<c017b163>] ? lock_release_holdtime+0x33/0x210
+  [<c0165b50>] ? autoremove_wake_function+0x0/0x50
+  [<c017ba54>] ? trace_hardirqs_on_caller+0x134/0x190
+  [<c017babb>] ? trace_hardirqs_on+0xb/0x10
+  [<c0300ba4>] ? security_file_permission+0x14/0x20
+  [<c0215761>] ? vfs_write+0x131/0x190
+  [<c0214f50>] ? do_sync_write+0x0/0x120
+  [<c0103115>] ? sysenter_do_call+0x27/0x32
+  [<c01032d2>] work_notifysig+0x13/0x21
+
+CC: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Dmitry Monakhov <dmonakhov@openvz.org>
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/ext4/inode.c   |    9 +++++++--
+ fs/ext4/mballoc.c |    6 ------
+ 2 files changed, 7 insertions(+), 8 deletions(-)
+
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -1088,7 +1088,7 @@ static int ext4_calc_metadata_amount(str
+ static void ext4_da_update_reserve_space(struct inode *inode, int used)
+ {
+       struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+-      int total, mdb, mdb_free;
++      int total, mdb, mdb_free, mdb_claim = 0;
+       spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
+       /* recalculate the number of metablocks still need to be reserved */
+@@ -1101,7 +1101,9 @@ static void ext4_da_update_reserve_space
+       if (mdb_free) {
+               /* Account for allocated meta_blocks */
+-              mdb_free -= EXT4_I(inode)->i_allocated_meta_blocks;
++              mdb_claim = EXT4_I(inode)->i_allocated_meta_blocks;
++              BUG_ON(mdb_free < mdb_claim);
++              mdb_free -= mdb_claim;
+               /* update fs dirty blocks counter */
+               percpu_counter_sub(&sbi->s_dirtyblocks_counter, mdb_free);
+@@ -1112,8 +1114,11 @@ static void ext4_da_update_reserve_space
+       /* update per-inode reservations */
+       BUG_ON(used  > EXT4_I(inode)->i_reserved_data_blocks);
+       EXT4_I(inode)->i_reserved_data_blocks -= used;
++      percpu_counter_sub(&sbi->s_dirtyblocks_counter, used + mdb_claim);
+       spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
++      vfs_dq_claim_block(inode, used + mdb_claim);
++
+       /*
+        * free those over-booking quota for metadata blocks
+        */
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -2755,12 +2755,6 @@ ext4_mb_mark_diskspace_used(struct ext4_
+       if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED))
+               /* release all the reserved blocks if non delalloc */
+               percpu_counter_sub(&sbi->s_dirtyblocks_counter, reserv_blks);
+-      else {
+-              percpu_counter_sub(&sbi->s_dirtyblocks_counter,
+-                                              ac->ac_b_ex.fe_len);
+-              /* convert reserved quota blocks to real quota blocks */
+-              vfs_dq_claim_block(ac->ac_inode, ac->ac_b_ex.fe_len);
+-      }
+       if (sbi->s_log_groups_per_flex) {
+               ext4_group_t flex_group = ext4_flex_group(sbi,
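
The deadlock is broken by delaying the quota claim until the allocation semaphore has been dropped, rather than nesting the two locks. A minimal pthread sketch of the before/after shape; the mutexes and helper are stand-ins for ac->alloc_semp and dqptr_sem:

#include <pthread.h>

static pthread_mutex_t alloc_sem = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t dqptr_sem = PTHREAD_MUTEX_INITIALIZER;

static void claim_quota(void)
{
    pthread_mutex_lock(&dqptr_sem);
    /* ... convert reserved quota to real quota ... */
    pthread_mutex_unlock(&dqptr_sem);
}

/* Before: dqptr_sem taken while alloc_sem is held, creating an
 * alloc_sem -> dqptr_sem dependency that is inverted elsewhere. */
static void mark_used_buggy(void)
{
    pthread_mutex_lock(&alloc_sem);
    claim_quota();              /* nested acquisition: deadlock-prone */
    pthread_mutex_unlock(&alloc_sem);
}

/* After: the claim moves past the unlock, as the patch moves it into
 * ext4_da_update_reserve_space(). */
static void mark_used_fixed(void)
{
    pthread_mutex_lock(&alloc_sem);
    /* ... mark blocks used on disk ... */
    pthread_mutex_unlock(&alloc_sem);
    claim_quota();              /* no other lock held */
}
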
diff --git a/queue-2.6.32/ext4-fix-sleep-inside-spinlock-issue-with-quota-and-dealloc-14739.patch b/queue-2.6.32/ext4-fix-sleep-inside-spinlock-issue-with-quota-and-dealloc-14739.patch
new file mode 100644 (file)
index 0000000..ddd9d50
--- /dev/null
@@ -0,0 +1,59 @@
+From 39bc680a8160bb9d6743f7873b535d553ff61058 Mon Sep 17 00:00:00 2001
+From: Dmitry Monakhov <dmonakhov@openvz.org>
+Date: Thu, 10 Dec 2009 16:36:27 +0000
+Subject: ext4: fix sleep inside spinlock issue with quota and dealloc (#14739)
+
+From: Dmitry Monakhov <dmonakhov@openvz.org>
+
+commit 39bc680a8160bb9d6743f7873b535d553ff61058 upstream.
+
+Unlock i_block_reservation_lock before vfs_dq_reserve_block().
+This patch fixes http://bugzilla.kernel.org/show_bug.cgi?id=14739
+
+Cc: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Dmitry Monakhov <dmonakhov@openvz.org>
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/ext4/inode.c |   11 +++++------
+ 1 file changed, 5 insertions(+), 6 deletions(-)
+
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -1858,19 +1858,17 @@ repeat:
+       md_needed = mdblocks - EXT4_I(inode)->i_reserved_meta_blocks;
+       total = md_needed + nrblocks;
++      spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
+       /*
+        * Make quota reservation here to prevent quota overflow
+        * later. Real quota accounting is done at pages writeout
+        * time.
+        */
+-      if (vfs_dq_reserve_block(inode, total)) {
+-              spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
++      if (vfs_dq_reserve_block(inode, total))
+               return -EDQUOT;
+-      }
+       if (ext4_claim_free_blocks(sbi, total)) {
+-              spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
+               vfs_dq_release_reservation_block(inode, total);
+               if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
+                       yield();
+@@ -1878,10 +1876,11 @@ repeat:
+               }
+               return -ENOSPC;
+       }
++      spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
+       EXT4_I(inode)->i_reserved_data_blocks += nrblocks;
+-      EXT4_I(inode)->i_reserved_meta_blocks = mdblocks;
+-
++      EXT4_I(inode)->i_reserved_meta_blocks += md_needed;
+       spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
++
+       return 0;       /* success */
+ }
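
The underlying rule: code holding a spinlock must not call anything that can sleep. The fix drops the lock, makes the sleeping quota reservation, and retakes the lock to publish the counters. A userspace sketch with a pthread spinlock and a hypothetical sleeping reservation call (rsv_lock is assumed initialized with pthread_spin_init() at startup):

#include <pthread.h>
#include <unistd.h>

static pthread_spinlock_t rsv_lock;
static long reserved_data, reserved_meta;

/* Stand-in for vfs_dq_reserve_block(); pretend it may sleep. */
static int quota_reserve(long total)
{
    (void)total;
    usleep(1);      /* sleeping: must not run under a spinlock */
    return 0;       /* 0 = reservation granted */
}

static int reserve_blocks(long data, long meta)
{
    long total;

    pthread_spin_lock(&rsv_lock);
    total = data + meta;        /* compute the need under the lock */
    pthread_spin_unlock(&rsv_lock);

    if (quota_reserve(total))   /* sleeping call, no spinlock held */
        return -1;

    pthread_spin_lock(&rsv_lock);
    reserved_data += data;      /* retake the lock to update counters; */
    reserved_meta += meta;      /* note the +=, matching the patch's fix */
    pthread_spin_unlock(&rsv_lock);
    return 0;
}
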
diff --git a/queue-2.6.32/ext4-update-documentation-to-correct-the-inode_readahead_blks-option-name.patch b/queue-2.6.32/ext4-update-documentation-to-correct-the-inode_readahead_blks-option-name.patch
new file mode 100644 (file)
index 0000000..1bc1df5
--- /dev/null
@@ -0,0 +1,31 @@
+From 6d3b82f2d31f22085e5711b28dddcb9fb3d97a25 Mon Sep 17 00:00:00 2001
+From: Fang Wenqi <anton.fang@gmail.com>
+Date: Thu, 24 Dec 2009 17:51:42 -0500
+Subject: ext4: Update documentation to correct the inode_readahead_blks option name
+
+From: Fang Wenqi <anton.fang@gmail.com>
+
+commit 6d3b82f2d31f22085e5711b28dddcb9fb3d97a25 upstream.
+
+Per commit 240799cd, the option name for readahead should be
+inode_readahead_blks, not inode_readahead.
+
+Signed-off-by: Fang Wenqi <antonf@turbolinux.com.cn>
+Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ Documentation/filesystems/ext4.txt |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/Documentation/filesystems/ext4.txt
++++ b/Documentation/filesystems/ext4.txt
+@@ -196,7 +196,7 @@ nobarrier          This also requires an IO stac
+                       also be used to enable or disable barriers, for
+                       consistency with other ext4 mount options.
+-inode_readahead=n     This tuning parameter controls the maximum
++inode_readahead_blks=n        This tuning parameter controls the maximum
+                       number of inode table blocks that ext4's inode
+                       table readahead algorithm will pre-read into
+                       the buffer cache.  The default value is 32 blocks.
diff --git a/queue-2.6.32/hwmon-sht15-off-by-one-error-in-array-index-incorrect-constants.patch b/queue-2.6.32/hwmon-sht15-off-by-one-error-in-array-index-incorrect-constants.patch
new file mode 100644 (file)
index 0000000..85d1366
--- /dev/null
@@ -0,0 +1,46 @@
+From 4235f684b66d6f00d2cd8849c884cf8f8b57ecad Mon Sep 17 00:00:00 2001
+From: Jonathan Cameron <jic23@cam.ac.uk>
+Date: Wed, 16 Dec 2009 21:38:28 +0100
+Subject: hwmon: (sht15) Off-by-one error in array index + incorrect constants
+
+From: Jonathan Cameron <jic23@cam.ac.uk>
+
+commit 4235f684b66d6f00d2cd8849c884cf8f8b57ecad upstream.
+
+Fix an off-by-one error in array index + incorrect constants.
+
+Signed-off-by: Christoph Walser <walser@tik.ee.ethz.ch>
+Signed-off-by: Jonathan Cameron <jic23@cam.ac.uk>
+Signed-off-by: Jean Delvare <khali@linux-fr.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/hwmon/sht15.c |    6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/hwmon/sht15.c
++++ b/drivers/hwmon/sht15.c
+@@ -305,7 +305,7 @@ static inline int sht15_calc_temp(struct
+       int d1 = 0;
+       int i;
+-      for (i = 1; i < ARRAY_SIZE(temppoints) - 1; i++)
++      for (i = 1; i < ARRAY_SIZE(temppoints); i++)
+               /* Find pointer to interpolate */
+               if (data->supply_uV > temppoints[i - 1].vdd) {
+                       d1 = (data->supply_uV/1000 - temppoints[i - 1].vdd)
+@@ -332,12 +332,12 @@ static inline int sht15_calc_humid(struc
+       const int c1 = -4;
+       const int c2 = 40500; /* x 10 ^ -6 */
+-      const int c3 = 2800; /* x10 ^ -9 */
++      const int c3 = -2800; /* x10 ^ -9 */
+       RHlinear = c1*1000
+               + c2 * data->val_humid/1000
+               + (data->val_humid * data->val_humid * c3)/1000000;
+-      return (temp - 25000) * (10000 + 800 * data->val_humid)
++      return (temp - 25000) * (10000 + 80 * data->val_humid)
+               / 1000000 + RHlinear;
+ }
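
The first hunk fixes a classic fencepost: with points[i - 1] indexing, looping i from 1 up to ARRAY_SIZE() - 1 silently skips the last interpolation interval. A standalone illustration with made-up table values:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const int points[] = { 2400, 2900, 3500, 5500 };  /* made-up values */

int main(void)
{
    /* Buggy bound: the interval ending at points[3] is never examined. */
    for (size_t i = 1; i < ARRAY_SIZE(points) - 1; i++)
        printf("buggy: [%d, %d]\n", points[i - 1], points[i]);

    /* Fixed bound, as in the patch: i - 1 already keeps indexing safe. */
    for (size_t i = 1; i < ARRAY_SIZE(points); i++)
        printf("fixed: [%d, %d]\n", points[i - 1], points[i]);

    return 0;
}
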
diff --git a/queue-2.6.32/ipv6-reassembly-use-seperate-reassembly-queues-for-conntrack-and-local-delivery.patch b/queue-2.6.32/ipv6-reassembly-use-seperate-reassembly-queues-for-conntrack-and-local-delivery.patch
new file mode 100644 (file)
index 0000000..faf462a
--- /dev/null
@@ -0,0 +1,158 @@
+From 0b5ccb2ee250136dd7385b1c7da28417d0d4d32d Mon Sep 17 00:00:00 2001
+From: Patrick McHardy <kaber@trash.net>
+Date: Tue, 15 Dec 2009 16:59:18 +0100
+Subject: ipv6: reassembly: use seperate reassembly queues for conntrack and local delivery
+
+From: Patrick McHardy <kaber@trash.net>
+
+commit 0b5ccb2ee250136dd7385b1c7da28417d0d4d32d upstream.
+
+Currently the same reassembly queue might be used for packets reassembled
+by conntrack in different positions in the stack (PREROUTING/LOCAL_OUT),
+as well as local delivery. This can cause "packet jumps" when the fragment
+completing a reassembled packet is queued from a different position in the
+stack than the previous ones.
+
+Add a "user" identifier to the reassembly queue key to separate the queues
+of each caller, similar to what we do for IPv4.
+
+Signed-off-by: Patrick McHardy <kaber@trash.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ include/net/ipv6.h                             |    7 +++++++
+ include/net/netfilter/ipv6/nf_conntrack_ipv6.h |    2 +-
+ net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c |   13 +++++++++++--
+ net/ipv6/netfilter/nf_conntrack_reasm.c        |    7 ++++---
+ net/ipv6/reassembly.c                          |    5 ++++-
+ 5 files changed, 27 insertions(+), 7 deletions(-)
+
+--- a/include/net/ipv6.h
++++ b/include/net/ipv6.h
+@@ -354,8 +354,15 @@ static inline int ipv6_prefix_equal(cons
+ struct inet_frag_queue;
++enum ip6_defrag_users {
++      IP6_DEFRAG_LOCAL_DELIVER,
++      IP6_DEFRAG_CONNTRACK_IN,
++      IP6_DEFRAG_CONNTRACK_OUT,
++};
++
+ struct ip6_create_arg {
+       __be32 id;
++      u32 user;
+       struct in6_addr *src;
+       struct in6_addr *dst;
+ };
+--- a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
++++ b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
+@@ -9,7 +9,7 @@ extern struct nf_conntrack_l4proto nf_co
+ extern int nf_ct_frag6_init(void);
+ extern void nf_ct_frag6_cleanup(void);
+-extern struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb);
++extern struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user);
+ extern void nf_ct_frag6_output(unsigned int hooknum, struct sk_buff *skb,
+                              struct net_device *in,
+                              struct net_device *out,
+--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
++++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+@@ -187,6 +187,16 @@ out:
+       return nf_conntrack_confirm(skb);
+ }
++static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum,
++                                              struct sk_buff *skb)
++{
++      if (hooknum == NF_INET_PRE_ROUTING)
++              return IP6_DEFRAG_CONNTRACK_IN;
++      else
++              return IP6_DEFRAG_CONNTRACK_OUT;
++
++}
++
+ static unsigned int ipv6_defrag(unsigned int hooknum,
+                               struct sk_buff *skb,
+                               const struct net_device *in,
+@@ -199,8 +209,7 @@ static unsigned int ipv6_defrag(unsigned
+       if (skb->nfct)
+               return NF_ACCEPT;
+-      reasm = nf_ct_frag6_gather(skb);
+-
++      reasm = nf_ct_frag6_gather(skb, nf_ct6_defrag_user(hooknum, skb));
+       /* queued */
+       if (reasm == NULL)
+               return NF_STOLEN;
+--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
++++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
+@@ -170,13 +170,14 @@ out:
+ /* Creation primitives. */
+ static __inline__ struct nf_ct_frag6_queue *
+-fq_find(__be32 id, struct in6_addr *src, struct in6_addr *dst)
++fq_find(__be32 id, u32 user, struct in6_addr *src, struct in6_addr *dst)
+ {
+       struct inet_frag_queue *q;
+       struct ip6_create_arg arg;
+       unsigned int hash;
+       arg.id = id;
++      arg.user = user;
+       arg.src = src;
+       arg.dst = dst;
+@@ -561,7 +562,7 @@ find_prev_fhdr(struct sk_buff *skb, u8 *
+       return 0;
+ }
+-struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb)
++struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user)
+ {
+       struct sk_buff *clone;
+       struct net_device *dev = skb->dev;
+@@ -607,7 +608,7 @@ struct sk_buff *nf_ct_frag6_gather(struc
+       if (atomic_read(&nf_init_frags.mem) > nf_init_frags.high_thresh)
+               nf_ct_frag6_evictor();
+-      fq = fq_find(fhdr->identification, &hdr->saddr, &hdr->daddr);
++      fq = fq_find(fhdr->identification, user, &hdr->saddr, &hdr->daddr);
+       if (fq == NULL) {
+               pr_debug("Can't find and can't create new queue\n");
+               goto ret_orig;
+--- a/net/ipv6/reassembly.c
++++ b/net/ipv6/reassembly.c
+@@ -72,6 +72,7 @@ struct frag_queue
+       struct inet_frag_queue  q;
+       __be32                  id;             /* fragment id          */
++      u32                     user;
+       struct in6_addr         saddr;
+       struct in6_addr         daddr;
+@@ -141,7 +142,7 @@ int ip6_frag_match(struct inet_frag_queu
+       struct ip6_create_arg *arg = a;
+       fq = container_of(q, struct frag_queue, q);
+-      return (fq->id == arg->id &&
++      return (fq->id == arg->id && fq->user == arg->user &&
+                       ipv6_addr_equal(&fq->saddr, arg->src) &&
+                       ipv6_addr_equal(&fq->daddr, arg->dst));
+ }
+@@ -163,6 +164,7 @@ void ip6_frag_init(struct inet_frag_queu
+       struct ip6_create_arg *arg = a;
+       fq->id = arg->id;
++      fq->user = arg->user;
+       ipv6_addr_copy(&fq->saddr, arg->src);
+       ipv6_addr_copy(&fq->daddr, arg->dst);
+ }
+@@ -244,6 +246,7 @@ fq_find(struct net *net, __be32 id, stru
+       unsigned int hash;
+       arg.id = id;
++      arg.user = IP6_DEFRAG_LOCAL_DELIVER;
+       arg.src = src;
+       arg.dst = dst;
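
The heart of the change is the queue key. Here is a self-contained
sketch of the new matching rule, with simplified stand-in types
rather than the kernel's inet_frag structures:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    enum defrag_user {
            DEFRAG_LOCAL_DELIVER,
            DEFRAG_CONNTRACK_IN,
            DEFRAG_CONNTRACK_OUT,
    };

    struct frag_key {
            unsigned int id;            /* fragment id */
            enum defrag_user user;      /* who is reassembling */
            unsigned char src[16];
            unsigned char dst[16];
    };

    /* Mirrors ip6_frag_match(): queues are shared only between
     * fragments queued from the same position in the stack. */
    static bool frag_match(const struct frag_key *q,
                           const struct frag_key *arg)
    {
            return q->id == arg->id && q->user == arg->user &&
                   !memcmp(q->src, arg->src, 16) &&
                   !memcmp(q->dst, arg->dst, 16);
    }

    int main(void)
    {
            struct frag_key a = { .id = 42,
                                  .user = DEFRAG_CONNTRACK_IN };
            struct frag_key b = a;

            b.user = DEFRAG_LOCAL_DELIVER;

            /* Same id and addresses, different caller: no longer
             * the same queue, so no "packet jumps". */
            printf("same queue? %s\n",
                   frag_match(&a, &b) ? "yes" : "no");
            return 0;
    }
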
diff --git a/queue-2.6.32/kernel-sysctl.c-fix-the-incomplete-part-of-sysctl_max_map_count-should-be-non-negative.patch.patch b/queue-2.6.32/kernel-sysctl.c-fix-the-incomplete-part-of-sysctl_max_map_count-should-be-non-negative.patch.patch
new file mode 100644 (file)
index 0000000..20efd53
--- /dev/null
@@ -0,0 +1,32 @@
+From 3e26120cc7c819c97bc07281ca1fb9017cfe9a39 Mon Sep 17 00:00:00 2001
+From: WANG Cong <amwang@redhat.com>
+Date: Thu, 17 Dec 2009 15:27:05 -0800
+Subject: kernel/sysctl.c: fix the incomplete part of sysctl_max_map_count-should-be-non-negative.patch
+
+From: WANG Cong <amwang@redhat.com>
+
+commit 3e26120cc7c819c97bc07281ca1fb9017cfe9a39 upstream.
+
+It was a mistake to use 'proc_dointvec'; it should have been
+'proc_dointvec_minmax', as in the original patch.
+
+Signed-off-by: WANG Cong <amwang@redhat.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ kernel/sysctl.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -1607,7 +1607,7 @@ static struct ctl_table debug_table[] = 
+               .data           = &show_unhandled_signals,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+-              .proc_handler   = proc_dointvec,
++              .proc_handler   = proc_dointvec_minmax,
+               .extra1         = &zero,
+       },
+ #endif
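
The one-word fix matters because the .extra1/.extra2 bounds in the
table are only advisory until a handler actually reads them:
proc_dointvec stores whatever userspace writes, while
proc_dointvec_minmax rejects out-of-range values first. A compilable
user-space sketch of the difference, with simplified stand-ins for
the kernel types:

    #include <stdio.h>

    struct ctl_entry {
            const char *procname;
            int *data;
            int *min;       /* stands in for .extra1 */
            int *max;       /* stands in for .extra2 */
    };

    static int zero;
    static int value;

    /* Like proc_dointvec: the bounds are never consulted. */
    static int dointvec(struct ctl_entry *e, int v)
    {
            *e->data = v;
            return 0;
    }

    /* Like proc_dointvec_minmax: out-of-range writes fail. */
    static int dointvec_minmax(struct ctl_entry *e, int v)
    {
            if ((e->min && v < *e->min) || (e->max && v > *e->max))
                    return -1;      /* -EINVAL in the kernel */
            *e->data = v;
            return 0;
    }

    int main(void)
    {
            struct ctl_entry e = { "demo", &value, &zero, NULL };

            dointvec(&e, -5);       /* accepted: goes negative */
            printf("dointvec:        value = %d\n", value);

            value = 0;
            if (dointvec_minmax(&e, -5))
                    printf("dointvec_minmax: write refused\n");
            return 0;
    }
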
diff --git a/queue-2.6.32/keys-keyctl_session_to_parent-needs-tif_notify_resume-architecture-support.patch b/queue-2.6.32/keys-keyctl_session_to_parent-needs-tif_notify_resume-architecture-support.patch
new file mode 100644 (file)
index 0000000..d4d3f62
--- /dev/null
@@ -0,0 +1,56 @@
+From a00ae4d21b2fa9379914f270ffffd8d3bec55430 Mon Sep 17 00:00:00 2001
+From: Geert Uytterhoeven <geert@linux-m68k.org>
+Date: Sun, 13 Dec 2009 20:21:34 +0100
+Subject: Keys: KEYCTL_SESSION_TO_PARENT needs TIF_NOTIFY_RESUME architecture support
+
+From: Geert Uytterhoeven <geert@linux-m68k.org>
+
+commit a00ae4d21b2fa9379914f270ffffd8d3bec55430 upstream.
+
+As of commit ee18d64c1f632043a02e6f5ba5e045bb26a5465f ("KEYS: Add a keyctl to
+install a process's session keyring on its parent [try #6]"), CONFIG_KEYS=y
+fails to build on architectures that haven't implemented TIF_NOTIFY_RESUME yet:
+
+security/keys/keyctl.c: In function 'keyctl_session_to_parent':
+security/keys/keyctl.c:1312: error: 'TIF_NOTIFY_RESUME' undeclared (first use in this function)
+security/keys/keyctl.c:1312: error: (Each undeclared identifier is reported only once
+security/keys/keyctl.c:1312: error: for each function it appears in.)
+
+Make KEYCTL_SESSION_TO_PARENT depend on TIF_NOTIFY_RESUME until
+m68k and xtensa have implemented it.
+
+Signed-off-by: Geert Uytterhoeven <geert@linux-m68k.org>
+Signed-off-by: James Morris <jmorris@namei.org>
+Acked-by: Mike Frysinger <vapier@gentoo.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ security/keys/keyctl.c |   10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/security/keys/keyctl.c
++++ b/security/keys/keyctl.c
+@@ -1236,6 +1236,7 @@ long keyctl_get_security(key_serial_t ke
+  */
+ long keyctl_session_to_parent(void)
+ {
++#ifdef TIF_NOTIFY_RESUME
+       struct task_struct *me, *parent;
+       const struct cred *mycred, *pcred;
+       struct cred *cred, *oldcred;
+@@ -1326,6 +1327,15 @@ not_permitted:
+ error_keyring:
+       key_ref_put(keyring_r);
+       return ret;
++
++#else /* !TIF_NOTIFY_RESUME */
++      /*
++       * To be removed when TIF_NOTIFY_RESUME has been implemented on
++       * m68k/xtensa
++       */
++#warning TIF_NOTIFY_RESUME not implemented
++      return -EOPNOTSUPP;
++#endif /* !TIF_NOTIFY_RESUME */
+ }
+ /*****************************************************************************/
diff --git a/queue-2.6.32/lguest-fix-bug-in-setting-guest-gdt-entry.patch b/queue-2.6.32/lguest-fix-bug-in-setting-guest-gdt-entry.patch
new file mode 100644 (file)
index 0000000..bb41ef2
--- /dev/null
@@ -0,0 +1,34 @@
+From 3e27249c84beed1c79d767b350e52ad038db9053 Mon Sep 17 00:00:00 2001
+From: Rusty Russell <rusty@rustcorp.com.au>
+Date: Mon, 4 Jan 2010 19:26:14 +1030
+Subject: lguest: fix bug in setting guest GDT entry
+
+From: Rusty Russell <rusty@rustcorp.com.au>
+
+commit 3e27249c84beed1c79d767b350e52ad038db9053 upstream.
+
+We kill the guest, but kill_guest() does not stop the caller, so we
+then go on and blatt random stuff into the GDT anyway.
+
+Reported-by: Dan Carpenter <error27@gmail.com>
+Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/lguest/segments.c |    4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/lguest/segments.c
++++ b/drivers/lguest/segments.c
+@@ -179,8 +179,10 @@ void load_guest_gdt_entry(struct lg_cpu 
+        * We assume the Guest has the same number of GDT entries as the
+        * Host, otherwise we'd have to dynamically allocate the Guest GDT.
+        */
+-      if (num >= ARRAY_SIZE(cpu->arch.gdt))
++      if (num >= ARRAY_SIZE(cpu->arch.gdt)) {
+               kill_guest(cpu, "too many gdt entries %i", num);
++              return;
++      }
+       /* Set it up, then fix it. */
+       cpu->arch.gdt[num].a = lo;
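
The pattern is worth spelling out: kill_guest() only marks the guest
as dying, it does not unwind its caller, so every error path still
needs an explicit return. A small stand-alone sketch of the fixed
control flow (illustrative code, not the lguest internals):

    #include <stdio.h>

    #define GDT_ENTRIES 32

    static unsigned long long gdt[GDT_ENTRIES];

    static void kill_guest(const char *why, unsigned int num)
    {
            /* Marks the guest dead; execution continues in the
             * caller, exactly as in lguest. */
            fprintf(stderr, "killing guest: %s %u\n", why, num);
    }

    static void load_gdt_entry(unsigned int num,
                               unsigned long long val)
    {
            if (num >= GDT_ENTRIES) {
                    kill_guest("too many gdt entries", num);
                    return;         /* the return the patch adds */
            }
            gdt[num] = val;         /* safe: num is in range */
    }

    int main(void)
    {
            load_gdt_entry(100, 0xdeadbeefULL);     /* rejected */
            load_gdt_entry(3, 0x00cf9a000000ffffULL);
            printf("gdt[3] = %#llx\n", gdt[3]);
            return 0;
    }
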
diff --git a/queue-2.6.32/mac80211-fix-race-with-suspend-and-dynamic_ps_disable_work.patch b/queue-2.6.32/mac80211-fix-race-with-suspend-and-dynamic_ps_disable_work.patch
new file mode 100644 (file)
index 0000000..c35b760
--- /dev/null
@@ -0,0 +1,48 @@
+From b98c06b6debfe84c90200143bb1102f312f50a33 Mon Sep 17 00:00:00 2001
+From: Luis R. Rodriguez <lrodriguez@atheros.com>
+Date: Thu, 24 Dec 2009 15:26:09 -0500
+Subject: mac80211: fix race with suspend and dynamic_ps_disable_work
+
+From: Luis R. Rodriguez <lrodriguez@atheros.com>
+
+commit b98c06b6debfe84c90200143bb1102f312f50a33 upstream.
+
+When mac80211 suspends it calls a driver's suspend callback
+as a last step and after that the driver assumes no calls will
+be made to it until we resume and its start callback is kicked.
+If such calls are made, however, suspend can end up throwing the
+hardware into an unexpected state, making the device unusable
+upon resume.
+
+Fix this by preventing mac80211 from scheduling
+dynamic_ps_disable_work, by checking whether mac80211 has started
+to suspend and is quiescing. Frames are still allowed to go
+through, as that is part of the quiescing steps, and we do not
+flush the mac80211 workqueue since that was already done towards
+the beginning of the suspend cycle.
+
+The other mac80211 issue will be handled in the next patch.
+
+For further details, refer to the thread:
+
+http://marc.info/?t=126144866100001&r=1&w=2
+
+Cc: stable@kernel.org
+Signed-off-by: Luis R. Rodriguez <lrodriguez@atheros.com>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/mac80211/tx.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -1401,6 +1401,7 @@ static void ieee80211_xmit(struct ieee80
+       if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) &&
+           local->hw.conf.dynamic_ps_timeout > 0 &&
++          !local->quiescing &&
+           !(local->scanning) && local->ps_sdata) {
+               if (local->hw.conf.flags & IEEE80211_CONF_PS) {
+                       ieee80211_stop_queues_by_reason(&local->hw,
diff --git a/queue-2.6.32/memcg-avoid-oom-killing-innocent-task-in-case-of-use_hierarchy.patch b/queue-2.6.32/memcg-avoid-oom-killing-innocent-task-in-case-of-use_hierarchy.patch
new file mode 100644 (file)
index 0000000..66c1428
--- /dev/null
@@ -0,0 +1,63 @@
+From d31f56dbf8bafaacb0c617f9a6f137498d5c7aed Mon Sep 17 00:00:00 2001
+From: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
+Date: Tue, 15 Dec 2009 16:47:12 -0800
+Subject: memcg: avoid oom-killing innocent task in case of use_hierarchy
+
+From: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
+
+commit d31f56dbf8bafaacb0c617f9a6f137498d5c7aed upstream.
+
+task_in_mem_cgroup(), which is called by select_bad_process() to check
+whether a task can be a candidate for being oom-killed from memcg's limit,
+checks "curr->use_hierarchy"("curr" is the mem_cgroup the task belongs
+to).
+
+But this check returns true (a false positive) when:
+
+       <some path>/aa          use_hierarchy == 0      <- hitting limit
+         <some path>/aa/00     use_hierarchy == 1      <- the task belongs to
+
+This leads to killing an innocent task in aa/00.  This patch fixes
+that bug.  It also fixes the argument passed to
+mem_cgroup_print_oom_info(): we should print information about the
+mem_cgroup that the task being killed, not current, belongs to.
+
+Signed-off-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
+Acked-by: Balbir Singh <balbir@linux.vnet.ibm.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ mm/memcontrol.c |    8 +++++++-
+ mm/oom_kill.c   |    2 +-
+ 2 files changed, 8 insertions(+), 2 deletions(-)
+
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -758,7 +758,13 @@ int task_in_mem_cgroup(struct task_struc
+       task_unlock(task);
+       if (!curr)
+               return 0;
+-      if (curr->use_hierarchy)
++      /*
++       * We should check use_hierarchy of "mem" not "curr". Because checking
++       * use_hierarchy of "curr" here make this function true if hierarchy is
++       * enabled in "curr" and "curr" is a child of "mem" in *cgroup*
++       * hierarchy(even if use_hierarchy is disabled in "mem").
++       */
++      if (mem->use_hierarchy)
+               ret = css_is_ancestor(&curr->css, &mem->css);
+       else
+               ret = (curr == mem);
+--- a/mm/oom_kill.c
++++ b/mm/oom_kill.c
+@@ -404,7 +404,7 @@ static int oom_kill_process(struct task_
+               cpuset_print_task_mems_allowed(current);
+               task_unlock(current);
+               dump_stack();
+-              mem_cgroup_print_oom_info(mem, current);
++              mem_cgroup_print_oom_info(mem, p);
+               show_mem();
+               if (sysctl_oom_dump_tasks)
+                       dump_tasks(mem);
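
A stand-alone sketch makes the false positive easy to reproduce: the
hierarchy flag must be read from the group that hit its limit, not
from the group the task lives in. Simplified structures below, not
the kernel's mem_cgroup:

    #include <stdbool.h>
    #include <stdio.h>

    struct mem_cgroup {
            const char *name;
            struct mem_cgroup *parent;
            bool use_hierarchy;
    };

    static bool is_ancestor(const struct mem_cgroup *child,
                            const struct mem_cgroup *anc)
    {
            for (; child; child = child->parent)
                    if (child == anc)
                            return true;
            return false;
    }

    /* mem = group that hit its limit, curr = the task's group */
    static bool task_in_mem_cgroup(const struct mem_cgroup *mem,
                                   const struct mem_cgroup *curr)
    {
            if (mem->use_hierarchy)   /* was curr->use_hierarchy */
                    return is_ancestor(curr, mem);
            return curr == mem;
    }

    int main(void)
    {
            struct mem_cgroup aa   = { "aa",    NULL, false };
            struct mem_cgroup aa00 = { "aa/00", &aa,  true  };

            /* aa hit its limit with hierarchy disabled: a task in
             * aa/00 must not be an OOM candidate.  Reading the
             * flag from curr (aa/00) wrongly said it was. */
            printf("task in aa/00 killable for aa's limit? %s\n",
                   task_in_mem_cgroup(&aa, &aa00) ? "yes" : "no");
            return 0;
    }
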
diff --git a/queue-2.6.32/netfilter-fix-crashes-in-bridge-netfilter-caused-by-fragment-jumps.patch b/queue-2.6.32/netfilter-fix-crashes-in-bridge-netfilter-caused-by-fragment-jumps.patch
new file mode 100644 (file)
index 0000000..5171991
--- /dev/null
@@ -0,0 +1,120 @@
+From 8fa9ff6849bb86c59cc2ea9faadf3cb2d5223497 Mon Sep 17 00:00:00 2001
+From: Patrick McHardy <kaber@trash.net>
+Date: Tue, 15 Dec 2009 16:59:59 +0100
+Subject: netfilter: fix crashes in bridge netfilter caused by fragment jumps
+
+From: Patrick McHardy <kaber@trash.net>
+
+commit 8fa9ff6849bb86c59cc2ea9faadf3cb2d5223497 upstream.
+
+When fragments from bridge netfilter are passed to IPv4 or IPv6 conntrack
+and a reassembly queue with the same fragment key already exists from
+reassembling a similar packet received on a different device (e.g. with
+multicasted fragments), the reassembled packet might continue on a different
+codepath than where the head fragment originated. This can cause crashes
+in bridge netfilter when a fragment received on a non-bridge device (and
+thus with skb->nf_bridge == NULL) continues through the bridge netfilter
+code.
+
+Add a new reassembly identifier for packets originating from bridge
+netfilter and use it to put those packets in isolated queues.
+
+Fixes http://bugzilla.kernel.org/show_bug.cgi?id=14805
+
+Reported-and-Tested-by: Chong Qiao <qiaochong@loongson.cn>
+Signed-off-by: Patrick McHardy <kaber@trash.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ include/net/ip.h                               |    1 +
+ include/net/ipv6.h                             |    1 +
+ net/ipv4/netfilter/nf_defrag_ipv4.c            |   21 +++++++++++++++++----
+ net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c |    6 ++++++
+ 4 files changed, 25 insertions(+), 4 deletions(-)
+
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -342,6 +342,7 @@ enum ip_defrag_users
+       IP_DEFRAG_CALL_RA_CHAIN,
+       IP_DEFRAG_CONNTRACK_IN,
+       IP_DEFRAG_CONNTRACK_OUT,
++      IP_DEFRAG_CONNTRACK_BRIDGE_IN,
+       IP_DEFRAG_VS_IN,
+       IP_DEFRAG_VS_OUT,
+       IP_DEFRAG_VS_FWD
+--- a/include/net/ipv6.h
++++ b/include/net/ipv6.h
+@@ -358,6 +358,7 @@ enum ip6_defrag_users {
+       IP6_DEFRAG_LOCAL_DELIVER,
+       IP6_DEFRAG_CONNTRACK_IN,
+       IP6_DEFRAG_CONNTRACK_OUT,
++      IP6_DEFRAG_CONNTRACK_BRIDGE_IN,
+ };
+ struct ip6_create_arg {
+--- a/net/ipv4/netfilter/nf_defrag_ipv4.c
++++ b/net/ipv4/netfilter/nf_defrag_ipv4.c
+@@ -14,6 +14,7 @@
+ #include <net/route.h>
+ #include <net/ip.h>
++#include <linux/netfilter_bridge.h>
+ #include <linux/netfilter_ipv4.h>
+ #include <net/netfilter/ipv4/nf_defrag_ipv4.h>
+@@ -34,6 +35,20 @@ static int nf_ct_ipv4_gather_frags(struc
+       return err;
+ }
++static enum ip_defrag_users nf_ct_defrag_user(unsigned int hooknum,
++                                            struct sk_buff *skb)
++{
++#ifdef CONFIG_BRIDGE_NETFILTER
++      if (skb->nf_bridge &&
++          skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)
++              return IP_DEFRAG_CONNTRACK_BRIDGE_IN;
++#endif
++      if (hooknum == NF_INET_PRE_ROUTING)
++              return IP_DEFRAG_CONNTRACK_IN;
++      else
++              return IP_DEFRAG_CONNTRACK_OUT;
++}
++
+ static unsigned int ipv4_conntrack_defrag(unsigned int hooknum,
+                                         struct sk_buff *skb,
+                                         const struct net_device *in,
+@@ -50,10 +65,8 @@ static unsigned int ipv4_conntrack_defra
+ #endif
+       /* Gather fragments. */
+       if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) {
+-              if (nf_ct_ipv4_gather_frags(skb,
+-                                          hooknum == NF_INET_PRE_ROUTING ?
+-                                          IP_DEFRAG_CONNTRACK_IN :
+-                                          IP_DEFRAG_CONNTRACK_OUT))
++              enum ip_defrag_users user = nf_ct_defrag_user(hooknum, skb);
++              if (nf_ct_ipv4_gather_frags(skb, user))
+                       return NF_STOLEN;
+       }
+       return NF_ACCEPT;
+--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
++++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+@@ -20,6 +20,7 @@
+ #include <net/ipv6.h>
+ #include <net/inet_frag.h>
++#include <linux/netfilter_bridge.h>
+ #include <linux/netfilter_ipv6.h>
+ #include <net/netfilter/nf_conntrack.h>
+ #include <net/netfilter/nf_conntrack_helper.h>
+@@ -190,6 +191,11 @@ out:
+ static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum,
+                                               struct sk_buff *skb)
+ {
++#ifdef CONFIG_BRIDGE_NETFILTER
++      if (skb->nf_bridge &&
++          skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)
++              return IP6_DEFRAG_CONNTRACK_BRIDGE_IN;
++#endif
+       if (hooknum == NF_INET_PRE_ROUTING)
+               return IP6_DEFRAG_CONNTRACK_IN;
+       else
diff --git a/queue-2.6.32/nommu-optimise-away-the-dac_-mmap_min_addr-tests.patch b/queue-2.6.32/nommu-optimise-away-the-dac_-mmap_min_addr-tests.patch
new file mode 100644 (file)
index 0000000..a5c6fb7
--- /dev/null
@@ -0,0 +1,99 @@
+From 6e1415467614e854fee660ff6648bd10fa976e95 Mon Sep 17 00:00:00 2001
+From: David Howells <dhowells@redhat.com>
+Date: Tue, 15 Dec 2009 19:27:45 +0000
+Subject: NOMMU: Optimise away the {dac_,}mmap_min_addr tests
+
+From: David Howells <dhowells@redhat.com>
+
+commit 6e1415467614e854fee660ff6648bd10fa976e95 upstream.
+
+In NOMMU mode clamp dac_mmap_min_addr to zero to cause the tests on it to be
+skipped by the compiler.  We do this as the minimum mmap address doesn't make
+any sense in NOMMU mode.
+
+mmap_min_addr and round_hint_to_min() can be discarded entirely in NOMMU mode.
+
+Signed-off-by: David Howells <dhowells@redhat.com>
+Acked-by: Eric Paris <eparis@redhat.com>
+Signed-off-by: James Morris <jmorris@namei.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ include/linux/security.h |    7 +++++++
+ kernel/sysctl.c          |    2 ++
+ mm/Kconfig               |    1 +
+ security/Makefile        |    3 ++-
+ 4 files changed, 12 insertions(+), 1 deletion(-)
+
+--- a/include/linux/security.h
++++ b/include/linux/security.h
+@@ -95,8 +95,13 @@ struct seq_file;
+ extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb);
+ extern int cap_netlink_recv(struct sk_buff *skb, int cap);
++#ifdef CONFIG_MMU
+ extern unsigned long mmap_min_addr;
+ extern unsigned long dac_mmap_min_addr;
++#else
++#define dac_mmap_min_addr     0UL
++#endif
++
+ /*
+  * Values used in the task_security_ops calls
+  */
+@@ -121,6 +126,7 @@ struct request_sock;
+ #define LSM_UNSAFE_PTRACE     2
+ #define LSM_UNSAFE_PTRACE_CAP 4
++#ifdef CONFIG_MMU
+ /*
+  * If a hint addr is less than mmap_min_addr change hint to be as
+  * low as possible but still greater than mmap_min_addr
+@@ -135,6 +141,7 @@ static inline unsigned long round_hint_t
+ }
+ extern int mmap_min_addr_handler(struct ctl_table *table, int write,
+                                void __user *buffer, size_t *lenp, loff_t *ppos);
++#endif
+ #ifdef CONFIG_SECURITY
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -1200,6 +1200,7 @@ static struct ctl_table vm_table[] = {
+               .extra2         = (void *)&hugetlb_infinity,
+       },
+ #endif
++#ifdef CONFIG_MMU
+       {
+               .ctl_name       = VM_LOWMEM_RESERVE_RATIO,
+               .procname       = "lowmem_reserve_ratio",
+@@ -1353,6 +1354,7 @@ static struct ctl_table vm_table[] = {
+               .mode           = 0644,
+               .proc_handler   = &mmap_min_addr_handler,
+       },
++#endif
+ #ifdef CONFIG_NUMA
+       {
+               .ctl_name       = CTL_UNNUMBERED,
+--- a/mm/Kconfig
++++ b/mm/Kconfig
+@@ -227,6 +227,7 @@ config KSM
+ config DEFAULT_MMAP_MIN_ADDR
+         int "Low address space to protect from user allocation"
++      depends on MMU
+         default 4096
+         help
+         This is the portion of low virtual memory which should be protected
+--- a/security/Makefile
++++ b/security/Makefile
+@@ -8,7 +8,8 @@ subdir-$(CONFIG_SECURITY_SMACK)                += smac
+ subdir-$(CONFIG_SECURITY_TOMOYO)        += tomoyo
+ # always enable default capabilities
+-obj-y         += commoncap.o min_addr.o
++obj-y                                 += commoncap.o
++obj-$(CONFIG_MMU)                     += min_addr.o
+ # Object file lists
+ obj-$(CONFIG_SECURITY)                        += security.o capability.o
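
The optimisation is purely a compile-time one: once dac_mmap_min_addr
is a literal 0UL, every "addr < dac_mmap_min_addr" test compares an
unsigned value against zero, which is always false, so the compiler
drops the branch and the variable load with it. A minimal sketch with
stand-in names (build with -DNOMMU for the no-MMU shape):

    #include <stdio.h>

    #ifdef NOMMU
    /* Constant zero: the test below folds away entirely. */
    #define mmap_min_addr 0UL
    #else
    unsigned long mmap_min_addr = 4096;
    #endif

    static int addr_ok(unsigned long addr)
    {
            /* On NOMMU this is "unsigned < 0", always false, so
             * the whole function compiles down to "return 1". */
            if (addr < mmap_min_addr)
                    return 0;
            return 1;
    }

    int main(void)
    {
            printf("addr 0 allowed? %d\n", addr_ok(0));
            return 0;
    }
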
diff --git a/queue-2.6.32/orinoco-fix-gfp_kernel-in-orinoco_set_key-with-interrupts-disabled.patch b/queue-2.6.32/orinoco-fix-gfp_kernel-in-orinoco_set_key-with-interrupts-disabled.patch
new file mode 100644 (file)
index 0000000..7f4aba4
--- /dev/null
@@ -0,0 +1,84 @@
+From 5b0691508aa99d309101a49b4b084dc16b3d7019 Mon Sep 17 00:00:00 2001
+From: Andrey Borzenkov <arvidjaar@mail.ru>
+Date: Tue, 22 Dec 2009 21:38:44 +0300
+Subject: orinoco: fix GFP_KERNEL in orinoco_set_key with interrupts disabled
+
+From: Andrey Borzenkov <arvidjaar@mail.ru>
+
+commit 5b0691508aa99d309101a49b4b084dc16b3d7019 upstream.
+
+orinoco_set_key is called from two places both with interrupts disabled
+(under orinoco_lock). Use GFP_ATOMIC instead of GFP_KERNEL. Fixes following
+warning:
+
+[   77.254109] WARNING: at /home/bor/src/linux-git/kernel/lockdep.c:2465 lockdep_trace_alloc+0x9a/0xa0()
+[   77.254109] Hardware name: PORTEGE 4000
+[   77.254109] Modules linked in: af_packet irnet ppp_generic slhc ircomm_tty ircomm binfmt_misc dm_mirror dm_region_hash dm_log dm_round_robin dm_multipath dm_mod loop nvram toshiba cryptomgr aead pcompress crypto_blkcipher michael_mic crypto_hash crypto_algapi orinoco_cs orinoco cfg80211 smsc_ircc2 pcmcia irda toshiba_acpi yenta_socket video i2c_ali1535 backlight rsrc_nonstatic ali_agp pcmcia_core psmouse output crc_ccitt i2c_core alim1535_wdt rfkill sg evdev ohci_hcd agpgart usbcore pata_ali libata reiserfs [last unloaded: scsi_wait_scan]
+[   77.254109] Pid: 2296, comm: wpa_supplicant Not tainted 2.6.32-1avb #1
+[   77.254109] Call Trace:
+[   77.254109]  [<c011f0ad>] warn_slowpath_common+0x6d/0xa0
+[   77.254109]  [<c014206a>] ? lockdep_trace_alloc+0x9a/0xa0
+[   77.254109]  [<c014206a>] ? lockdep_trace_alloc+0x9a/0xa0
+[   77.254109]  [<c011f0f5>] warn_slowpath_null+0x15/0x20
+[   77.254109]  [<c014206a>] lockdep_trace_alloc+0x9a/0xa0
+[   77.254109]  [<c018d296>] __kmalloc+0x36/0x130
+[   77.254109]  [<dffcb6a8>] ? orinoco_set_key+0x48/0x1c0 [orinoco]
+[   77.254109]  [<dffcb6a8>] orinoco_set_key+0x48/0x1c0 [orinoco]
+[   77.254109]  [<dffcb9fc>] orinoco_ioctl_set_encodeext+0x1dc/0x2d0 [orinoco]
+[   77.254109]  [<c035b117>] ioctl_standard_call+0x207/0x3b0
+[   77.254109]  [<dffcb820>] ? orinoco_ioctl_set_encodeext+0x0/0x2d0 [orinoco]
+[   77.254109]  [<c0307f1f>] ? rtnl_lock+0xf/0x20
+[   77.254109]  [<c0307f1f>] ? rtnl_lock+0xf/0x20
+[   77.254109]  [<c02fb115>] ? __dev_get_by_name+0x85/0xb0
+[   77.254109]  [<c035b616>] wext_handle_ioctl+0x176/0x200
+[   77.254109]  [<dffcb820>] ? orinoco_ioctl_set_encodeext+0x0/0x2d0 [orinoco]
+[   77.254109]  [<c030020f>] dev_ioctl+0x6af/0x730
+[   77.254109]  [<c02eec65>] ? move_addr_to_kernel+0x55/0x60
+[   77.254109]  [<c02eed59>] ? sys_sendto+0xe9/0x130
+[   77.254109]  [<c02ed77e>] sock_ioctl+0x7e/0x250
+[   77.254109]  [<c02ed700>] ? sock_ioctl+0x0/0x250
+[   77.254109]  [<c019cf4c>] vfs_ioctl+0x1c/0x70
+[   77.254109]  [<c019d1fa>] do_vfs_ioctl+0x6a/0x590
+[   77.254109]  [<c0178e50>] ? might_fault+0x90/0xa0
+[   77.254109]  [<c0178e0a>] ? might_fault+0x4a/0xa0
+[   77.254109]  [<c02ef90e>] ? sys_socketcall+0x17e/0x280
+[   77.254109]  [<c019d759>] sys_ioctl+0x39/0x60
+[   77.254109]  [<c0102e3b>] sysenter_do_call+0x12/0x32
+[   77.254109] ---[ end trace 95ef563548d21efd ]---
+
+Signed-off-by: Andrey Borzenkov <arvidjaar@mail.ru>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/wireless/orinoco/wext.c |    6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/wireless/orinoco/wext.c
++++ b/drivers/net/wireless/orinoco/wext.c
+@@ -23,7 +23,7 @@
+ #define MAX_RID_LEN 1024
+ /* Helper routine to record keys
+- * Do not call from interrupt context */
++ * It is called under orinoco_lock so it may not sleep */
+ static int orinoco_set_key(struct orinoco_private *priv, int index,
+                          enum orinoco_alg alg, const u8 *key, int key_len,
+                          const u8 *seq, int seq_len)
+@@ -32,14 +32,14 @@ static int orinoco_set_key(struct orinoc
+       kzfree(priv->keys[index].seq);
+       if (key_len) {
+-              priv->keys[index].key = kzalloc(key_len, GFP_KERNEL);
++              priv->keys[index].key = kzalloc(key_len, GFP_ATOMIC);
+               if (!priv->keys[index].key)
+                       goto nomem;
+       } else
+               priv->keys[index].key = NULL;
+       if (seq_len) {
+-              priv->keys[index].seq = kzalloc(seq_len, GFP_KERNEL);
++              priv->keys[index].seq = kzalloc(seq_len, GFP_ATOMIC);
+               if (!priv->keys[index].seq)
+                       goto free_key;
+       } else
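
The rule being enforced is a general one: an allocation reached with
a spinlock held and interrupts disabled must use GFP_ATOMIC, because
GFP_KERNEL is allowed to sleep to reclaim memory, and sleeping there
deadlocks. A compilable user-space sketch of the invariant -- the
GFP_* values and kzalloc() below are stand-in stubs, not the kernel's
definitions:

    #include <assert.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define GFP_KERNEL 0    /* may sleep to reclaim memory */
    #define GFP_ATOMIC 1    /* must never sleep */

    static int in_atomic_context;   /* "spinlock held" marker */

    static void *kzalloc(size_t len, int gfp)
    {
            /* A sleeping allocation under a spinlock is the bug
             * the lockdep trace above complains about. */
            assert(gfp == GFP_ATOMIC || !in_atomic_context);
            return calloc(1, len);
    }

    static void set_key(size_t key_len)
    {
            void *key;

            in_atomic_context = 1;  /* spin_lock_irqsave(...)     */
            key = kzalloc(key_len, GFP_ATOMIC); /* was GFP_KERNEL */
            in_atomic_context = 0;  /* spin_unlock_irqrestore(..) */

            printf("allocated %zu-byte key at %p\n", key_len, key);
            free(key);
    }

    int main(void)
    {
            set_key(32);
            return 0;
    }
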
diff --git a/queue-2.6.32/quota-decouple-fs-reserved-space-from-quota-reservation.patch b/queue-2.6.32/quota-decouple-fs-reserved-space-from-quota-reservation.patch
new file mode 100644 (file)
index 0000000..d6992a0
--- /dev/null
@@ -0,0 +1,382 @@
+From fd8fbfc1709822bd94247c5b2ab15a5f5041e103 Mon Sep 17 00:00:00 2001
+From: Dmitry Monakhov <dmonakhov@openvz.org>
+Date: Mon, 14 Dec 2009 15:21:13 +0300
+Subject: quota: decouple fs reserved space from quota reservation
+
+From: Dmitry Monakhov <dmonakhov@openvz.org>
+
+commit fd8fbfc1709822bd94247c5b2ab15a5f5041e103 upstream.
+
+Currently inode_reservation is managed by the fs itself and this
+reservation is transferred on dquot_transfer(). This means that
+inode_reservation must always be in sync with
+dquot->dq_dqb.dqb_rsvspace, otherwise dquot_transfer() will result
+in an incorrect quota (the WARN_ON in dquot_claim_reserved_space()
+will be triggered). This is not easy to guarantee because of
+complex locking order issues; see for example
+http://bugzilla.kernel.org/show_bug.cgi?id=14739
+
+The patch introduces a quota reservation field for each fs inode
+(the fs-specific inode is used in order to avoid bloating the
+generic vfs inode). This reservation is managed internally by the
+quota code, similar to i_blocks/i_bytes, and may not always be in
+sync with the internal fs reservation.
+
+Also perform some code rearrangement:
+- Unify dquot_alloc_space() and dquot_reserve_space()
+- Unify dquot_release_reserved_space() and dquot_free_space()
+- Add the missing warning update to release_rsv():
+  dquot_release_reserved_space() must call flush_warnings(), as
+  dquot_free_space() does.
+
+Signed-off-by: Dmitry Monakhov <dmonakhov@openvz.org>
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/quota/dquot.c      |  213 +++++++++++++++++++++++++++-----------------------
+ include/linux/quota.h |    5 -
+ 2 files changed, 122 insertions(+), 96 deletions(-)
+
+--- a/fs/quota/dquot.c
++++ b/fs/quota/dquot.c
+@@ -1388,6 +1388,67 @@ void vfs_dq_drop(struct inode *inode)
+ EXPORT_SYMBOL(vfs_dq_drop);
+ /*
++ * inode_reserved_space is managed internally by quota, and protected by
++ * i_lock similar to i_blocks+i_bytes.
++ */
++static qsize_t *inode_reserved_space(struct inode * inode)
++{
++      /* Filesystem must explicitly define it's own method in order to use
++       * quota reservation interface */
++      BUG_ON(!inode->i_sb->dq_op->get_reserved_space);
++      return inode->i_sb->dq_op->get_reserved_space(inode);
++}
++
++static void inode_add_rsv_space(struct inode *inode, qsize_t number)
++{
++      spin_lock(&inode->i_lock);
++      *inode_reserved_space(inode) += number;
++      spin_unlock(&inode->i_lock);
++}
++
++
++static void inode_claim_rsv_space(struct inode *inode, qsize_t number)
++{
++      spin_lock(&inode->i_lock);
++      *inode_reserved_space(inode) -= number;
++      __inode_add_bytes(inode, number);
++      spin_unlock(&inode->i_lock);
++}
++
++static void inode_sub_rsv_space(struct inode *inode, qsize_t number)
++{
++      spin_lock(&inode->i_lock);
++      *inode_reserved_space(inode) -= number;
++      spin_unlock(&inode->i_lock);
++}
++
++static qsize_t inode_get_rsv_space(struct inode *inode)
++{
++      qsize_t ret;
++      spin_lock(&inode->i_lock);
++      ret = *inode_reserved_space(inode);
++      spin_unlock(&inode->i_lock);
++      return ret;
++}
++
++static void inode_incr_space(struct inode *inode, qsize_t number,
++                              int reserve)
++{
++      if (reserve)
++              inode_add_rsv_space(inode, number);
++      else
++              inode_add_bytes(inode, number);
++}
++
++static void inode_decr_space(struct inode *inode, qsize_t number, int reserve)
++{
++      if (reserve)
++              inode_sub_rsv_space(inode, number);
++      else
++              inode_sub_bytes(inode, number);
++}
++
++/*
+  * Following four functions update i_blocks+i_bytes fields and
+  * quota information (together with appropriate checks)
+  * NOTE: We absolutely rely on the fact that caller dirties
+@@ -1405,6 +1466,21 @@ int __dquot_alloc_space(struct inode *in
+       int cnt, ret = QUOTA_OK;
+       char warntype[MAXQUOTAS];
++      /*
++       * First test before acquiring mutex - solves deadlocks when we
++       * re-enter the quota code and are already holding the mutex
++       */
++      if (IS_NOQUOTA(inode)) {
++              inode_incr_space(inode, number, reserve);
++              goto out;
++      }
++
++      down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
++      if (IS_NOQUOTA(inode)) {
++              inode_incr_space(inode, number, reserve);
++              goto out_unlock;
++      }
++
+       for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+               warntype[cnt] = QUOTA_NL_NOWARN;
+@@ -1415,7 +1491,8 @@ int __dquot_alloc_space(struct inode *in
+               if (check_bdq(inode->i_dquot[cnt], number, warn, warntype+cnt)
+                   == NO_QUOTA) {
+                       ret = NO_QUOTA;
+-                      goto out_unlock;
++                      spin_unlock(&dq_data_lock);
++                      goto out_flush_warn;
+               }
+       }
+       for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+@@ -1426,64 +1503,32 @@ int __dquot_alloc_space(struct inode *in
+               else
+                       dquot_incr_space(inode->i_dquot[cnt], number);
+       }
+-      if (!reserve)
+-              inode_add_bytes(inode, number);
+-out_unlock:
++      inode_incr_space(inode, number, reserve);
+       spin_unlock(&dq_data_lock);
+-      flush_warnings(inode->i_dquot, warntype);
+-      return ret;
+-}
+-
+-int dquot_alloc_space(struct inode *inode, qsize_t number, int warn)
+-{
+-      int cnt, ret = QUOTA_OK;
+-
+-      /*
+-       * First test before acquiring mutex - solves deadlocks when we
+-       * re-enter the quota code and are already holding the mutex
+-       */
+-      if (IS_NOQUOTA(inode)) {
+-              inode_add_bytes(inode, number);
+-              goto out;
+-      }
+-
+-      down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+-      if (IS_NOQUOTA(inode)) {
+-              inode_add_bytes(inode, number);
+-              goto out_unlock;
+-      }
+-
+-      ret = __dquot_alloc_space(inode, number, warn, 0);
+-      if (ret == NO_QUOTA)
+-              goto out_unlock;
++      if (reserve)
++              goto out_flush_warn;
+       /* Dirtify all the dquots - this can block when journalling */
+       for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+               if (inode->i_dquot[cnt])
+                       mark_dquot_dirty(inode->i_dquot[cnt]);
++out_flush_warn:
++      flush_warnings(inode->i_dquot, warntype);
+ out_unlock:
+       up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+ out:
+       return ret;
+ }
++
++int dquot_alloc_space(struct inode *inode, qsize_t number, int warn)
++{
++      return __dquot_alloc_space(inode, number, warn, 0);
++}
+ EXPORT_SYMBOL(dquot_alloc_space);
+ int dquot_reserve_space(struct inode *inode, qsize_t number, int warn)
+ {
+-      int ret = QUOTA_OK;
+-
+-      if (IS_NOQUOTA(inode))
+-              goto out;
+-
+-      down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+-      if (IS_NOQUOTA(inode))
+-              goto out_unlock;
+-
+-      ret = __dquot_alloc_space(inode, number, warn, 1);
+-out_unlock:
+-      up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+-out:
+-      return ret;
++      return __dquot_alloc_space(inode, number, warn, 1);
+ }
+ EXPORT_SYMBOL(dquot_reserve_space);
+@@ -1540,14 +1585,14 @@ int dquot_claim_space(struct inode *inod
+       int ret = QUOTA_OK;
+       if (IS_NOQUOTA(inode)) {
+-              inode_add_bytes(inode, number);
++              inode_claim_rsv_space(inode, number);
+               goto out;
+       }
+       down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+       if (IS_NOQUOTA(inode))  {
+               up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+-              inode_add_bytes(inode, number);
++              inode_claim_rsv_space(inode, number);
+               goto out;
+       }
+@@ -1559,7 +1604,7 @@ int dquot_claim_space(struct inode *inod
+                                                       number);
+       }
+       /* Update inode bytes */
+-      inode_add_bytes(inode, number);
++      inode_claim_rsv_space(inode, number);
+       spin_unlock(&dq_data_lock);
+       /* Dirtify all the dquots - this can block when journalling */
+       for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+@@ -1572,38 +1617,9 @@ out:
+ EXPORT_SYMBOL(dquot_claim_space);
+ /*
+- * Release reserved quota space
+- */
+-void dquot_release_reserved_space(struct inode *inode, qsize_t number)
+-{
+-      int cnt;
+-
+-      if (IS_NOQUOTA(inode))
+-              goto out;
+-
+-      down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+-      if (IS_NOQUOTA(inode))
+-              goto out_unlock;
+-
+-      spin_lock(&dq_data_lock);
+-      /* Release reserved dquots */
+-      for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+-              if (inode->i_dquot[cnt])
+-                      dquot_free_reserved_space(inode->i_dquot[cnt], number);
+-      }
+-      spin_unlock(&dq_data_lock);
+-
+-out_unlock:
+-      up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+-out:
+-      return;
+-}
+-EXPORT_SYMBOL(dquot_release_reserved_space);
+-
+-/*
+  * This operation can block, but only after everything is updated
+  */
+-int dquot_free_space(struct inode *inode, qsize_t number)
++int __dquot_free_space(struct inode *inode, qsize_t number, int reserve)
+ {
+       unsigned int cnt;
+       char warntype[MAXQUOTAS];
+@@ -1612,7 +1628,7 @@ int dquot_free_space(struct inode *inode
+          * re-enter the quota code and are already holding the mutex */
+       if (IS_NOQUOTA(inode)) {
+ out_sub:
+-              inode_sub_bytes(inode, number);
++              inode_decr_space(inode, number, reserve);
+               return QUOTA_OK;
+       }
+@@ -1627,21 +1643,43 @@ out_sub:
+               if (!inode->i_dquot[cnt])
+                       continue;
+               warntype[cnt] = info_bdq_free(inode->i_dquot[cnt], number);
+-              dquot_decr_space(inode->i_dquot[cnt], number);
++              if (reserve)
++                      dquot_free_reserved_space(inode->i_dquot[cnt], number);
++              else
++                      dquot_decr_space(inode->i_dquot[cnt], number);
+       }
+-      inode_sub_bytes(inode, number);
++      inode_decr_space(inode, number, reserve);
+       spin_unlock(&dq_data_lock);
++
++      if (reserve)
++              goto out_unlock;
+       /* Dirtify all the dquots - this can block when journalling */
+       for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+               if (inode->i_dquot[cnt])
+                       mark_dquot_dirty(inode->i_dquot[cnt]);
++out_unlock:
+       flush_warnings(inode->i_dquot, warntype);
+       up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+       return QUOTA_OK;
+ }
++
++int dquot_free_space(struct inode *inode, qsize_t number)
++{
++      return  __dquot_free_space(inode, number, 0);
++}
+ EXPORT_SYMBOL(dquot_free_space);
+ /*
++ * Release reserved quota space
++ */
++void dquot_release_reserved_space(struct inode *inode, qsize_t number)
++{
++      __dquot_free_space(inode, number, 1);
++
++}
++EXPORT_SYMBOL(dquot_release_reserved_space);
++
++/*
+  * This operation can block, but only after everything is updated
+  */
+ int dquot_free_inode(const struct inode *inode, qsize_t number)
+@@ -1679,19 +1717,6 @@ int dquot_free_inode(const struct inode 
+ EXPORT_SYMBOL(dquot_free_inode);
+ /*
+- * call back function, get reserved quota space from underlying fs
+- */
+-qsize_t dquot_get_reserved_space(struct inode *inode)
+-{
+-      qsize_t reserved_space = 0;
+-
+-      if (sb_any_quota_active(inode->i_sb) &&
+-          inode->i_sb->dq_op->get_reserved_space)
+-              reserved_space = inode->i_sb->dq_op->get_reserved_space(inode);
+-      return reserved_space;
+-}
+-
+-/*
+  * Transfer the number of inode and blocks from one diskquota to an other.
+  *
+  * This operation can block, but only after everything is updated
+@@ -1734,7 +1759,7 @@ int dquot_transfer(struct inode *inode, 
+       }
+       spin_lock(&dq_data_lock);
+       cur_space = inode_get_bytes(inode);
+-      rsv_space = dquot_get_reserved_space(inode);
++      rsv_space = inode_get_rsv_space(inode);
+       space = cur_space + rsv_space;
+       /* Build the transfer_from list and check the limits */
+       for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+--- a/include/linux/quota.h
++++ b/include/linux/quota.h
+@@ -313,8 +313,9 @@ struct dquot_operations {
+       int (*claim_space) (struct inode *, qsize_t);
+       /* release rsved quota for delayed alloc */
+       void (*release_rsv) (struct inode *, qsize_t);
+-      /* get reserved quota for delayed alloc */
+-      qsize_t (*get_reserved_space) (struct inode *);
++      /* get reserved quota for delayed alloc, value returned is managed by
++       * quota code only */
++      qsize_t *(*get_reserved_space) (struct inode *);
+ };
+ /* Operations handling requests from userspace */
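
The interface change at the bottom is the subtle part: the
get_reserved_space() callback now returns a pointer to the
filesystem's per-inode reservation field, and the quota code itself
updates that field under the inode lock -- the filesystem exposes
storage but no longer manages the value. A sketch of that ownership
split, with a pthread mutex standing in for i_lock and simplified
types:

    #include <pthread.h>
    #include <stdio.h>

    typedef long long qsize_t;

    struct inode {
            pthread_mutex_t i_lock; /* stands in for spin i_lock */
            qsize_t i_reserved;     /* fs-specific field */
    };

    /* dq_op->get_reserved_space: expose the field, nothing more. */
    static qsize_t *get_reserved_space(struct inode *inode)
    {
            return &inode->i_reserved;
    }

    /* Quota-side update, like inode_add_rsv_space() above. */
    static void inode_add_rsv_space(struct inode *inode, qsize_t n)
    {
            pthread_mutex_lock(&inode->i_lock);
            *get_reserved_space(inode) += n;
            pthread_mutex_unlock(&inode->i_lock);
    }

    int main(void)
    {
            struct inode ino = { PTHREAD_MUTEX_INITIALIZER, 0 };

            inode_add_rsv_space(&ino, 4096);
            printf("reserved: %lld bytes\n", ino.i_reserved);
            return 0;
    }
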
diff --git a/queue-2.6.32/s390-dasd-support-diag-access-for-read-only-devices.patch b/queue-2.6.32/s390-dasd-support-diag-access-for-read-only-devices.patch
new file mode 100644 (file)
index 0000000..c25feb3
--- /dev/null
@@ -0,0 +1,72 @@
+From 22825ab7693fd29769518a0d25ba43c01a50092a Mon Sep 17 00:00:00 2001
+From: Stefan Weinhuber <wein@de.ibm.com>
+Date: Mon, 7 Dec 2009 12:51:48 +0100
+Subject: S390: dasd: support DIAG access for read-only devices
+
+From: Stefan Weinhuber <wein@de.ibm.com>
+
+commit 22825ab7693fd29769518a0d25ba43c01a50092a upstream.
+
+When a DASD device is used with the DIAG discipline, the DIAG
+initialization indicates success or error via its return
+return code. So far we have interpreted a return code of 4 as error,
+but it actually means that the initialization was successful, but
+the device is read-only. To allow read-only devices to be used with
+DIAG we need to accept a return code of 4 as success.
+
+Re-initialization of the DIAG access is also part of the DIAG error
+recovery. If we find that the access mode of a device has been
+changed from writable to read-only while the device was in use,
+we print an error message.
+
+Signed-off-by: Stefan Weinhuber <wein@de.ibm.com>
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Cc: Stephen Powell <zlinuxman@wowway.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/s390/block/dasd_diag.c |   19 ++++++++++++++++---
+ 1 file changed, 16 insertions(+), 3 deletions(-)
+
+--- a/drivers/s390/block/dasd_diag.c
++++ b/drivers/s390/block/dasd_diag.c
+@@ -145,6 +145,15 @@ dasd_diag_erp(struct dasd_device *device
+       mdsk_term_io(device);
+       rc = mdsk_init_io(device, device->block->bp_block, 0, NULL);
++      if (rc == 4) {
++              if (!(device->features & DASD_FEATURE_READONLY)) {
++                      dev_warn(&device->cdev->dev,
++                               "The access mode of a DIAG device changed"
++                               " to read-only");
++                      device->features |= DASD_FEATURE_READONLY;
++              }
++              rc = 0;
++      }
+       if (rc)
+               dev_warn(&device->cdev->dev, "DIAG ERP failed with "
+                           "rc=%d\n", rc);
+@@ -433,16 +442,20 @@ dasd_diag_check_device(struct dasd_devic
+       for (sb = 512; sb < bsize; sb = sb << 1)
+               block->s2b_shift++;
+       rc = mdsk_init_io(device, block->bp_block, 0, NULL);
+-      if (rc) {
++      if (rc && (rc != 4)) {
+               dev_warn(&device->cdev->dev, "DIAG initialization "
+                       "failed with rc=%d\n", rc);
+               rc = -EIO;
+       } else {
++              if (rc == 4)
++                      device->features |= DASD_FEATURE_READONLY;
+               dev_info(&device->cdev->dev,
+-                       "New DASD with %ld byte/block, total size %ld KB\n",
++                       "New DASD with %ld byte/block, total size %ld KB%s\n",
+                        (unsigned long) block->bp_block,
+                        (unsigned long) (block->blocks <<
+-                                        block->s2b_shift) >> 1);
++                                        block->s2b_shift) >> 1,
++                       (rc == 4) ? ", read-only device" : "");
++              rc = 0;
+       }
+ out_label:
+       free_page((long) label);
diff --git a/queue-2.6.32/sched-fix-balance-vs-hotplug-race.patch b/queue-2.6.32/sched-fix-balance-vs-hotplug-race.patch
new file mode 100644 (file)
index 0000000..d4d5302
--- /dev/null
@@ -0,0 +1,297 @@
+From 6ad4c18884e864cf4c77f9074d3d1816063f99cd Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Date: Wed, 25 Nov 2009 13:31:39 +0100
+Subject: sched: Fix balance vs hotplug race
+
+From: Peter Zijlstra <a.p.zijlstra@chello.nl>
+
+commit 6ad4c18884e864cf4c77f9074d3d1816063f99cd upstream.
+
+Since (e761b77: cpu hotplug, sched: Introduce cpu_active_map and redo
+sched domain managment) we have cpu_active_mask, which is supposed to rule
+scheduler migration and load-balancing, except it never (fully) did.
+
+The particular problem being solved here is a crash in try_to_wake_up()
+where select_task_rq() ends up selecting an offline cpu because
+select_task_rq_fair() trusts the sched_domain tree to reflect the
+current state of affairs, similarly select_task_rq_rt() trusts the
+root_domain.
+
+However, the sched_domains are updated from CPU_DEAD, which is after the
+cpu is taken offline and after stop_machine is done. Therefore it can
+race perfectly well with code assuming the domains are right.
+
+Cure this by building the domains from cpu_active_mask on
+CPU_DOWN_PREPARE.
+
+Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+LKML-Reference: <new-submission>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Cc: Mike Galbraith <efault@gmx.de>
+Cc: Holger Hoffstätte <holger.hoffstaette@googlemail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ include/linux/cpumask.h |    2 ++
+ kernel/cpu.c            |   18 +++++++++++++-----
+ kernel/cpuset.c         |   16 +++++++++-------
+ kernel/sched.c          |   32 +++++++++++++++++---------------
+ 4 files changed, 41 insertions(+), 27 deletions(-)
+
+--- a/include/linux/cpumask.h
++++ b/include/linux/cpumask.h
+@@ -84,6 +84,7 @@ extern const struct cpumask *const cpu_a
+ #define num_online_cpus()     cpumask_weight(cpu_online_mask)
+ #define num_possible_cpus()   cpumask_weight(cpu_possible_mask)
+ #define num_present_cpus()    cpumask_weight(cpu_present_mask)
++#define num_active_cpus()     cpumask_weight(cpu_active_mask)
+ #define cpu_online(cpu)               cpumask_test_cpu((cpu), cpu_online_mask)
+ #define cpu_possible(cpu)     cpumask_test_cpu((cpu), cpu_possible_mask)
+ #define cpu_present(cpu)      cpumask_test_cpu((cpu), cpu_present_mask)
+@@ -92,6 +93,7 @@ extern const struct cpumask *const cpu_a
+ #define num_online_cpus()     1
+ #define num_possible_cpus()   1
+ #define num_present_cpus()    1
++#define num_active_cpus()     1
+ #define cpu_online(cpu)               ((cpu) == 0)
+ #define cpu_possible(cpu)     ((cpu) == 0)
+ #define cpu_present(cpu)      ((cpu) == 0)
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -212,6 +212,8 @@ static int __ref _cpu_down(unsigned int 
+       err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod,
+                                       hcpu, -1, &nr_calls);
+       if (err == NOTIFY_BAD) {
++              set_cpu_active(cpu, true);
++
+               nr_calls--;
+               __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
+                                         hcpu, nr_calls, NULL);
+@@ -223,11 +225,11 @@ static int __ref _cpu_down(unsigned int 
+       /* Ensure that we are not runnable on dying cpu */
+       cpumask_copy(old_allowed, &current->cpus_allowed);
+-      set_cpus_allowed_ptr(current,
+-                           cpumask_of(cpumask_any_but(cpu_online_mask, cpu)));
++      set_cpus_allowed_ptr(current, cpu_active_mask);
+       err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
+       if (err) {
++              set_cpu_active(cpu, true);
+               /* CPU didn't die: tell everyone.  Can't complain. */
+               if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
+                                           hcpu) == NOTIFY_BAD)
+@@ -292,9 +294,6 @@ int __ref cpu_down(unsigned int cpu)
+       err = _cpu_down(cpu, 0);
+-      if (cpu_online(cpu))
+-              set_cpu_active(cpu, true);
+-
+ out:
+       cpu_maps_update_done();
+       stop_machine_destroy();
+@@ -387,6 +386,15 @@ int disable_nonboot_cpus(void)
+        * with the userspace trying to use the CPU hotplug at the same time
+        */
+       cpumask_clear(frozen_cpus);
++
++      for_each_online_cpu(cpu) {
++              if (cpu == first_cpu)
++                      continue;
++              set_cpu_active(cpu, false);
++      }
++
++      synchronize_sched();
++
+       printk("Disabling non-boot CPUs ...\n");
+       for_each_online_cpu(cpu) {
+               if (cpu == first_cpu)
+--- a/kernel/cpuset.c
++++ b/kernel/cpuset.c
+@@ -873,7 +873,7 @@ static int update_cpumask(struct cpuset 
+               if (retval < 0)
+                       return retval;
+-              if (!cpumask_subset(trialcs->cpus_allowed, cpu_online_mask))
++              if (!cpumask_subset(trialcs->cpus_allowed, cpu_active_mask))
+                       return -EINVAL;
+       }
+       retval = validate_change(cs, trialcs);
+@@ -2011,7 +2011,7 @@ static void scan_for_empty_cpusets(struc
+               }
+               /* Continue past cpusets with all cpus, mems online */
+-              if (cpumask_subset(cp->cpus_allowed, cpu_online_mask) &&
++              if (cpumask_subset(cp->cpus_allowed, cpu_active_mask) &&
+                   nodes_subset(cp->mems_allowed, node_states[N_HIGH_MEMORY]))
+                       continue;
+@@ -2020,7 +2020,7 @@ static void scan_for_empty_cpusets(struc
+               /* Remove offline cpus and mems from this cpuset. */
+               mutex_lock(&callback_mutex);
+               cpumask_and(cp->cpus_allowed, cp->cpus_allowed,
+-                          cpu_online_mask);
++                          cpu_active_mask);
+               nodes_and(cp->mems_allowed, cp->mems_allowed,
+                                               node_states[N_HIGH_MEMORY]);
+               mutex_unlock(&callback_mutex);
+@@ -2058,8 +2058,10 @@ static int cpuset_track_online_cpus(stru
+       switch (phase) {
+       case CPU_ONLINE:
+       case CPU_ONLINE_FROZEN:
+-      case CPU_DEAD:
+-      case CPU_DEAD_FROZEN:
++      case CPU_DOWN_PREPARE:
++      case CPU_DOWN_PREPARE_FROZEN:
++      case CPU_DOWN_FAILED:
++      case CPU_DOWN_FAILED_FROZEN:
+               break;
+       default:
+@@ -2068,7 +2070,7 @@ static int cpuset_track_online_cpus(stru
+       cgroup_lock();
+       mutex_lock(&callback_mutex);
+-      cpumask_copy(top_cpuset.cpus_allowed, cpu_online_mask);
++      cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask);
+       mutex_unlock(&callback_mutex);
+       scan_for_empty_cpusets(&top_cpuset);
+       ndoms = generate_sched_domains(&doms, &attr);
+@@ -2115,7 +2117,7 @@ static int cpuset_track_online_nodes(str
+ void __init cpuset_init_smp(void)
+ {
+-      cpumask_copy(top_cpuset.cpus_allowed, cpu_online_mask);
++      cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask);
+       top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
+       hotcpu_notifier(cpuset_track_online_cpus, 0);
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -4139,7 +4139,7 @@ static int load_balance(int this_cpu, st
+       unsigned long flags;
+       struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
+-      cpumask_copy(cpus, cpu_online_mask);
++      cpumask_copy(cpus, cpu_active_mask);
+       /*
+        * When power savings policy is enabled for the parent domain, idle
+@@ -4302,7 +4302,7 @@ load_balance_newidle(int this_cpu, struc
+       int all_pinned = 0;
+       struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
+-      cpumask_copy(cpus, cpu_online_mask);
++      cpumask_copy(cpus, cpu_active_mask);
+       /*
+        * When power savings policy is enabled for the parent domain, idle
+@@ -4699,7 +4699,7 @@ int select_nohz_load_balancer(int stop_t
+               cpumask_set_cpu(cpu, nohz.cpu_mask);
+               /* time for ilb owner also to sleep */
+-              if (cpumask_weight(nohz.cpu_mask) == num_online_cpus()) {
++              if (cpumask_weight(nohz.cpu_mask) == num_active_cpus()) {
+                       if (atomic_read(&nohz.load_balancer) == cpu)
+                               atomic_set(&nohz.load_balancer, -1);
+                       return 0;
+@@ -7075,7 +7075,7 @@ int set_cpus_allowed_ptr(struct task_str
+       int ret = 0;
+       rq = task_rq_lock(p, &flags);
+-      if (!cpumask_intersects(new_mask, cpu_online_mask)) {
++      if (!cpumask_intersects(new_mask, cpu_active_mask)) {
+               ret = -EINVAL;
+               goto out;
+       }
+@@ -7097,7 +7097,7 @@ int set_cpus_allowed_ptr(struct task_str
+       if (cpumask_test_cpu(task_cpu(p), new_mask))
+               goto out;
+-      if (migrate_task(p, cpumask_any_and(cpu_online_mask, new_mask), &req)) {
++      if (migrate_task(p, cpumask_any_and(cpu_active_mask, new_mask), &req)) {
+               /* Need help from migration thread: drop lock and wait. */
+               struct task_struct *mt = rq->migration_thread;
+@@ -7251,19 +7251,19 @@ static void move_task_off_dead_cpu(int d
+ again:
+       /* Look for allowed, online CPU in same node. */
+-      for_each_cpu_and(dest_cpu, nodemask, cpu_online_mask)
++      for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
+               if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
+                       goto move;
+       /* Any allowed, online CPU? */
+-      dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_online_mask);
++      dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
+       if (dest_cpu < nr_cpu_ids)
+               goto move;
+       /* No more Mr. Nice Guy. */
+       if (dest_cpu >= nr_cpu_ids) {
+               cpuset_cpus_allowed_locked(p, &p->cpus_allowed);
+-              dest_cpu = cpumask_any_and(cpu_online_mask, &p->cpus_allowed);
++              dest_cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
+               /*
+                * Don't tell them about moving exiting tasks or
+@@ -7292,7 +7292,7 @@ move:
+  */
+ static void migrate_nr_uninterruptible(struct rq *rq_src)
+ {
+-      struct rq *rq_dest = cpu_rq(cpumask_any(cpu_online_mask));
++      struct rq *rq_dest = cpu_rq(cpumask_any(cpu_active_mask));
+       unsigned long flags;
+       local_irq_save(flags);
+@@ -7546,7 +7546,7 @@ static ctl_table *sd_alloc_ctl_cpu_table
+ static struct ctl_table_header *sd_sysctl_header;
+ static void register_sched_domain_sysctl(void)
+ {
+-      int i, cpu_num = num_online_cpus();
++      int i, cpu_num = num_possible_cpus();
+       struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
+       char buf[32];
+@@ -7556,7 +7556,7 @@ static void register_sched_domain_sysctl
+       if (entry == NULL)
+               return;
+-      for_each_online_cpu(i) {
++      for_each_possible_cpu(i) {
+               snprintf(buf, 32, "cpu%d", i);
+               entry->procname = kstrdup(buf, GFP_KERNEL);
+               entry->mode = 0555;
+@@ -9042,7 +9042,7 @@ match1:
+       if (doms_new == NULL) {
+               ndoms_cur = 0;
+               doms_new = fallback_doms;
+-              cpumask_andnot(&doms_new[0], cpu_online_mask, cpu_isolated_map);
++              cpumask_andnot(&doms_new[0], cpu_active_mask, cpu_isolated_map);
+               WARN_ON_ONCE(dattr_new);
+       }
+@@ -9173,8 +9173,10 @@ static int update_sched_domains(struct n
+       switch (action) {
+       case CPU_ONLINE:
+       case CPU_ONLINE_FROZEN:
+-      case CPU_DEAD:
+-      case CPU_DEAD_FROZEN:
++      case CPU_DOWN_PREPARE:
++      case CPU_DOWN_PREPARE_FROZEN:
++      case CPU_DOWN_FAILED:
++      case CPU_DOWN_FAILED_FROZEN:
+               partition_sched_domains(1, NULL, NULL);
+               return NOTIFY_OK;
+@@ -9221,7 +9223,7 @@ void __init sched_init_smp(void)
+ #endif
+       get_online_cpus();
+       mutex_lock(&sched_domains_mutex);
+-      arch_init_sched_domains(cpu_online_mask);
++      arch_init_sched_domains(cpu_active_mask);
+       cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
+       if (cpumask_empty(non_isolated_cpus))
+               cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
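
The hunks above switch the load balancer's CPU selection from cpu_online_mask to cpu_active_mask: a CPU being hot-removed stays online while its tasks drain, but it drops out of the active mask first, so migration must never pick it as a target. A minimal user-space model of that mask intersection (plain bitmasks stand in for struct cpumask; pick_dest_cpu is an invented name for the sketch):

    #include <stdio.h>

    #define NCPUS 8

    /* Bitmask stand-ins for struct cpumask: bit n set => CPU n in mask. */
    static unsigned online = 0xffu;   /* all eight CPUs are online       */
    static unsigned active = 0xf7u;   /* CPU 3 is going down: still
                                         online, no longer active        */

    /* Mirrors cpumask_any_and(cpu_active_mask, new_mask): pick a CPU
     * that is both allowed and active, never one merely online.         */
    static int pick_dest_cpu(unsigned allowed)
    {
        unsigned candidates = allowed & active;
        for (int cpu = 0; cpu < NCPUS; cpu++)
            if (candidates & (1u << cpu))
                return cpu;
        return -1;   /* no valid target: -EINVAL in set_cpus_allowed_ptr() */
    }

    int main(void)
    {
        printf("online=%#x active=%#x\n", online, active);
        printf("task allowed on {3} only -> dest %d\n", pick_dest_cpu(1u << 3));
        printf("task allowed on {2,3}    -> dest %d\n",
               pick_dest_cpu((1u << 2) | (1u << 3)));
        return 0;
    }
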
diff --git a/queue-2.6.32/sched-sched_rt_periodic_timer-vs-cpu-hotplug.patch b/queue-2.6.32/sched-sched_rt_periodic_timer-vs-cpu-hotplug.patch
new file mode 100644 (file)
index 0000000..bea3d45
--- /dev/null
@@ -0,0 +1,43 @@
+From 047106adcc85e3023da210143a6ab8a55df9e0fc Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Date: Mon, 16 Nov 2009 10:28:09 +0100
+Subject: sched: Sched_rt_periodic_timer vs cpu hotplug
+
+From: Peter Zijlstra <a.p.zijlstra@chello.nl>
+
+commit 047106adcc85e3023da210143a6ab8a55df9e0fc upstream.
+
+Heiko reported a case where a timer interrupt managed to
+reference a root_domain structure that was already freed by a
+concurrent hot-unplug operation.
+
+Solve this the same way the regular sched_domain code is
+synchronized: add a synchronize_sched() call to the free path,
+which ensures that a root_domain stays present for any atomic
+section that could have observed it.
+
+Reported-by: Heiko Carstens <heiko.carstens@de.ibm.com>
+Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Acked-by: Heiko Carstens <heiko.carstens@de.ibm.com>
+Cc: Gregory Haskins <ghaskins@novell.com>
+Cc: Siddha Suresh B <suresh.b.siddha@intel.com>
+Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
+LKML-Reference: <1258363873.26714.83.camel@laptop>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ kernel/sched.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -7925,6 +7925,8 @@ sd_parent_degenerate(struct sched_domain
+ static void free_rootdomain(struct root_domain *rd)
+ {
++      synchronize_sched();
++
+       cpupri_cleanup(&rd->cpupri);
+       free_cpumask_var(rd->rto_mask);
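
The two added lines make free_rootdomain() wait out every in-flight atomic (preempt-disabled) section before the root_domain memory is reused. A rough user-space model of that ordering, with an atomic reader count standing in for what synchronize_sched() guarantees kernel-side (all names here are invented for the sketch; build with -pthread):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    static _Atomic int readers;     /* CPUs inside atomic sections */
    static int *_Atomic domain;     /* the published "root_domain" */

    static void *reader(void *arg)
    {
        (void)arg;
        atomic_fetch_add(&readers, 1);          /* "preempt_disable()" */
        int *d = atomic_load(&domain);
        if (d)
            printf("reader observed %d\n", *d);
        atomic_fetch_sub(&readers, 1);          /* "preempt_enable()"  */
        return NULL;
    }

    static void free_rootdomain(int *old)
    {
        while (atomic_load(&readers))           /* "synchronize_sched()" */
            ;                                   /* wait for readers to drain */
        free(old);                              /* now nobody can hold it */
    }

    int main(void)
    {
        int *d = malloc(sizeof *d);
        *d = 42;
        atomic_store(&domain, d);

        pthread_t t;
        pthread_create(&t, NULL, reader, NULL);

        int *old = atomic_exchange(&domain, NULL);  /* unpublish, then... */
        free_rootdomain(old);                       /* ...wait, then free */

        pthread_join(t, NULL);
        return 0;
    }
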
index a97f1cc42ccec3bbbfc989472c4313bd8e76fd4f..5a8d8972ff56ddb444ba51d2fdb4e8f1bf4b3803 100644 (file)
@@ -56,3 +56,42 @@ iwl3945-fix-panic-in-iwl3945-driver.patch
 iwlwifi-fix-eeprom-otp-reading-endian-annotations-and-a-bug.patch
 iwlwifi-fix-more-eeprom-endian-bugs.patch
 iwlwifi-fix-40mhz-operation-setting-on-cards-that-do-not-allow-it.patch
+mac80211-fix-race-with-suspend-and-dynamic_ps_disable_work.patch
+nommu-optimise-away-the-dac_-mmap_min_addr-tests.patch
+sysctl_max_map_count-should-be-non-negative.patch
+kernel-sysctl.c-fix-the-incomplete-part-of-sysctl_max_map_count-should-be-non-negative.patch.patch
+v4l-dvb-13596-ov511.c-typo-lock-unlock.patch
+x86-ptrace-make-genregs_get-set-more-robust.patch
+memcg-avoid-oom-killing-innocent-task-in-case-of-use_hierarchy.patch
+e100-fix-broken-cbs-accounting-due-to-missing-memset.patch
+ipv6-reassembly-use-seperate-reassembly-queues-for-conntrack-and-local-delivery.patch
+netfilter-fix-crashes-in-bridge-netfilter-caused-by-fragment-jumps.patch
+hwmon-sht15-off-by-one-error-in-array-index-incorrect-constants.patch
+b43-avoid-ppc-fault-during-resume.patch
+keys-keyctl_session_to_parent-needs-tif_notify_resume-architecture-support.patch
+sched-fix-balance-vs-hotplug-race.patch
+drm-radeon-kms-fix-crtc-vblank-update-for-r600.patch
+drm-disable-all-the-possible-outputs-crtcs-before-entering-kms-mode.patch
+s390-dasd-support-diag-access-for-read-only-devices.patch
+xen-fix-is_disconnected_device-exists_disconnected_device.patch
+xen-improvement-to-wait_for_devices.patch
+xen-wait-up-to-5-minutes-for-device-connetion.patch
+orinoco-fix-gfp_kernel-in-orinoco_set_key-with-interrupts-disabled.patch
+udf-try-harder-when-looking-for-vat-inode.patch
+add-unlocked-version-of-inode_add_bytes-function.patch
+quota-decouple-fs-reserved-space-from-quota-reservation.patch
+ext4-convert-to-generic-reserved-quota-s-space-management.patch
+ext4-fix-potential-quota-deadlock.patch
+ext4-fix-sleep-inside-spinlock-issue-with-quota-and-dealloc-14739.patch
+x86-msr-unify-rdmsr_on_cpus-wrmsr_on_cpus.patch
+cpumask-use-modern-cpumask-style-in-drivers-edac-amd64_edac.c.patch
+amd64_edac-unify-mcgctl-ecc-switching.patch
+x86-msr-add-support-for-non-contiguous-cpumasks.patch
+x86-msr-msrs_alloc-free-for-config_smp-n.patch
+amd64_edac-fix-driver-instance-freeing.patch
+amd64_edac-make-driver-loading-more-robust.patch
+amd64_edac-fix-forcing-module-load-unload.patch
+sched-sched_rt_periodic_timer-vs-cpu-hotplug.patch
+ext4-update-documentation-to-correct-the-inode_readahead_blks-option-name.patch
+lguest-fix-bug-in-setting-guest-gdt-entry.patch
+vmscan-do-not-evict-inactive-pages-when-skipping-an-active-list-scan.patch
diff --git a/queue-2.6.32/sysctl_max_map_count-should-be-non-negative.patch b/queue-2.6.32/sysctl_max_map_count-should-be-non-negative.patch
new file mode 100644 (file)
index 0000000..a365e53
--- /dev/null
@@ -0,0 +1,50 @@
+From 70da2340fbc68e91e701762f785479ab495a0869 Mon Sep 17 00:00:00 2001
+From: Amerigo Wang <amwang@redhat.com>
+Date: Mon, 14 Dec 2009 17:59:52 -0800
+Subject: 'sysctl_max_map_count' should be non-negative
+
+From: Amerigo Wang <amwang@redhat.com>
+
+commit 70da2340fbc68e91e701762f785479ab495a0869 upstream.
+
+Jan Engelhardt reported we have this problem:
+
+setting max_map_count to a value large enough results in programs dying at
+first try.  This is on 2.6.31.6:
+
+15:59 borg:/proc/sys/vm # echo $[1<<31-1] >max_map_count
+15:59 borg:/proc/sys/vm # cat max_map_count
+1073741824
+15:59 borg:/proc/sys/vm # echo $[1<<31] >max_map_count
+15:59 borg:/proc/sys/vm # cat max_map_count
+Killed
+
+This is because 'max_map_count' can be made negative, which is
+meaningless.  Make it accept only non-negative values.
+
+Reported-by: Jan Engelhardt <jengelh@medozas.de>
+Signed-off-by: WANG Cong <amwang@redhat.com>
+Cc: Ingo Molnar <mingo@elte.hu>
+Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Cc: James Morris <jmorris@namei.org>
+Cc: Alexey Dobriyan <adobriyan@gmail.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ kernel/sysctl.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -1607,7 +1607,8 @@ static struct ctl_table debug_table[] = 
+               .data           = &show_unhandled_signals,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+-              .proc_handler   = proc_dointvec
++              .proc_handler   = proc_dointvec,
++              .extra1         = &zero,
+       },
+ #endif
+       { .ctl_name = 0 }
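
Note that plain proc_dointvec ignores .extra1; the bound only bites with a minmax-style handler, which is what the companion "fix-the-incomplete-part" patch in this series addresses. A user-space model of the clamp the series is after (write_max_map_count is a made-up stand-in for the sysctl handler):

    #include <limits.h>
    #include <stdio.h>

    static int zero = 0;
    static int max_map_count = 65530;

    /* Models proc_dointvec_minmax with .extra1 = &zero: writes below
     * the bound are refused instead of stored as negative values.   */
    static int write_max_map_count(int val)
    {
        if (val < zero)
            return -1;              /* -EINVAL, value refused */
        max_map_count = val;
        return 0;
    }

    int main(void)
    {
        printf("write %d -> %d\n", 1 << 30, write_max_map_count(1 << 30));
        printf("write %d -> %d\n", INT_MIN, write_max_map_count(INT_MIN));
        printf("max_map_count = %d\n", max_map_count);
        return 0;
    }
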
diff --git a/queue-2.6.32/udf-try-harder-when-looking-for-vat-inode.patch b/queue-2.6.32/udf-try-harder-when-looking-for-vat-inode.patch
new file mode 100644 (file)
index 0000000..cb5ab3b
--- /dev/null
@@ -0,0 +1,78 @@
+From e971b0b9e0dd50d9ceecb67a6a6ab80a80906033 Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Mon, 30 Nov 2009 19:47:55 +0100
+Subject: udf: Try harder when looking for VAT inode
+
+From: Jan Kara <jack@suse.cz>
+
+commit e971b0b9e0dd50d9ceecb67a6a6ab80a80906033 upstream.
+
+Some disks do not contain the VAT inode in the last recorded block as
+required by the standard, but a few blocks earlier (or the number of
+recorded blocks is wrong).  So look for the VAT inode a bit before the
+end of the media.
+
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/udf/super.c |   32 ++++++++++++++++++++++++--------
+ 1 file changed, 24 insertions(+), 8 deletions(-)
+
+--- a/fs/udf/super.c
++++ b/fs/udf/super.c
+@@ -1078,21 +1078,39 @@ static int udf_fill_partdesc_info(struct
+       return 0;
+ }
+-static int udf_load_vat(struct super_block *sb, int p_index, int type1_index)
++static void udf_find_vat_block(struct super_block *sb, int p_index,
++                             int type1_index, sector_t start_block)
+ {
+       struct udf_sb_info *sbi = UDF_SB(sb);
+       struct udf_part_map *map = &sbi->s_partmaps[p_index];
++      sector_t vat_block;
+       struct kernel_lb_addr ino;
++
++      /*
++       * VAT file entry is in the last recorded block. Some broken disks have
++       * it a few blocks before so try a bit harder...
++       */
++      ino.partitionReferenceNum = type1_index;
++      for (vat_block = start_block;
++           vat_block >= map->s_partition_root &&
++           vat_block >= start_block - 3 &&
++           !sbi->s_vat_inode; vat_block--) {
++              ino.logicalBlockNum = vat_block - map->s_partition_root;
++              sbi->s_vat_inode = udf_iget(sb, &ino);
++      }
++}
++
++static int udf_load_vat(struct super_block *sb, int p_index, int type1_index)
++{
++      struct udf_sb_info *sbi = UDF_SB(sb);
++      struct udf_part_map *map = &sbi->s_partmaps[p_index];
+       struct buffer_head *bh = NULL;
+       struct udf_inode_info *vati;
+       uint32_t pos;
+       struct virtualAllocationTable20 *vat20;
+       sector_t blocks = sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits;
+-      /* VAT file entry is in the last recorded block */
+-      ino.partitionReferenceNum = type1_index;
+-      ino.logicalBlockNum = sbi->s_last_block - map->s_partition_root;
+-      sbi->s_vat_inode = udf_iget(sb, &ino);
++      udf_find_vat_block(sb, p_index, type1_index, sbi->s_last_block);
+       if (!sbi->s_vat_inode &&
+           sbi->s_last_block != blocks - 1) {
+               printk(KERN_NOTICE "UDF-fs: Failed to read VAT inode from the"
+@@ -1100,9 +1118,7 @@ static int udf_load_vat(struct super_blo
+                      "block of the device (%lu).\n",
+                      (unsigned long)sbi->s_last_block,
+                      (unsigned long)blocks - 1);
+-              ino.partitionReferenceNum = type1_index;
+-              ino.logicalBlockNum = blocks - 1 - map->s_partition_root;
+-              sbi->s_vat_inode = udf_iget(sb, &ino);
++              udf_find_vat_block(sb, p_index, type1_index, blocks - 1);
+       }
+       if (!sbi->s_vat_inode)
+               return 1;
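
The refactored udf_find_vat_block() turns a single probe of the expected block into a short backwards scan. A user-space model with the same loop bounds (vat_present is an invented stand-in for udf_iget() succeeding):

    #include <stdio.h>

    typedef long sector_t;

    static int vat_present(sector_t block)
    {
        return block == 97;   /* pretend a broken disk put the VAT here */
    }

    /* Same bounds as udf_find_vat_block(): from the expected block,
     * walk back at most three blocks, never below the partition root. */
    static sector_t find_vat_block(sector_t start, sector_t root)
    {
        for (sector_t b = start; b >= root && b >= start - 3; b--)
            if (vat_present(b))
                return b;
        return -1;
    }

    int main(void)
    {
        /* Last recorded block is 99; the VAT actually sits two back. */
        printf("VAT found at block %ld\n", find_vat_block(99, 0));
        return 0;
    }
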
diff --git a/queue-2.6.32/v4l-dvb-13596-ov511.c-typo-lock-unlock.patch b/queue-2.6.32/v4l-dvb-13596-ov511.c-typo-lock-unlock.patch
new file mode 100644 (file)
index 0000000..99af476
--- /dev/null
@@ -0,0 +1,33 @@
+From 50e9d31183ed61c787b870cb3ee8f6c3db8c8a1e Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <error27@gmail.com>
+Date: Thu, 10 Dec 2009 16:44:51 -0300
+Subject: V4L/DVB (13596): ov511.c typo: lock => unlock
+
+From: Dan Carpenter <error27@gmail.com>
+
+commit 50e9d31183ed61c787b870cb3ee8f6c3db8c8a1e upstream.
+
+This was found with a static checker and has not been tested, but it seems
+pretty clear that the mutex_lock() was supposed to be mutex_unlock().
+
+Signed-off-by: Dan Carpenter <error27@gmail.com>
+Signed-off-by: Douglas Schilling Landgraf <dougsland@redhat.com>
+Signed-off-by: Mauro Carvalho Chehab <mchehab@redhat.com>
+Cc: Brandon Philips <brandon@ifup.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/media/video/ov511.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/media/video/ov511.c
++++ b/drivers/media/video/ov511.c
+@@ -5878,7 +5878,7 @@ ov51x_probe(struct usb_interface *intf, 
+               goto error;
+       }
+-      mutex_lock(&ov->lock);
++      mutex_unlock(&ov->lock);
+       return 0;
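
The one-line fix restores lock balance on the probe success path: the mutex taken earlier was being locked a second time instead of released. A minimal pthread model of why that matters (names invented for the sketch; build with -pthread):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t ov_lock = PTHREAD_MUTEX_INITIALIZER;

    static int probe(void)
    {
        pthread_mutex_lock(&ov_lock);
        /* ... setup that may 'goto error' while holding the lock ... */
        pthread_mutex_unlock(&ov_lock);   /* was a second lock() here */
        return 0;
    }

    int main(void)
    {
        probe();
        pthread_mutex_lock(&ov_lock);     /* hangs before the fix */
        puts("lock balanced after probe");
        pthread_mutex_unlock(&ov_lock);
        return 0;
    }
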
diff --git a/queue-2.6.32/vmscan-do-not-evict-inactive-pages-when-skipping-an-active-list-scan.patch b/queue-2.6.32/vmscan-do-not-evict-inactive-pages-when-skipping-an-active-list-scan.patch
new file mode 100644 (file)
index 0000000..30fcf89
--- /dev/null
@@ -0,0 +1,72 @@
+From b39415b2731d7dec5e612d2d12595da82399eedf Mon Sep 17 00:00:00 2001
+From: Rik van Riel <riel@redhat.com>
+Date: Mon, 14 Dec 2009 17:59:48 -0800
+Subject: vmscan: do not evict inactive pages when skipping an active list scan
+
+From: Rik van Riel <riel@redhat.com>
+
+commit b39415b2731d7dec5e612d2d12595da82399eedf upstream.
+
+In AIM7 runs, recent kernels start swapping out anonymous pages well
+before they should.  This is due to shrink_list falling through to
+shrink_inactive_list if !inactive_anon_is_low(zone, sc), when all we
+really wanted to do is pre-age some anonymous pages to give them extra
+time to be referenced while on the inactive list.
+
+The obvious fix is to make sure that shrink_list does not fall through to
+scanning/reclaiming inactive pages when we called it to scan one of the
+active lists.
+
+This change should be safe because the loop in shrink_zone ensures that we
+will still shrink the anon and file inactive lists whenever we should.
+
+[kosaki.motohiro@jp.fujitsu.com: inactive_file_is_low() should be inactive_anon_is_low()]
+Reported-by: Larry Woodman <lwoodman@redhat.com>
+Signed-off-by: Rik van Riel <riel@redhat.com>
+Acked-by: Johannes Weiner <hannes@cmpxchg.org>
+Cc: Tomasz Chmielewski <mangoo@wpkg.org>
+Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Rik Theys <rik.theys@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ mm/vmscan.c |   18 ++++++++++++------
+ 1 file changed, 12 insertions(+), 6 deletions(-)
+
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -1464,20 +1464,26 @@ static int inactive_file_is_low(struct z
+       return low;
+ }
++static int inactive_list_is_low(struct zone *zone, struct scan_control *sc,
++                              int file)
++{
++      if (file)
++              return inactive_file_is_low(zone, sc);
++      else
++              return inactive_anon_is_low(zone, sc);
++}
++
+ static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
+       struct zone *zone, struct scan_control *sc, int priority)
+ {
+       int file = is_file_lru(lru);
+-      if (lru == LRU_ACTIVE_FILE && inactive_file_is_low(zone, sc)) {
+-              shrink_active_list(nr_to_scan, zone, sc, priority, file);
++      if (is_active_lru(lru)) {
++              if (inactive_list_is_low(zone, sc, file))
++                  shrink_active_list(nr_to_scan, zone, sc, priority, file);
+               return 0;
+       }
+-      if (lru == LRU_ACTIVE_ANON && inactive_anon_is_low(zone, sc)) {
+-              shrink_active_list(nr_to_scan, zone, sc, priority, file);
+-              return 0;
+-      }
+       return shrink_inactive_list(nr_to_scan, zone, sc, priority, file);
+ }
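
After the rewrite, shrink_list() asked to scan an active list can only age pages (or do nothing) and returns 0; it can no longer fall through into inactive-list reclaim. A compact model of the new dispatch (the inactive_list_is_low state below is made up for the demo):

    #include <stdio.h>

    enum lru { INACTIVE_ANON, ACTIVE_ANON, INACTIVE_FILE, ACTIVE_FILE };

    static int is_active_lru(enum lru l)
    {
        return l == ACTIVE_ANON || l == ACTIVE_FILE;
    }

    /* Sample state: inactive anon is low, inactive file is not. */
    static int inactive_list_is_low(int file)
    {
        return !file;
    }

    static unsigned long shrink_list(enum lru l)
    {
        int file = (l == INACTIVE_FILE || l == ACTIVE_FILE);

        if (is_active_lru(l)) {
            if (inactive_list_is_low(file))
                puts("age the active list");   /* shrink_active_list()    */
            return 0;                          /* never reclaim from here */
        }
        puts("reclaim the inactive list");     /* shrink_inactive_list()  */
        return 1;
    }

    int main(void)
    {
        shrink_list(ACTIVE_FILE);    /* inactive file not low: nothing */
        shrink_list(ACTIVE_ANON);    /* inactive anon low: aging only  */
        shrink_list(INACTIVE_ANON);  /* actual reclaim happens here    */
        return 0;
    }
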
diff --git a/queue-2.6.32/x86-msr-add-support-for-non-contiguous-cpumasks.patch b/queue-2.6.32/x86-msr-add-support-for-non-contiguous-cpumasks.patch
new file mode 100644 (file)
index 0000000..3b7dc51
--- /dev/null
@@ -0,0 +1,238 @@
+From 505422517d3f126bb939439e9d15dece94e11d2c Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <borislav.petkov@amd.com>
+Date: Fri, 11 Dec 2009 18:14:40 +0100
+Subject: x86, msr: Add support for non-contiguous cpumasks
+
+From: Borislav Petkov <borislav.petkov@amd.com>
+
+commit 505422517d3f126bb939439e9d15dece94e11d2c upstream.
+
+The current rd/wrmsr_on_cpus helpers assume that the supplied
+cpumasks are contiguous. However, there are machines out there
+like some K8 multinode Opterons which have a non-contiguous core
+enumeration on each node (e.g. cores 0,2 on node 0 instead of 0,1), see
+http://www.gossamer-threads.com/lists/linux/kernel/1160268.
+
+This patch fixes out-of-bounds writes (see URL above) by adding per-CPU
+msr structs which are used on the respective cores.
+
+Additionally, two helpers, msrs_{alloc,free}, are provided for use by
+the callers of the MSR accessors.
+
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Mauro Carvalho Chehab <mchehab@redhat.com>
+Cc: Aristeu Rozanski <aris@redhat.com>
+Cc: Randy Dunlap <randy.dunlap@oracle.com>
+Cc: Doug Thompson <dougthompson@xmission.com>
+Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
+LKML-Reference: <20091211171440.GD31998@aftab>
+Signed-off-by: H. Peter Anvin <hpa@zytor.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/include/asm/msr.h |    3 ++
+ arch/x86/lib/msr.c         |   26 +++++++++++++++++++++----
+ drivers/edac/amd64_edac.c  |   46 ++++++++++++++++-----------------------------
+ 3 files changed, 42 insertions(+), 33 deletions(-)
+
+--- a/arch/x86/include/asm/msr.h
++++ b/arch/x86/include/asm/msr.h
+@@ -244,6 +244,9 @@ do {                                    
+ #define write_rdtscp_aux(val) wrmsr(0xc0000103, (val), 0)
++struct msr *msrs_alloc(void);
++void msrs_free(struct msr *msrs);
++
+ #ifdef CONFIG_SMP
+ int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
+ int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
+--- a/arch/x86/lib/msr.c
++++ b/arch/x86/lib/msr.c
+@@ -7,7 +7,6 @@ struct msr_info {
+       u32 msr_no;
+       struct msr reg;
+       struct msr *msrs;
+-      int off;
+       int err;
+ };
+@@ -18,7 +17,7 @@ static void __rdmsr_on_cpu(void *info)
+       int this_cpu = raw_smp_processor_id();
+       if (rv->msrs)
+-              reg = &rv->msrs[this_cpu - rv->off];
++              reg = per_cpu_ptr(rv->msrs, this_cpu);
+       else
+               reg = &rv->reg;
+@@ -32,7 +31,7 @@ static void __wrmsr_on_cpu(void *info)
+       int this_cpu = raw_smp_processor_id();
+       if (rv->msrs)
+-              reg = &rv->msrs[this_cpu - rv->off];
++              reg = per_cpu_ptr(rv->msrs, this_cpu);
+       else
+               reg = &rv->reg;
+@@ -80,7 +79,6 @@ static void __rwmsr_on_cpus(const struct
+       memset(&rv, 0, sizeof(rv));
+-      rv.off    = cpumask_first(mask);
+       rv.msrs   = msrs;
+       rv.msr_no = msr_no;
+@@ -120,6 +118,26 @@ void wrmsr_on_cpus(const struct cpumask 
+ }
+ EXPORT_SYMBOL(wrmsr_on_cpus);
++struct msr *msrs_alloc(void)
++{
++      struct msr *msrs = NULL;
++
++      msrs = alloc_percpu(struct msr);
++      if (!msrs) {
++              pr_warning("%s: error allocating msrs\n", __func__);
++              return NULL;
++      }
++
++      return msrs;
++}
++EXPORT_SYMBOL(msrs_alloc);
++
++void msrs_free(struct msr *msrs)
++{
++      free_percpu(msrs);
++}
++EXPORT_SYMBOL(msrs_free);
++
+ /* These "safe" variants are slower and should be used when the target MSR
+    may not actually exist. */
+ static void __rdmsr_safe_on_cpu(void *info)
+--- a/drivers/edac/amd64_edac.c
++++ b/drivers/edac/amd64_edac.c
+@@ -13,6 +13,8 @@ module_param(report_gart_errors, int, 06
+ static int ecc_enable_override;
+ module_param(ecc_enable_override, int, 0644);
++static struct msr *msrs;
++
+ /* Lookup table for all possible MC control instances */
+ struct amd64_pvt;
+ static struct mem_ctl_info *mci_lookup[EDAC_MAX_NUMNODES];
+@@ -2632,8 +2634,7 @@ static void get_cpus_on_this_dct_cpumask
+ static bool amd64_nb_mce_bank_enabled_on_node(int nid)
+ {
+       cpumask_var_t mask;
+-      struct msr *msrs;
+-      int cpu, nbe, idx = 0;
++      int cpu, nbe;
+       bool ret = false;
+       if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
+@@ -2644,32 +2645,22 @@ static bool amd64_nb_mce_bank_enabled_on
+       get_cpus_on_this_dct_cpumask(mask, nid);
+-      msrs = kzalloc(sizeof(struct msr) * cpumask_weight(mask), GFP_KERNEL);
+-      if (!msrs) {
+-              amd64_printk(KERN_WARNING, "%s: error allocating msrs\n",
+-                            __func__);
+-              free_cpumask_var(mask);
+-               return false;
+-      }
+-
+       rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
+       for_each_cpu(cpu, mask) {
+-              nbe = msrs[idx].l & K8_MSR_MCGCTL_NBE;
++              struct msr *reg = per_cpu_ptr(msrs, cpu);
++              nbe = reg->l & K8_MSR_MCGCTL_NBE;
+               debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
+-                      cpu, msrs[idx].q,
++                      cpu, reg->q,
+                       (nbe ? "enabled" : "disabled"));
+               if (!nbe)
+                       goto out;
+-
+-              idx++;
+       }
+       ret = true;
+ out:
+-      kfree(msrs);
+       free_cpumask_var(mask);
+       return ret;
+ }
+@@ -2677,8 +2668,7 @@ out:
+ static int amd64_toggle_ecc_err_reporting(struct amd64_pvt *pvt, bool on)
+ {
+       cpumask_var_t cmask;
+-      struct msr *msrs = NULL;
+-      int cpu, idx = 0;
++      int cpu;
+       if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
+               amd64_printk(KERN_WARNING, "%s: error allocating mask\n",
+@@ -2688,34 +2678,27 @@ static int amd64_toggle_ecc_err_reportin
+       get_cpus_on_this_dct_cpumask(cmask, pvt->mc_node_id);
+-      msrs = kzalloc(sizeof(struct msr) * cpumask_weight(cmask), GFP_KERNEL);
+-      if (!msrs) {
+-              amd64_printk(KERN_WARNING, "%s: error allocating msrs\n",
+-                           __func__);
+-              return -ENOMEM;
+-      }
+-
+       rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
+       for_each_cpu(cpu, cmask) {
++              struct msr *reg = per_cpu_ptr(msrs, cpu);
++
+               if (on) {
+-                      if (msrs[idx].l & K8_MSR_MCGCTL_NBE)
++                      if (reg->l & K8_MSR_MCGCTL_NBE)
+                               pvt->flags.ecc_report = 1;
+-                      msrs[idx].l |= K8_MSR_MCGCTL_NBE;
++                      reg->l |= K8_MSR_MCGCTL_NBE;
+               } else {
+                       /*
+                        * Turn off ECC reporting only when it was off before
+                        */
+                       if (!pvt->flags.ecc_report)
+-                              msrs[idx].l &= ~K8_MSR_MCGCTL_NBE;
++                              reg->l &= ~K8_MSR_MCGCTL_NBE;
+               }
+-              idx++;
+       }
+       wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
+-      kfree(msrs);
+       free_cpumask_var(cmask);
+       return 0;
+@@ -3193,6 +3176,8 @@ static int __init amd64_edac_init(void)
+       if (cache_k8_northbridges() < 0)
+               return err;
++      msrs = msrs_alloc();
++
+       err = pci_register_driver(&amd64_pci_driver);
+       if (err)
+               return err;
+@@ -3228,6 +3213,9 @@ static void __exit amd64_edac_exit(void)
+               edac_pci_release_generic_ctl(amd64_ctl_pci);
+       pci_unregister_driver(&amd64_pci_driver);
++
++      msrs_free(msrs);
++      msrs = NULL;
+ }
+ module_init(amd64_edac_init);
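
The core of the fix: the old code sized a compact array by cpumask_weight() and indexed it with a running counter, which the remote MSR handlers cannot reproduce on masks with holes; per-CPU storage indexed by CPU number cannot misindex. A user-space illustration of the two schemes (4 CPUs, cores 0 and 2 in the mask, as in the K8 case cited above):

    #include <stdio.h>

    #define NCPUS 4

    struct msr { unsigned long val; };

    int main(void)
    {
        unsigned mask = (1u << 0) | (1u << 2);   /* cores 0 and 2 only   */
        int first = 0, weight = 2;               /* cpumask_first/weight */

        /* Old scheme: compact array of 'weight' slots, slot = cpu - first.
         * CPU 2 lands in slot 2 of a 2-slot array: out of bounds.       */
        for (int cpu = 0; cpu < NCPUS; cpu++)
            if (mask & (1u << cpu))
                printf("compact: cpu %d -> slot %d of %d%s\n",
                       cpu, cpu - first, weight,
                       (cpu - first) >= weight ? "  <-- overflow" : "");

        /* New scheme: one slot per CPU, indexed by CPU number, like
         * per_cpu_ptr(msrs, cpu). A hole in the mask is simply unused.  */
        struct msr percpu[NCPUS] = { { 0 } };
        for (int cpu = 0; cpu < NCPUS; cpu++)
            if (mask & (1u << cpu))
                percpu[cpu].val = 0xdead;
        printf("per-cpu: cpu 2 uses its own slot, val=%#lx\n", percpu[2].val);
        return 0;
    }
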
diff --git a/queue-2.6.32/x86-msr-msrs_alloc-free-for-config_smp-n.patch b/queue-2.6.32/x86-msr-msrs_alloc-free-for-config_smp-n.patch
new file mode 100644 (file)
index 0000000..77037b6
--- /dev/null
@@ -0,0 +1,509 @@
+From 6ede31e03084ee084bcee073ef3d1136f68d0906 Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <petkovbb@googlemail.com>
+Date: Thu, 17 Dec 2009 00:16:25 +0100
+Subject: x86, msr: msrs_alloc/free for CONFIG_SMP=n
+
+From: Borislav Petkov <petkovbb@googlemail.com>
+
+commit 6ede31e03084ee084bcee073ef3d1136f68d0906 upstream.
+
+Randy Dunlap reported the following build error:
+
+"When CONFIG_SMP=n, CONFIG_X86_MSR=m:
+
+ERROR: "msrs_free" [drivers/edac/amd64_edac_mod.ko] undefined!
+ERROR: "msrs_alloc" [drivers/edac/amd64_edac_mod.ko] undefined!"
+
+This is due to the fact that <arch/x86/lib/msr.c> is conditioned on
+CONFIG_SMP and in the UP case we have only the stubs in the header.
+Fork off SMP functionality into a new file (msr-smp.c) and build
+msrs_{alloc,free} unconditionally.
+
+Reported-by: Randy Dunlap <randy.dunlap@oracle.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Signed-off-by: Borislav Petkov <petkovbb@gmail.com>
+LKML-Reference: <20091216231625.GD27228@liondog.tnic>
+Signed-off-by: H. Peter Anvin <hpa@zytor.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/include/asm/msr.h |   12 ++
+ arch/x86/lib/Makefile      |    4 
+ arch/x86/lib/msr-smp.c     |  204 +++++++++++++++++++++++++++++++++++++++++++
+ arch/x86/lib/msr.c         |  213 ---------------------------------------------
+ 4 files changed, 218 insertions(+), 215 deletions(-)
+
+--- a/arch/x86/include/asm/msr.h
++++ b/arch/x86/include/asm/msr.h
+@@ -27,6 +27,18 @@ struct msr {
+       };
+ };
++struct msr_info {
++      u32 msr_no;
++      struct msr reg;
++      struct msr *msrs;
++      int err;
++};
++
++struct msr_regs_info {
++      u32 *regs;
++      int err;
++};
++
+ static inline unsigned long long native_read_tscp(unsigned int *aux)
+ {
+       unsigned long low, high;
+--- a/arch/x86/lib/Makefile
++++ b/arch/x86/lib/Makefile
+@@ -2,14 +2,14 @@
+ # Makefile for x86 specific library files.
+ #
+-obj-$(CONFIG_SMP) := msr.o
++obj-$(CONFIG_SMP) += msr-smp.o
+ lib-y := delay.o
+ lib-y += thunk_$(BITS).o
+ lib-y += usercopy_$(BITS).o getuser.o putuser.o
+ lib-y += memcpy_$(BITS).o
+-obj-y += msr-reg.o msr-reg-export.o
++obj-y += msr.o msr-reg.o msr-reg-export.o
+ ifeq ($(CONFIG_X86_32),y)
+         obj-y += atomic64_32.o
+--- a/arch/x86/lib/msr.c
++++ b/arch/x86/lib/msr.c
+@@ -1,123 +1,7 @@
+ #include <linux/module.h>
+ #include <linux/preempt.h>
+-#include <linux/smp.h>
+ #include <asm/msr.h>
+-struct msr_info {
+-      u32 msr_no;
+-      struct msr reg;
+-      struct msr *msrs;
+-      int err;
+-};
+-
+-static void __rdmsr_on_cpu(void *info)
+-{
+-      struct msr_info *rv = info;
+-      struct msr *reg;
+-      int this_cpu = raw_smp_processor_id();
+-
+-      if (rv->msrs)
+-              reg = per_cpu_ptr(rv->msrs, this_cpu);
+-      else
+-              reg = &rv->reg;
+-
+-      rdmsr(rv->msr_no, reg->l, reg->h);
+-}
+-
+-static void __wrmsr_on_cpu(void *info)
+-{
+-      struct msr_info *rv = info;
+-      struct msr *reg;
+-      int this_cpu = raw_smp_processor_id();
+-
+-      if (rv->msrs)
+-              reg = per_cpu_ptr(rv->msrs, this_cpu);
+-      else
+-              reg = &rv->reg;
+-
+-      wrmsr(rv->msr_no, reg->l, reg->h);
+-}
+-
+-int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
+-{
+-      int err;
+-      struct msr_info rv;
+-
+-      memset(&rv, 0, sizeof(rv));
+-
+-      rv.msr_no = msr_no;
+-      err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
+-      *l = rv.reg.l;
+-      *h = rv.reg.h;
+-
+-      return err;
+-}
+-EXPORT_SYMBOL(rdmsr_on_cpu);
+-
+-int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
+-{
+-      int err;
+-      struct msr_info rv;
+-
+-      memset(&rv, 0, sizeof(rv));
+-
+-      rv.msr_no = msr_no;
+-      rv.reg.l = l;
+-      rv.reg.h = h;
+-      err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
+-
+-      return err;
+-}
+-EXPORT_SYMBOL(wrmsr_on_cpu);
+-
+-static void __rwmsr_on_cpus(const struct cpumask *mask, u32 msr_no,
+-                          struct msr *msrs,
+-                          void (*msr_func) (void *info))
+-{
+-      struct msr_info rv;
+-      int this_cpu;
+-
+-      memset(&rv, 0, sizeof(rv));
+-
+-      rv.msrs   = msrs;
+-      rv.msr_no = msr_no;
+-
+-      this_cpu = get_cpu();
+-
+-      if (cpumask_test_cpu(this_cpu, mask))
+-              msr_func(&rv);
+-
+-      smp_call_function_many(mask, msr_func, &rv, 1);
+-      put_cpu();
+-}
+-
+-/* rdmsr on a bunch of CPUs
+- *
+- * @mask:       which CPUs
+- * @msr_no:     which MSR
+- * @msrs:       array of MSR values
+- *
+- */
+-void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
+-{
+-      __rwmsr_on_cpus(mask, msr_no, msrs, __rdmsr_on_cpu);
+-}
+-EXPORT_SYMBOL(rdmsr_on_cpus);
+-
+-/*
+- * wrmsr on a bunch of CPUs
+- *
+- * @mask:       which CPUs
+- * @msr_no:     which MSR
+- * @msrs:       array of MSR values
+- *
+- */
+-void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
+-{
+-      __rwmsr_on_cpus(mask, msr_no, msrs, __wrmsr_on_cpu);
+-}
+-EXPORT_SYMBOL(wrmsr_on_cpus);
+-
+ struct msr *msrs_alloc(void)
+ {
+       struct msr *msrs = NULL;
+@@ -137,100 +21,3 @@ void msrs_free(struct msr *msrs)
+       free_percpu(msrs);
+ }
+ EXPORT_SYMBOL(msrs_free);
+-
+-/* These "safe" variants are slower and should be used when the target MSR
+-   may not actually exist. */
+-static void __rdmsr_safe_on_cpu(void *info)
+-{
+-      struct msr_info *rv = info;
+-
+-      rv->err = rdmsr_safe(rv->msr_no, &rv->reg.l, &rv->reg.h);
+-}
+-
+-static void __wrmsr_safe_on_cpu(void *info)
+-{
+-      struct msr_info *rv = info;
+-
+-      rv->err = wrmsr_safe(rv->msr_no, rv->reg.l, rv->reg.h);
+-}
+-
+-int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
+-{
+-      int err;
+-      struct msr_info rv;
+-
+-      memset(&rv, 0, sizeof(rv));
+-
+-      rv.msr_no = msr_no;
+-      err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1);
+-      *l = rv.reg.l;
+-      *h = rv.reg.h;
+-
+-      return err ? err : rv.err;
+-}
+-EXPORT_SYMBOL(rdmsr_safe_on_cpu);
+-
+-int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
+-{
+-      int err;
+-      struct msr_info rv;
+-
+-      memset(&rv, 0, sizeof(rv));
+-
+-      rv.msr_no = msr_no;
+-      rv.reg.l = l;
+-      rv.reg.h = h;
+-      err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);
+-
+-      return err ? err : rv.err;
+-}
+-EXPORT_SYMBOL(wrmsr_safe_on_cpu);
+-
+-/*
+- * These variants are significantly slower, but allows control over
+- * the entire 32-bit GPR set.
+- */
+-struct msr_regs_info {
+-      u32 *regs;
+-      int err;
+-};
+-
+-static void __rdmsr_safe_regs_on_cpu(void *info)
+-{
+-      struct msr_regs_info *rv = info;
+-
+-      rv->err = rdmsr_safe_regs(rv->regs);
+-}
+-
+-static void __wrmsr_safe_regs_on_cpu(void *info)
+-{
+-      struct msr_regs_info *rv = info;
+-
+-      rv->err = wrmsr_safe_regs(rv->regs);
+-}
+-
+-int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
+-{
+-      int err;
+-      struct msr_regs_info rv;
+-
+-      rv.regs   = regs;
+-      rv.err    = -EIO;
+-      err = smp_call_function_single(cpu, __rdmsr_safe_regs_on_cpu, &rv, 1);
+-
+-      return err ? err : rv.err;
+-}
+-EXPORT_SYMBOL(rdmsr_safe_regs_on_cpu);
+-
+-int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
+-{
+-      int err;
+-      struct msr_regs_info rv;
+-
+-      rv.regs = regs;
+-      rv.err  = -EIO;
+-      err = smp_call_function_single(cpu, __wrmsr_safe_regs_on_cpu, &rv, 1);
+-
+-      return err ? err : rv.err;
+-}
+-EXPORT_SYMBOL(wrmsr_safe_regs_on_cpu);
+--- /dev/null
++++ b/arch/x86/lib/msr-smp.c
+@@ -0,0 +1,204 @@
++#include <linux/module.h>
++#include <linux/preempt.h>
++#include <linux/smp.h>
++#include <asm/msr.h>
++
++static void __rdmsr_on_cpu(void *info)
++{
++      struct msr_info *rv = info;
++      struct msr *reg;
++      int this_cpu = raw_smp_processor_id();
++
++      if (rv->msrs)
++              reg = per_cpu_ptr(rv->msrs, this_cpu);
++      else
++              reg = &rv->reg;
++
++      rdmsr(rv->msr_no, reg->l, reg->h);
++}
++
++static void __wrmsr_on_cpu(void *info)
++{
++      struct msr_info *rv = info;
++      struct msr *reg;
++      int this_cpu = raw_smp_processor_id();
++
++      if (rv->msrs)
++              reg = per_cpu_ptr(rv->msrs, this_cpu);
++      else
++              reg = &rv->reg;
++
++      wrmsr(rv->msr_no, reg->l, reg->h);
++}
++
++int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
++{
++      int err;
++      struct msr_info rv;
++
++      memset(&rv, 0, sizeof(rv));
++
++      rv.msr_no = msr_no;
++      err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
++      *l = rv.reg.l;
++      *h = rv.reg.h;
++
++      return err;
++}
++EXPORT_SYMBOL(rdmsr_on_cpu);
++
++int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
++{
++      int err;
++      struct msr_info rv;
++
++      memset(&rv, 0, sizeof(rv));
++
++      rv.msr_no = msr_no;
++      rv.reg.l = l;
++      rv.reg.h = h;
++      err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
++
++      return err;
++}
++EXPORT_SYMBOL(wrmsr_on_cpu);
++
++static void __rwmsr_on_cpus(const struct cpumask *mask, u32 msr_no,
++                          struct msr *msrs,
++                          void (*msr_func) (void *info))
++{
++      struct msr_info rv;
++      int this_cpu;
++
++      memset(&rv, 0, sizeof(rv));
++
++      rv.msrs   = msrs;
++      rv.msr_no = msr_no;
++
++      this_cpu = get_cpu();
++
++      if (cpumask_test_cpu(this_cpu, mask))
++              msr_func(&rv);
++
++      smp_call_function_many(mask, msr_func, &rv, 1);
++      put_cpu();
++}
++
++/* rdmsr on a bunch of CPUs
++ *
++ * @mask:       which CPUs
++ * @msr_no:     which MSR
++ * @msrs:       array of MSR values
++ *
++ */
++void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
++{
++      __rwmsr_on_cpus(mask, msr_no, msrs, __rdmsr_on_cpu);
++}
++EXPORT_SYMBOL(rdmsr_on_cpus);
++
++/*
++ * wrmsr on a bunch of CPUs
++ *
++ * @mask:       which CPUs
++ * @msr_no:     which MSR
++ * @msrs:       array of MSR values
++ *
++ */
++void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
++{
++      __rwmsr_on_cpus(mask, msr_no, msrs, __wrmsr_on_cpu);
++}
++EXPORT_SYMBOL(wrmsr_on_cpus);
++
++/* These "safe" variants are slower and should be used when the target MSR
++   may not actually exist. */
++static void __rdmsr_safe_on_cpu(void *info)
++{
++      struct msr_info *rv = info;
++
++      rv->err = rdmsr_safe(rv->msr_no, &rv->reg.l, &rv->reg.h);
++}
++
++static void __wrmsr_safe_on_cpu(void *info)
++{
++      struct msr_info *rv = info;
++
++      rv->err = wrmsr_safe(rv->msr_no, rv->reg.l, rv->reg.h);
++}
++
++int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
++{
++      int err;
++      struct msr_info rv;
++
++      memset(&rv, 0, sizeof(rv));
++
++      rv.msr_no = msr_no;
++      err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1);
++      *l = rv.reg.l;
++      *h = rv.reg.h;
++
++      return err ? err : rv.err;
++}
++EXPORT_SYMBOL(rdmsr_safe_on_cpu);
++
++int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
++{
++      int err;
++      struct msr_info rv;
++
++      memset(&rv, 0, sizeof(rv));
++
++      rv.msr_no = msr_no;
++      rv.reg.l = l;
++      rv.reg.h = h;
++      err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);
++
++      return err ? err : rv.err;
++}
++EXPORT_SYMBOL(wrmsr_safe_on_cpu);
++
++/*
++ * These variants are significantly slower, but allows control over
++ * the entire 32-bit GPR set.
++ */
++static void __rdmsr_safe_regs_on_cpu(void *info)
++{
++      struct msr_regs_info *rv = info;
++
++      rv->err = rdmsr_safe_regs(rv->regs);
++}
++
++static void __wrmsr_safe_regs_on_cpu(void *info)
++{
++      struct msr_regs_info *rv = info;
++
++      rv->err = wrmsr_safe_regs(rv->regs);
++}
++
++int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
++{
++      int err;
++      struct msr_regs_info rv;
++
++      rv.regs   = regs;
++      rv.err    = -EIO;
++      err = smp_call_function_single(cpu, __rdmsr_safe_regs_on_cpu, &rv, 1);
++
++      return err ? err : rv.err;
++}
++EXPORT_SYMBOL(rdmsr_safe_regs_on_cpu);
++
++int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
++{
++      int err;
++      struct msr_regs_info rv;
++
++      rv.regs = regs;
++      rv.err  = -EIO;
++      err = smp_call_function_single(cpu, __wrmsr_safe_regs_on_cpu, &rv, 1);
++
++      return err ? err : rv.err;
++}
++EXPORT_SYMBOL(wrmsr_safe_regs_on_cpu);
diff --git a/queue-2.6.32/x86-msr-unify-rdmsr_on_cpus-wrmsr_on_cpus.patch b/queue-2.6.32/x86-msr-unify-rdmsr_on_cpus-wrmsr_on_cpus.patch
new file mode 100644 (file)
index 0000000..372c8b1
--- /dev/null
@@ -0,0 +1,110 @@
+From b8a4754147d61f5359a765a3afd3eb03012aa052 Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <borislav.petkov@amd.com>
+Date: Thu, 30 Jul 2009 11:10:02 +0200
+Subject: x86, msr: Unify rdmsr_on_cpus/wrmsr_on_cpus
+
+From: Borislav Petkov <borislav.petkov@amd.com>
+
+commit b8a4754147d61f5359a765a3afd3eb03012aa052 upstream.
+
+Since rdmsr_on_cpus and wrmsr_on_cpus are almost identical, unify them
+into a common __rwmsr_on_cpus helper, thus avoiding code duplication.
+
+While at it, convert cpumask_t's to const struct cpumask *.
+
+Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
+Signed-off-by: H. Peter Anvin <hpa@zytor.com>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/include/asm/msr.h |    4 +--
+ arch/x86/lib/msr.c         |   46 ++++++++++++++++++---------------------------
+ 2 files changed, 21 insertions(+), 29 deletions(-)
+
+--- a/arch/x86/include/asm/msr.h
++++ b/arch/x86/include/asm/msr.h
+@@ -247,8 +247,8 @@ do {                                    
+ #ifdef CONFIG_SMP
+ int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
+ int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
+-void rdmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs);
+-void wrmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs);
++void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
++void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
+ int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
+ int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
+ int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
+--- a/arch/x86/lib/msr.c
++++ b/arch/x86/lib/msr.c
+@@ -71,14 +71,9 @@ int wrmsr_on_cpu(unsigned int cpu, u32 m
+ }
+ EXPORT_SYMBOL(wrmsr_on_cpu);
+-/* rdmsr on a bunch of CPUs
+- *
+- * @mask:       which CPUs
+- * @msr_no:     which MSR
+- * @msrs:       array of MSR values
+- *
+- */
+-void rdmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs)
++static void __rwmsr_on_cpus(const struct cpumask *mask, u32 msr_no,
++                          struct msr *msrs,
++                          void (*msr_func) (void *info))
+ {
+       struct msr_info rv;
+       int this_cpu;
+@@ -92,11 +87,23 @@ void rdmsr_on_cpus(const cpumask_t *mask
+       this_cpu = get_cpu();
+       if (cpumask_test_cpu(this_cpu, mask))
+-              __rdmsr_on_cpu(&rv);
++              msr_func(&rv);
+-      smp_call_function_many(mask, __rdmsr_on_cpu, &rv, 1);
++      smp_call_function_many(mask, msr_func, &rv, 1);
+       put_cpu();
+ }
++
++/* rdmsr on a bunch of CPUs
++ *
++ * @mask:       which CPUs
++ * @msr_no:     which MSR
++ * @msrs:       array of MSR values
++ *
++ */
++void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
++{
++      __rwmsr_on_cpus(mask, msr_no, msrs, __rdmsr_on_cpu);
++}
+ EXPORT_SYMBOL(rdmsr_on_cpus);
+ /*
+@@ -107,24 +114,9 @@ EXPORT_SYMBOL(rdmsr_on_cpus);
+  * @msrs:       array of MSR values
+  *
+  */
+-void wrmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs)
++void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
+ {
+-      struct msr_info rv;
+-      int this_cpu;
+-
+-      memset(&rv, 0, sizeof(rv));
+-
+-      rv.off    = cpumask_first(mask);
+-      rv.msrs   = msrs;
+-      rv.msr_no = msr_no;
+-
+-      this_cpu = get_cpu();
+-
+-      if (cpumask_test_cpu(this_cpu, mask))
+-              __wrmsr_on_cpu(&rv);
+-
+-      smp_call_function_many(mask, __wrmsr_on_cpu, &rv, 1);
+-      put_cpu();
++      __rwmsr_on_cpus(mask, msr_no, msrs, __wrmsr_on_cpu);
+ }
+ EXPORT_SYMBOL(wrmsr_on_cpus);
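
The unification is the classic collapse of two near-identical loops into one helper that takes the per-CPU operation as a function pointer. A stripped-down model of the shape (a bitmask loop stands in for smp_call_function_many; names invented for the sketch):

    #include <stdio.h>

    static void do_rdmsr(void *info) { (void)info; puts("rdmsr on one cpu"); }
    static void do_wrmsr(void *info) { (void)info; puts("wrmsr on one cpu"); }

    /* One body, parameterised by the per-CPU operation, as in
     * __rwmsr_on_cpus().                                       */
    static void rw_on_cpus(unsigned mask, void (*msr_func)(void *info))
    {
        for (int cpu = 0; cpu < 8; cpu++)
            if (mask & (1u << cpu))
                msr_func(NULL);
    }

    int main(void)
    {
        rw_on_cpus(0x3, do_rdmsr);      /* rdmsr_on_cpus() */
        rw_on_cpus(0x3, do_wrmsr);      /* wrmsr_on_cpus() */
        return 0;
    }
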
diff --git a/queue-2.6.32/x86-ptrace-make-genregs_get-set-more-robust.patch b/queue-2.6.32/x86-ptrace-make-genregs_get-set-more-robust.patch
new file mode 100644 (file)
index 0000000..043d585
--- /dev/null
@@ -0,0 +1,93 @@
+From 04a1e62c2cec820501f93526ad1e46073b802dc4 Mon Sep 17 00:00:00 2001
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Thu, 17 Dec 2009 07:04:56 -0800
+Subject: x86/ptrace: make genregs[32]_get/set more robust
+
+From: Linus Torvalds <torvalds@linux-foundation.org>
+
+commit 04a1e62c2cec820501f93526ad1e46073b802dc4 upstream.
+
+The loop condition is fragile: we compare an unsigned value to zero, and
+then decrement it by something larger than one in the loop.  All the
+callers should be passing in appropriately aligned buffer lengths, but
+it's better to just not rely on it, and have some appropriate defensive
+loop limits.
+
+Acked-by: Roland McGrath <roland@redhat.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kernel/ptrace.c |   16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+--- a/arch/x86/kernel/ptrace.c
++++ b/arch/x86/kernel/ptrace.c
+@@ -408,14 +408,14 @@ static int genregs_get(struct task_struc
+ {
+       if (kbuf) {
+               unsigned long *k = kbuf;
+-              while (count > 0) {
++              while (count >= sizeof(*k)) {
+                       *k++ = getreg(target, pos);
+                       count -= sizeof(*k);
+                       pos += sizeof(*k);
+               }
+       } else {
+               unsigned long __user *u = ubuf;
+-              while (count > 0) {
++              while (count >= sizeof(*u)) {
+                       if (__put_user(getreg(target, pos), u++))
+                               return -EFAULT;
+                       count -= sizeof(*u);
+@@ -434,14 +434,14 @@ static int genregs_set(struct task_struc
+       int ret = 0;
+       if (kbuf) {
+               const unsigned long *k = kbuf;
+-              while (count > 0 && !ret) {
++              while (count >= sizeof(*k) && !ret) {
+                       ret = putreg(target, pos, *k++);
+                       count -= sizeof(*k);
+                       pos += sizeof(*k);
+               }
+       } else {
+               const unsigned long  __user *u = ubuf;
+-              while (count > 0 && !ret) {
++              while (count >= sizeof(*u) && !ret) {
+                       unsigned long word;
+                       ret = __get_user(word, u++);
+                       if (ret)
+@@ -1219,14 +1219,14 @@ static int genregs32_get(struct task_str
+ {
+       if (kbuf) {
+               compat_ulong_t *k = kbuf;
+-              while (count > 0) {
++              while (count >= sizeof(*k)) {
+                       getreg32(target, pos, k++);
+                       count -= sizeof(*k);
+                       pos += sizeof(*k);
+               }
+       } else {
+               compat_ulong_t __user *u = ubuf;
+-              while (count > 0) {
++              while (count >= sizeof(*u)) {
+                       compat_ulong_t word;
+                       getreg32(target, pos, &word);
+                       if (__put_user(word, u++))
+@@ -1247,14 +1247,14 @@ static int genregs32_set(struct task_str
+       int ret = 0;
+       if (kbuf) {
+               const compat_ulong_t *k = kbuf;
+-              while (count > 0 && !ret) {
++              while (count >= sizeof(*k) && !ret) {
+                       ret = putreg32(target, pos, *k++);
+                       count -= sizeof(*k);
+                       pos += sizeof(*k);
+               }
+       } else {
+               const compat_ulong_t __user *u = ubuf;
+-              while (count > 0 && !ret) {
++              while (count >= sizeof(*u) && !ret) {
+                       compat_ulong_t word;
+                       ret = __get_user(word, u++);
+                       if (ret)
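
The hazard being fixed: count is unsigned, so once it holds a remainder smaller than the word size, "while (count > 0)" lets the loop subtract past zero and wrap to a huge value. A small demonstration of the corrected condition:

    #include <stdio.h>

    int main(void)
    {
        unsigned int count = 10;       /* not a multiple of the word size */
        int words = 0;

        /* Fixed condition: stop as soon as less than one word remains. */
        while (count >= sizeof(long)) {
            count -= sizeof(long);
            words++;
        }
        printf("copied %d words, %u stray bytes ignored\n", words, count);

        /* The old "while (count > 0)" would run once more: on LP64,
         * 2 - 8 wraps to 4294967290 and the loop marches far past
         * the buffer.                                              */
        return 0;
    }
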
diff --git a/queue-2.6.32/xen-fix-is_disconnected_device-exists_disconnected_device.patch b/queue-2.6.32/xen-fix-is_disconnected_device-exists_disconnected_device.patch
new file mode 100644 (file)
index 0000000..30705ff
--- /dev/null
@@ -0,0 +1,68 @@
+From c6e1971139be1342902873181f3b80a979bfb33b Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Wed, 8 Jul 2009 12:27:37 +0200
+Subject: xen: fix is_disconnected_device/exists_disconnected_device
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit c6e1971139be1342902873181f3b80a979bfb33b upstream.
+
+The logic of is_disconnected_device/exists_disconnected_device is wrong:
+they are used to test whether a device is still trying to connect (i.e.
+connecting).  For this reason the patch fixes them to not consider a
+Closing or Closed device to be connecting.  At the same time the patch
+also renames the functions according to what they really do; you could
+say a closed device is "disconnected" (the old name), but not "connecting"
+(the new name).
+
+This patch is a backport of changeset 909 from the Xenbits tree.
+
+Cc: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/xen/xenbus/xenbus_probe.c |   13 +++++++------
+ 1 file changed, 7 insertions(+), 6 deletions(-)
+
+--- a/drivers/xen/xenbus/xenbus_probe.c
++++ b/drivers/xen/xenbus/xenbus_probe.c
+@@ -843,7 +843,7 @@ postcore_initcall(xenbus_probe_init);
+ MODULE_LICENSE("GPL");
+-static int is_disconnected_device(struct device *dev, void *data)
++static int is_device_connecting(struct device *dev, void *data)
+ {
+       struct xenbus_device *xendev = to_xenbus_device(dev);
+       struct device_driver *drv = data;
+@@ -861,14 +861,15 @@ static int is_disconnected_device(struct
+               return 0;
+       xendrv = to_xenbus_driver(dev->driver);
+-      return (xendev->state != XenbusStateConnected ||
+-              (xendrv->is_ready && !xendrv->is_ready(xendev)));
++      return (xendev->state < XenbusStateConnected ||
++              (xendev->state == XenbusStateConnected &&
++               xendrv->is_ready && !xendrv->is_ready(xendev)));
+ }
+-static int exists_disconnected_device(struct device_driver *drv)
++static int exists_connecting_device(struct device_driver *drv)
+ {
+       return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
+-                              is_disconnected_device);
++                              is_device_connecting);
+ }
+ static int print_device_status(struct device *dev, void *data)
+@@ -918,7 +919,7 @@ static void wait_for_devices(struct xenb
+       if (!ready_to_wait_for_devices || !xen_domain())
+               return;
+-      while (exists_disconnected_device(drv)) {
++      while (exists_connecting_device(drv)) {
+               if (time_after(jiffies, timeout))
+                       break;
+               schedule_timeout_interruptible(HZ/10);
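
The crux is that xenbus states are ordered, with Closing and Closed sorting after Connected, so "still connecting" is state < Connected; the old state != Connected test kept waiting for devices that had already shut down. A small model (the enum values mirror enum xenbus_state):

    #include <stdio.h>

    enum state { Unknown, Initialising, InitWait, Initialised,
                 Connected, Closing, Closed };

    static int is_connecting(enum state s) { return s < Connected;  } /* new */
    static int old_test(enum state s)      { return s != Connected; } /* old */

    int main(void)
    {
        printf("InitWait: new=%d old=%d\n",
               is_connecting(InitWait), old_test(InitWait));
        printf("Closed:   new=%d old=%d  <- old test waits forever\n",
               is_connecting(Closed), old_test(Closed));
        return 0;
    }
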
diff --git a/queue-2.6.32/xen-improvement-to-wait_for_devices.patch b/queue-2.6.32/xen-improvement-to-wait_for_devices.patch
new file mode 100644 (file)
index 0000000..7e7bcf5
--- /dev/null
@@ -0,0 +1,42 @@
+From f8dc33088febc63286b7a60e6b678de8e064de8e Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Wed, 8 Jul 2009 12:27:38 +0200
+Subject: xen: improvement to wait_for_devices()
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit f8dc33088febc63286b7a60e6b678de8e064de8e upstream.
+
+When printing a warning about a timed-out device, print the
+current state of both ends of the device connection (i.e., backend as
+well as frontend).  This backports half of changeset 146 from the
+Xenbits tree.
+
+Cc: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/xen/xenbus/xenbus_probe.c |    9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/drivers/xen/xenbus/xenbus_probe.c
++++ b/drivers/xen/xenbus/xenbus_probe.c
+@@ -885,10 +885,13 @@ static int print_device_status(struct de
+               /* Information only: is this too noisy? */
+               printk(KERN_INFO "XENBUS: Device with no driver: %s\n",
+                      xendev->nodename);
+-      } else if (xendev->state != XenbusStateConnected) {
++      } else if (xendev->state < XenbusStateConnected) {
++              enum xenbus_state rstate = XenbusStateUnknown;
++              if (xendev->otherend)
++                      rstate = xenbus_read_driver_state(xendev->otherend);
+               printk(KERN_WARNING "XENBUS: Timeout connecting "
+-                     "to device: %s (state %d)\n",
+-                     xendev->nodename, xendev->state);
++                     "to device: %s (local state %d, remote state %d)\n",
++                     xendev->nodename, xendev->state, rstate);
+       }
+       return 0;
diff --git a/queue-2.6.32/xen-wait-up-to-5-minutes-for-device-connetion.patch b/queue-2.6.32/xen-wait-up-to-5-minutes-for-device-connetion.patch
new file mode 100644 (file)
index 0000000..339177d
--- /dev/null
@@ -0,0 +1,67 @@
+From ae7888012969355a548372e99b066d9e31153b62 Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Wed, 8 Jul 2009 12:27:39 +0200
+Subject: xen: wait up to 5 minutes for device connection
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit ae7888012969355a548372e99b066d9e31153b62 upstream.
+
+Increases the device timeout from 10s to 5 minutes, giving the user a
+visual indication during that time in case there are problems.  The patch
+is a backport of changesets 144 and 150 in the Xenbits tree.
+
+Cc: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/xen/xenbus/xenbus_probe.c |   20 ++++++++++++++++----
+ 1 file changed, 16 insertions(+), 4 deletions(-)
+
+--- a/drivers/xen/xenbus/xenbus_probe.c
++++ b/drivers/xen/xenbus/xenbus_probe.c
+@@ -901,7 +901,7 @@ static int print_device_status(struct de
+ static int ready_to_wait_for_devices;
+ /*
+- * On a 10 second timeout, wait for all devices currently configured.  We need
++ * On a 5-minute timeout, wait for all devices currently configured.  We need
+  * to do this to guarantee that the filesystems and / or network devices
+  * needed for boot are available, before we can allow the boot to proceed.
+  *
+@@ -916,18 +916,30 @@ static int ready_to_wait_for_devices;
+  */
+ static void wait_for_devices(struct xenbus_driver *xendrv)
+ {
+-      unsigned long timeout = jiffies + 10*HZ;
++      unsigned long start = jiffies;
+       struct device_driver *drv = xendrv ? &xendrv->driver : NULL;
++      unsigned int seconds_waited = 0;
+       if (!ready_to_wait_for_devices || !xen_domain())
+               return;
+       while (exists_connecting_device(drv)) {
+-              if (time_after(jiffies, timeout))
+-                      break;
++              if (time_after(jiffies, start + (seconds_waited+5)*HZ)) {
++                      if (!seconds_waited)
++                              printk(KERN_WARNING "XENBUS: Waiting for "
++                                     "devices to initialise: ");
++                      seconds_waited += 5;
++                      printk("%us...", 300 - seconds_waited);
++                      if (seconds_waited == 300)
++                              break;
++              }
++
+               schedule_timeout_interruptible(HZ/10);
+       }
++      if (seconds_waited)
++              printk("\n");
++
+       bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
+                        print_device_status);
+ }
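
The new loop trades the old silent 10-second timeout for a 5-minute one with a visible countdown every 5 seconds. A user-space model of its shape (device_connecting stands in for exists_connecting_device; shrink the 300 to try it out):

    #include <stdio.h>
    #include <time.h>

    static int device_connecting(void) { return 1; /* never ready, for demo */ }

    int main(void)
    {
        time_t start = time(NULL);
        unsigned int waited = 0;

        while (device_connecting()) {
            if (time(NULL) >= start + (time_t)(waited + 5)) {
                if (!waited)
                    printf("Waiting for devices to initialise: ");
                waited += 5;
                printf("%us...", 300 - waited);
                fflush(stdout);
                if (waited == 300)
                    break;
            }
            /* kernel sleeps HZ/10 here; nanosleep keeps the model polite */
            nanosleep(&(struct timespec){ 0, 100 * 1000 * 1000 }, NULL);
        }
        if (waited)
            printf("\n");
        return 0;
    }
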