--- /dev/null
+From stable-bounces@linux.kernel.org Thu Jul 6 04:21:10 2006
+Message-Id: <200607061119.k66BJhq1023227@shell0.pdx.osdl.net>
+To: torvalds@osdl.org
+From: akpm@osdl.org
+Date: Thu, 06 Jul 2006 04:19:43 -0700
+Cc: akpm@osdl.org, meissner@suse.de, axboe@suse.de, stable@kernel.org
+Subject: cdrom: fix bad cgc.buflen assignment
+
+From: Jens Axboe <axboe@suse.de>
+
+The code really means to mask off the high bits, not assign 0xff.
+
+Signed-off-by: Jens Axboe <axboe@suse.de>
+Cc: Marcus Meissner <meissner@suse.de>
+Signed-off-by: Andrew Morton <akpm@osdl.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/cdrom/cdrom.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- linux-2.6.17.4.orig/drivers/cdrom/cdrom.c
++++ linux-2.6.17.4/drivers/cdrom/cdrom.c
+@@ -1838,7 +1838,7 @@ static int dvd_read_bca(struct cdrom_dev
+ init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
+ cgc.cmd[0] = GPCMD_READ_DVD_STRUCTURE;
+ cgc.cmd[7] = s->type;
+- cgc.cmd[9] = cgc.buflen = 0xff;
++ cgc.cmd[9] = cgc.buflen & 0xff;
+
+ if ((ret = cdo->generic_packet(cdi, &cgc)))
+ return ret;
--- /dev/null
+From stable-bounces@linux.kernel.org Fri Jun 30 19:10:22 2006
+Date: Fri, 23 Jun 2006 06:10:02 GMT
+Message-Id: <200606230610.k5N6A2PQ015237@hera.kernel.org>
+From: Linux Kernel Mailing List <linux-kernel@vger.kernel.org>
+To: git-commits-head@vger.kernel.org
+Cc:
+Subject: [CPUFREQ] Fix ondemand vs suspend deadlock
+
+From: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
+
+[CPUFREQ] Fix ondemand vs suspend deadlock
+
+Root-caused the bug to a deadlock in cpufreq and ondemand. Due to non-existent
+ordering between cpu_hotplug lock and dbs_mutex. Basically a race condition
+between cpu_down() and do_dbs_timer().
+
+cpu_down() flow:
+* cpu_down() call for CPU 1
+* Takes hot plug lock
+* Calls pre down notifier
+* cpufreq notifier handler calls cpufreq_driver_target() which takes
+ cpu_hotplug lock again. OK as cpu_hotplug lock is recursive in same
+ process context
+* CPU 1 goes down
+* Calls post down notifier
+* cpufreq notifier handler calls ondemand event stop which takes dbs_mutex
+
+So, cpu_hotplug lock is taken before dbs_mutex in this flow.
+
+do_dbs_timer is triggered by a periodic timer event.
+It first takes dbs_mutex and then takes cpu_hotplug lock in
+cpufreq_driver_target().
+Note the reverse order here compared to above. So, if this timer event happens
+at the right moment during cpu_down, the system will deadlock.
+
+Attached patch fixes the issue for both ondemand and conservative.
+
+Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
+Signed-off-by: Dave Jones <davej@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/cpufreq/cpufreq_conservative.c | 12 ++++++++++++
+ drivers/cpufreq/cpufreq_ondemand.c | 12 ++++++++++++
+ 2 files changed, 24 insertions(+)
+
+--- linux-2.6.17.4.orig/drivers/cpufreq/cpufreq_conservative.c
++++ linux-2.6.17.4/drivers/cpufreq/cpufreq_conservative.c
+@@ -72,6 +72,14 @@ static DEFINE_PER_CPU(struct cpu_dbs_inf
+
+ static unsigned int dbs_enable; /* number of CPUs using this policy */
+
++/*
++ * DEADLOCK ALERT! There is a ordering requirement between cpu_hotplug
++ * lock and dbs_mutex. cpu_hotplug lock should always be held before
++ * dbs_mutex. If any function that can potentially take cpu_hotplug lock
++ * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then
++ * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
++ * is recursive for the same process. -Venki
++ */
+ static DEFINE_MUTEX (dbs_mutex);
+ static DECLARE_WORK (dbs_work, do_dbs_timer, NULL);
+
+@@ -414,12 +422,14 @@ static void dbs_check_cpu(int cpu)
+ static void do_dbs_timer(void *data)
+ {
+ int i;
++ lock_cpu_hotplug();
+ mutex_lock(&dbs_mutex);
+ for_each_online_cpu(i)
+ dbs_check_cpu(i);
+ schedule_delayed_work(&dbs_work,
+ usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
+ mutex_unlock(&dbs_mutex);
++ unlock_cpu_hotplug();
+ }
+
+ static inline void dbs_timer_init(void)
+@@ -514,6 +524,7 @@ static int cpufreq_governor_dbs(struct c
+ break;
+
+ case CPUFREQ_GOV_LIMITS:
++ lock_cpu_hotplug();
+ mutex_lock(&dbs_mutex);
+ if (policy->max < this_dbs_info->cur_policy->cur)
+ __cpufreq_driver_target(
+@@ -524,6 +535,7 @@ static int cpufreq_governor_dbs(struct c
+ this_dbs_info->cur_policy,
+ policy->min, CPUFREQ_RELATION_L);
+ mutex_unlock(&dbs_mutex);
++ unlock_cpu_hotplug();
+ break;
+ }
+ return 0;
+--- linux-2.6.17.4.orig/drivers/cpufreq/cpufreq_ondemand.c
++++ linux-2.6.17.4/drivers/cpufreq/cpufreq_ondemand.c
+@@ -71,6 +71,14 @@ static DEFINE_PER_CPU(struct cpu_dbs_inf
+
+ static unsigned int dbs_enable; /* number of CPUs using this policy */
+
++/*
++ * DEADLOCK ALERT! There is a ordering requirement between cpu_hotplug
++ * lock and dbs_mutex. cpu_hotplug lock should always be held before
++ * dbs_mutex. If any function that can potentially take cpu_hotplug lock
++ * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then
++ * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
++ * is recursive for the same process. -Venki
++ */
+ static DEFINE_MUTEX (dbs_mutex);
+ static DECLARE_WORK (dbs_work, do_dbs_timer, NULL);
+
+@@ -363,12 +371,14 @@ static void dbs_check_cpu(int cpu)
+ static void do_dbs_timer(void *data)
+ {
+ int i;
++ lock_cpu_hotplug();
+ mutex_lock(&dbs_mutex);
+ for_each_online_cpu(i)
+ dbs_check_cpu(i);
+ queue_delayed_work(dbs_workq, &dbs_work,
+ usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
+ mutex_unlock(&dbs_mutex);
++ unlock_cpu_hotplug();
+ }
+
+ static inline void dbs_timer_init(void)
+@@ -469,6 +479,7 @@ static int cpufreq_governor_dbs(struct c
+ break;
+
+ case CPUFREQ_GOV_LIMITS:
++ lock_cpu_hotplug();
+ mutex_lock(&dbs_mutex);
+ if (policy->max < this_dbs_info->cur_policy->cur)
+ __cpufreq_driver_target(
+@@ -479,6 +490,7 @@ static int cpufreq_governor_dbs(struct c
+ this_dbs_info->cur_policy,
+ policy->min, CPUFREQ_RELATION_L);
+ mutex_unlock(&dbs_mutex);
++ unlock_cpu_hotplug();
+ break;
+ }
+ return 0;
--- /dev/null
+From stable-bounces@linux.kernel.org Fri Jun 30 19:10:14 2006
+Date: Fri, 23 Jun 2006 06:10:01 GMT
+Message-Id: <200606230610.k5N6A1Ea015198@hera.kernel.org>
+From: Linux Kernel Mailing List <linux-kernel@vger.kernel.org>
+Cc:
+Subject: [CPUFREQ] Fix powernow-k8 SMP kernel on UP hardware bug.
+
+From: Randy Dunlap <randy.dunlap@oracle.com>
+
+[CPUFREQ] Fix powernow-k8 SMP kernel on UP hardware bug.
+
+Fix powernow-k8 doesn't load bug.
+Reference: https://launchpad.net/distros/ubuntu/+source/linux-source-2.6.15/+bug/35145
+
+Signed-off-by: Ben Collins <bcollins@ubuntu.com>
+Signed-off-by: Dave Jones <davej@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+
+---
+ arch/i386/kernel/cpu/cpufreq/powernow-k8.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- linux-2.6.17.4.orig/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
++++ linux-2.6.17.4/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
+@@ -1008,7 +1008,7 @@ static int __cpuinit powernowk8_cpu_init
+ * an UP version, and is deprecated by AMD.
+ */
+
+- if ((num_online_cpus() != 1) || (num_possible_cpus() != 1)) {
++ if (num_online_cpus() != 1) {
+ printk(KERN_ERR PFX "MP systems not supported by PSB BIOS structure\n");
+ kfree(data);
+ return -ENODEV;
--- /dev/null
+From stable-bounces@linux.kernel.org Fri Jun 30 19:09:48 2006
+Date: Tue, 20 Jun 2006 03:11:32 GMT
+Message-Id: <200606200311.k5K3BWre007315@hera.kernel.org>
+Cc:
+Subject: [stable] [CPUFREQ] Make powernow-k7 work on SMP kernels.
+
+From: Dave Jones <davej@redhat.com>
+
+[CPUFREQ] Make powernow-k7 work on SMP kernels.
+Even though powernow-k7 doesn't work in SMP environments,
+it can work on an SMP configured kernel if there's only
+one CPU present, however recalibrate_cpu_khz was returning
+-EINVAL on such kernels, so we failed to init the cpufreq driver.
+
+Signed-off-by: Dave Jones <davej@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+
+---
+ arch/i386/kernel/cpu/cpufreq/powernow-k7.c | 5 +----
+ 1 file changed, 1 insertion(+), 4 deletions(-)
+
+--- linux-2.6.17.4.orig/arch/i386/kernel/cpu/cpufreq/powernow-k7.c
++++ linux-2.6.17.4/arch/i386/kernel/cpu/cpufreq/powernow-k7.c
+@@ -581,10 +581,7 @@ static int __init powernow_cpu_init (str
+
+ rdmsrl (MSR_K7_FID_VID_STATUS, fidvidstatus.val);
+
+- /* recalibrate cpu_khz */
+- result = recalibrate_cpu_khz();
+- if (result)
+- return result;
++ recalibrate_cpu_khz();
+
+ fsb = (10 * cpu_khz) / fid_codes[fidvidstatus.bits.CFID];
+ if (!fsb) {