git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
more .26 patches
author Greg Kroah-Hartman <gregkh@suse.de>
Wed, 1 Oct 2008 23:39:29 +0000 (16:39 -0700)
committer Greg Kroah-Hartman <gregkh@suse.de>
Wed, 1 Oct 2008 23:39:29 +0000 (16:39 -0700)
queue-2.6.26/apic-routing-fix.patch [new file with mode: 0644]
queue-2.6.26/block-submit_bh-inadvertently-discards-barrier-flag-on-a-sync-write.patch [new file with mode: 0644]
queue-2.6.26/ocfs2-increment-the-reference-count-of-an-already-active-stack.patch [new file with mode: 0644]
queue-2.6.26/rt2x00-use-ieee80211_hw-workqueue-again.patch [new file with mode: 0644]
queue-2.6.26/sata_nv-disable-hardreset-for-generic.patch [new file with mode: 0644]
queue-2.6.26/sched-fix-process-time-monotonicity.patch [new file with mode: 0644]
queue-2.6.26/series
queue-2.6.26/sg-disable-interrupts-inside-sg_copy_buffer.patch [new file with mode: 0644]
queue-2.6.26/x86-fix-27-rc-crash-on-vsmp-due-to-paravirt-during-module-load.patch [new file with mode: 0644]

diff --git a/queue-2.6.26/apic-routing-fix.patch b/queue-2.6.26/apic-routing-fix.patch
new file mode 100644 (file)
index 0000000..840cfa0
--- /dev/null
@@ -0,0 +1,155 @@
+From tglx@linutronix.de  Wed Oct  1 16:32:20 2008
+From: Yinghai Lu <yhlu.kernel@gmail.com>
+Date: Fri, 12 Sep 2008 13:08:18 +0200 (CEST)
+Subject: APIC routing fix
+To: Stable Team <stable@kernel.org>
+Cc: Yinghai Lu <yhlu.kernel@gmail.com>
+Message-ID: <alpine.LFD.1.10.0809121304400.13819@apollo.tec.linutronix.de>
+
+
+From: Yinghai Lu <yhlu.kernel@gmail.com>
+
+commit e0da33646826b66ef933d47ea2fb7a693fd849bf upstream
+
+x86: introduce max_physical_apicid for bigsmp switching
+
+A multi-socket test system with 3 or 4 IO-APICs needs to switch to bigsmp
+or physflat when 4 dual-core CPUs or 2 quad-core CPUs are installed.
+
+The CPU APIC IDs are [4,11] instead of [0,7], so we need to check the
+maximum APIC ID instead of the number of CPUs.
+
+Also add the check for 32-bit when ACPI is not compiled in or acpi=off.
+
+Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kernel/apic_32.c      |    5 ++++-
+ arch/x86/kernel/apic_64.c      |    3 +++
+ arch/x86/kernel/genapic_64.c   |    2 +-
+ arch/x86/kernel/mpparse.c      |    5 +++++
+ arch/x86/kernel/setup.c        |    1 +
+ arch/x86/kernel/setup_32.c     |   11 ++++++-----
+ arch/x86/mach-generic/bigsmp.c |    2 +-
+ include/asm-x86/mpspec.h       |    1 +
+ 8 files changed, 22 insertions(+), 8 deletions(-)
+
+--- a/arch/x86/kernel/apic_32.c
++++ b/arch/x86/kernel/apic_32.c
+@@ -1536,6 +1536,9 @@ void __cpuinit generic_processor_info(in
+                */
+               cpu = 0;
++      if (apicid > max_physical_apicid)
++              max_physical_apicid = apicid;
++
+       /*
+        * Would be preferable to switch to bigsmp when CONFIG_HOTPLUG_CPU=y
+        * but we need to work other dependencies like SMP_SUSPEND etc
+@@ -1543,7 +1546,7 @@ void __cpuinit generic_processor_info(in
+        * if (CPU_HOTPLUG_ENABLED || num_processors > 8)
+        *       - Ashok Raj <ashok.raj@intel.com>
+        */
+-      if (num_processors > 8) {
++      if (max_physical_apicid >= 8) {
+               switch (boot_cpu_data.x86_vendor) {
+               case X86_VENDOR_INTEL:
+                       if (!APIC_XAPIC(version)) {
+--- a/arch/x86/kernel/apic_64.c
++++ b/arch/x86/kernel/apic_64.c
+@@ -1090,6 +1090,9 @@ void __cpuinit generic_processor_info(in
+                */
+               cpu = 0;
+       }
++      if (apicid > max_physical_apicid)
++              max_physical_apicid = apicid;
++
+       /* are we being called early in kernel startup? */
+       if (x86_cpu_to_apicid_early_ptr) {
+               u16 *cpu_to_apicid = x86_cpu_to_apicid_early_ptr;
+--- a/arch/x86/kernel/genapic_64.c
++++ b/arch/x86/kernel/genapic_64.c
+@@ -51,7 +51,7 @@ void __init setup_apic_routing(void)
+       else
+ #endif
+-      if (num_possible_cpus() <= 8)
++      if (max_physical_apicid < 8)
+               genapic = &apic_flat;
+       else
+               genapic = &apic_physflat;
+--- a/arch/x86/kernel/mpparse.c
++++ b/arch/x86/kernel/mpparse.c
+@@ -402,6 +402,11 @@ static int __init smp_read_mpc(struct mp
+               ++mpc_record;
+ #endif
+       }
++
++#ifdef CONFIG_X86_GENERICARCH
++       generic_bigsmp_probe();
++#endif
++
+       setup_apic_routing();
+       if (!num_processors)
+               printk(KERN_ERR "MPTABLE: no processors registered!\n");
+--- a/arch/x86/kernel/setup_32.c
++++ b/arch/x86/kernel/setup_32.c
+@@ -914,6 +914,12 @@ void __init setup_arch(char **cmdline_p)
+ #ifdef CONFIG_ACPI
+       acpi_boot_init();
++#endif
++
++#ifdef CONFIG_X86_LOCAL_APIC
++      if (smp_found_config)
++              get_smp_config();
++#endif
+ #if defined(CONFIG_SMP) && defined(CONFIG_X86_PC)
+       if (def_to_bigsmp)
+@@ -921,11 +927,6 @@ void __init setup_arch(char **cmdline_p)
+                       "CONFIG_X86_PC cannot handle it.\nUse "
+                       "CONFIG_X86_GENERICARCH or CONFIG_X86_BIGSMP.\n");
+ #endif
+-#endif
+-#ifdef CONFIG_X86_LOCAL_APIC
+-      if (smp_found_config)
+-              get_smp_config();
+-#endif
+       e820_register_memory();
+       e820_mark_nosave_regions();
+--- a/arch/x86/kernel/setup.c
++++ b/arch/x86/kernel/setup.c
+@@ -17,6 +17,7 @@ unsigned int num_processors;
+ unsigned disabled_cpus __cpuinitdata;
+ /* Processor that is doing the boot up */
+ unsigned int boot_cpu_physical_apicid = -1U;
++unsigned int max_physical_apicid;
+ EXPORT_SYMBOL(boot_cpu_physical_apicid);
+ DEFINE_PER_CPU(u16, x86_cpu_to_apicid) = BAD_APICID;
+--- a/arch/x86/mach-generic/bigsmp.c
++++ b/arch/x86/mach-generic/bigsmp.c
+@@ -48,7 +48,7 @@ static const struct dmi_system_id bigsmp
+ static int probe_bigsmp(void)
+ {
+       if (def_to_bigsmp)
+-      dmi_bigsmp = 1;
++              dmi_bigsmp = 1;
+       else
+               dmi_check_system(bigsmp_dmi_table);
+       return dmi_bigsmp;
+--- a/include/asm-x86/mpspec.h
++++ b/include/asm-x86/mpspec.h
+@@ -35,6 +35,7 @@ extern DECLARE_BITMAP(mp_bus_not_pci, MA
+ extern int mp_bus_id_to_pci_bus[MAX_MP_BUSSES];
+ extern unsigned int boot_cpu_physical_apicid;
++extern unsigned int max_physical_apicid;
+ extern int smp_found_config;
+ extern int mpc_default_type;
+ extern unsigned long mp_lapic_addr;
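
A minimal standalone illustration (hypothetical values, not kernel code) of why the switch must key off the highest physical APIC ID rather than the CPU count: on the reported box the eight CPUs carry APIC IDs 4..11, so num_processors never exceeds 8 even though flat APIC delivery only covers IDs 0..7.

    #include <stdio.h>

    /* Flat APIC delivery can only address physical APIC IDs 0..7. */
    static int needs_physflat(unsigned int max_physical_apicid)
    {
            return max_physical_apicid >= 8;
    }

    int main(void)
    {
            /* Eight CPUs whose APIC IDs are 4..11, as on the reported system. */
            unsigned int apicid[] = { 4, 5, 6, 7, 8, 9, 10, 11 };
            unsigned int num_processors = 0, max_physical_apicid = 0;

            for (int i = 0; i < 8; i++) {
                    num_processors++;
                    if (apicid[i] > max_physical_apicid)
                            max_physical_apicid = apicid[i];
            }

            /* Old check: the CPU count never passes 8, so the kernel stays
             * flat and CPUs with APIC IDs 8..11 are unreachable. */
            printf("old check (num_processors > 8): %d\n", num_processors > 8);

            /* New check: the highest APIC ID triggers the switch. */
            printf("new check (max apicid >= 8):    %d\n",
                   needs_physflat(max_physical_apicid));
            return 0;
    }
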
diff --git a/queue-2.6.26/block-submit_bh-inadvertently-discards-barrier-flag-on-a-sync-write.patch b/queue-2.6.26/block-submit_bh-inadvertently-discards-barrier-flag-on-a-sync-write.patch
new file mode 100644 (file)
index 0000000..49c873b
--- /dev/null
@@ -0,0 +1,50 @@
+From cebbert@redhat.com  Wed Oct  1 16:16:38 2008
+From: Jens Axboe <jens.axboe@oracle.com>
+Date: Wed, 3 Sep 2008 19:49:10 -0400
+Subject: block: submit_bh() inadvertently discards barrier flag on a sync write
+To: stable@kernel.org
+Cc: Jens Axboe <jens.axboe@oracle.com>
+Message-ID: <20080903194910.585bc39c@redhat.com>
+
+
+From: Jens Axboe <jens.axboe@oracle.com>
+
+commit 48fd4f93a00eac844678629f2f00518e146ed30d upstream
+
+As reported by Milan Broz <mbroz@redhat.com>, commit 18ce3751 inadvertently
+made submit_bh() discard the barrier bit for a WRITE_SYNC request. Fix
+that up.
+
+Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
+Cc: Chuck Ebbert <cebbert@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/buffer.c |   13 ++++++++-----
+ 1 file changed, 8 insertions(+), 5 deletions(-)
+
+--- a/fs/buffer.c
++++ b/fs/buffer.c
+@@ -2868,14 +2868,17 @@ int submit_bh(int rw, struct buffer_head
+       BUG_ON(!buffer_mapped(bh));
+       BUG_ON(!bh->b_end_io);
+-      if (buffer_ordered(bh) && (rw == WRITE))
+-              rw = WRITE_BARRIER;
++      /*
++       * Mask in barrier bit for a write (could be either a WRITE or a
++       * WRITE_SYNC
++       */
++      if (buffer_ordered(bh) && (rw & WRITE))
++              rw |= WRITE_BARRIER;
+       /*
+-       * Only clear out a write error when rewriting, should this
+-       * include WRITE_SYNC as well?
++       * Only clear out a write error when rewriting
+        */
+-      if (test_set_buffer_req(bh) && (rw == WRITE || rw == WRITE_BARRIER))
++      if (test_set_buffer_req(bh) && (rw & WRITE))
+               clear_buffer_write_io_error(bh);
+       /*
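
A small standalone sketch (the flag values are hypothetical stand-ins; in 2.6.26 WRITE_SYNC and WRITE_BARRIER are effectively WRITE with extra request bits OR'd in) of why the equality test dropped the barrier on sync writes while the bit test plus OR keeps both bits.

    #include <stdio.h>

    /* Hypothetical stand-ins: WRITE is one bit; the sync and barrier
     * variants are WRITE with an extra bit OR'd in. */
    #define WRITE          (1u << 0)
    #define RW_SYNC_BIT    (1u << 1)
    #define RW_BARRIER_BIT (1u << 2)
    #define WRITE_SYNC     (WRITE | RW_SYNC_BIT)
    #define WRITE_BARRIER  (WRITE | RW_BARRIER_BIT)

    int main(void)
    {
            unsigned int rw = WRITE_SYNC;   /* sync write to an ordered buffer */

            /* Old submit_bh(): the equality test never matches WRITE_SYNC,
             * so a sync write on an ordered buffer never gets the barrier. */
            if (rw == WRITE)
                    rw = WRITE_BARRIER;

            /* Fixed submit_bh(): any write keeps its bits and gains the
             * barrier bit on top. */
            if (rw & WRITE)
                    rw |= WRITE_BARRIER;

            printf("sync bit: %d, barrier bit: %d\n",
                   !!(rw & RW_SYNC_BIT), !!(rw & RW_BARRIER_BIT));
            return 0;
    }
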
diff --git a/queue-2.6.26/ocfs2-increment-the-reference-count-of-an-already-active-stack.patch b/queue-2.6.26/ocfs2-increment-the-reference-count-of-an-already-active-stack.patch
new file mode 100644 (file)
index 0000000..5fab97f
--- /dev/null
@@ -0,0 +1,46 @@
+From mfasheh@suse.com  Wed Oct  1 16:34:58 2008
+From: Joel Becker <Joel.Becker@oracle.com>
+Date: Wed, 10 Sep 2008 06:27:07 -0700
+Subject: ocfs2: Increment the reference count of an already-active stack.
+To: stable@kernel.org
+Cc: Joel Becker <Joel.Becker@oracle.com>
+Message-ID: <20080910132707.GG4563@wotan.suse.de>
+Content-Disposition: inline
+
+From: Joel Becker <Joel.Becker@oracle.com>
+
+commit d6817cdbd143f87f9d7c59a4c3194091190eeb84 upstream
+
+The ocfs2_stack_driver_request() function failed to increment the
+refcount of an already-active stack.  It only did the increment on the
+first reference.  Whoops.
+
+Signed-off-by: Joel Becker <joel.becker@oracle.com>
+Tested-by: Marcos Matsunaga <marcos.matsunaga@oracle.com>
+Signed-off-by: Mark Fasheh <mfasheh@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/ocfs2/stackglue.c |    7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+--- a/fs/ocfs2/stackglue.c
++++ b/fs/ocfs2/stackglue.c
+@@ -97,13 +97,14 @@ static int ocfs2_stack_driver_request(co
+               goto out;
+       }
+-      /* Ok, the stack is pinned */
+-      p->sp_count++;
+       active_stack = p;
+-
+       rc = 0;
+ out:
++      /* If we found it, pin it */
++      if (!rc)
++              active_stack->sp_count++;
++
+       spin_unlock(&ocfs2_stack_lock);
+       return rc;
+ }
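
A minimal sketch (hypothetical names, not the ocfs2 code) of the pattern the fix restores: the pin count is bumped on every successful request, whether the stack was just activated or was already active.

    #include <errno.h>
    #include <stdio.h>

    struct stack {
            const char *name;
            int sp_count;   /* how many users have pinned this stack */
    };

    static struct stack *active_stack;

    /* Request 'p' as the active stack; pin it on every successful call. */
    static int stack_driver_request(struct stack *p)
    {
            int rc = 0;

            if (active_stack && active_stack != p) {
                    rc = -EBUSY;    /* a different stack is already active */
                    goto out;
            }
            active_stack = p;
    out:
            if (!rc)
                    active_stack->sp_count++;   /* first *and* repeat requests */
            return rc;
    }

    int main(void)
    {
            struct stack o2cb = { "o2cb", 0 };

            stack_driver_request(&o2cb);
            stack_driver_request(&o2cb);    /* already active: still counted */
            printf("%s pinned %d time(s)\n", o2cb.name, o2cb.sp_count);
            return 0;
    }
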
diff --git a/queue-2.6.26/rt2x00-use-ieee80211_hw-workqueue-again.patch b/queue-2.6.26/rt2x00-use-ieee80211_hw-workqueue-again.patch
new file mode 100644 (file)
index 0000000..b38e7f1
--- /dev/null
@@ -0,0 +1,113 @@
+From 8e260c22238dd8b57aefb1f5e4bd114486a9c17d Mon Sep 17 00:00:00 2001
+From: Ivo van Doorn <ivdoorn@gmail.com>
+Date: Fri, 4 Jul 2008 13:41:31 +0200
+Subject: rt2x00: Use ieee80211_hw->workqueue again
+
+commit 8e260c22238dd8b57aefb1f5e4bd114486a9c17d upstream
+
+Remove the rt2x00 singlethreaded workqueue and move
+the link tuner and packet filter scheduled work to
+the ieee80211_hw->workqueue again.
+The only exception is the interface scheduled work
+handler, which uses the mac80211 interface iterator
+under the RTNL lock. This work needs to be handled
+on the kernel workqueue to prevent lockdep issues.
+
+Signed-off-by: Ivo van Doorn <IvDoorn@gmail.com>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/wireless/rt2x00/rt2x00.h    |    4 +++-
+ drivers/net/wireless/rt2x00/rt2x00dev.c |   17 +++--------------
+ drivers/net/wireless/rt2x00/rt2x00mac.c |    4 ++--
+ 3 files changed, 8 insertions(+), 17 deletions(-)
+
+--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
++++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
+@@ -75,7 +75,7 @@ static void rt2x00lib_start_link_tuner(s
+       rt2x00lib_reset_link_tuner(rt2x00dev);
+-      queue_delayed_work(rt2x00dev->workqueue,
++      queue_delayed_work(rt2x00dev->hw->workqueue,
+                          &rt2x00dev->link.work, LINK_TUNE_INTERVAL);
+ }
+@@ -390,7 +390,7 @@ static void rt2x00lib_link_tuner(struct 
+        * Increase tuner counter, and reschedule the next link tuner run.
+        */
+       rt2x00dev->link.count++;
+-      queue_delayed_work(rt2x00dev->workqueue,
++      queue_delayed_work(rt2x00dev->hw->workqueue,
+                          &rt2x00dev->link.work, LINK_TUNE_INTERVAL);
+ }
+@@ -488,7 +488,7 @@ void rt2x00lib_beacondone(struct rt2x00_
+                                                  rt2x00lib_beacondone_iter,
+                                                  rt2x00dev);
+-      queue_work(rt2x00dev->workqueue, &rt2x00dev->intf_work);
++      schedule_work(&rt2x00dev->intf_work);
+ }
+ EXPORT_SYMBOL_GPL(rt2x00lib_beacondone);
+@@ -1131,10 +1131,6 @@ int rt2x00lib_probe_dev(struct rt2x00_de
+       /*
+        * Initialize configuration work.
+        */
+-      rt2x00dev->workqueue = create_singlethread_workqueue("rt2x00lib");
+-      if (!rt2x00dev->workqueue)
+-              goto exit;
+-
+       INIT_WORK(&rt2x00dev->intf_work, rt2x00lib_intf_scheduled);
+       INIT_WORK(&rt2x00dev->filter_work, rt2x00lib_packetfilter_scheduled);
+       INIT_DELAYED_WORK(&rt2x00dev->link.work, rt2x00lib_link_tuner);
+@@ -1195,13 +1191,6 @@ void rt2x00lib_remove_dev(struct rt2x00_
+       rt2x00leds_unregister(rt2x00dev);
+       /*
+-       * Stop all queued work. Note that most tasks will already be halted
+-       * during rt2x00lib_disable_radio() and rt2x00lib_uninitialize().
+-       */
+-      flush_workqueue(rt2x00dev->workqueue);
+-      destroy_workqueue(rt2x00dev->workqueue);
+-
+-      /*
+        * Free ieee80211_hw memory.
+        */
+       rt2x00lib_remove_hw(rt2x00dev);
+--- a/drivers/net/wireless/rt2x00/rt2x00.h
++++ b/drivers/net/wireless/rt2x00/rt2x00.h
+@@ -820,8 +820,10 @@ struct rt2x00_dev {
+       /*
+        * Scheduled work.
++       * NOTE: intf_work will use ieee80211_iterate_active_interfaces()
++       * which means it cannot be placed on the hw->workqueue
++       * due to RTNL locking requirements.
+        */
+-      struct workqueue_struct *workqueue;
+       struct work_struct intf_work;
+       struct work_struct filter_work;
+--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
++++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
+@@ -428,7 +428,7 @@ void rt2x00mac_configure_filter(struct i
+       if (!test_bit(DRIVER_REQUIRE_SCHEDULED, &rt2x00dev->flags))
+               rt2x00dev->ops->lib->config_filter(rt2x00dev, *total_flags);
+       else
+-              queue_work(rt2x00dev->workqueue, &rt2x00dev->filter_work);
++              queue_work(rt2x00dev->hw->workqueue, &rt2x00dev->filter_work);
+ }
+ EXPORT_SYMBOL_GPL(rt2x00mac_configure_filter);
+@@ -509,7 +509,7 @@ void rt2x00mac_bss_info_changed(struct i
+       memcpy(&intf->conf, bss_conf, sizeof(*bss_conf));
+       if (delayed) {
+               intf->delayed_flags |= delayed;
+-              queue_work(rt2x00dev->workqueue, &rt2x00dev->intf_work);
++              schedule_work(&rt2x00dev->intf_work);
+       }
+       spin_unlock(&intf->lock);
+ }
diff --git a/queue-2.6.26/sata_nv-disable-hardreset-for-generic.patch b/queue-2.6.26/sata_nv-disable-hardreset-for-generic.patch
new file mode 100644 (file)
index 0000000..c60f335
--- /dev/null
@@ -0,0 +1,74 @@
+From 2fd673ecf0378ddeeeb87b3605e50212e0c0ddc6 Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Fri, 29 Aug 2008 16:13:12 +0200
+Subject: sata_nv: disable hardreset for generic
+
+From: Tejun Heo <tj@kernel.org>
+
+commit 2fd673ecf0378ddeeeb87b3605e50212e0c0ddc6 upstream
+
+A recent libata change unified the probing, hotplug and EH reset paths.
+Previously, a broken hardreset could go unnoticed because it was not used
+during probing, but now the problem surfaces and bites hard as soon as
+something goes wrong or a device is hotplugged.
+
+OSDL bug 11195 reports that sata_nv generic flavor falls into this
+category.  Hardreset itself succeeds but PHY stays offline after
+hardreset.  I tried longer debounce timing but the result was the
+same.
+
+  http://bugzilla.kernel.org/show_bug.cgi?id=11195
+
+So, it seems we'll have to drop hardreset from the generic flavor.
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Cc: Peer Chen <pchen@nvidia.com>
+Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/ata/sata_nv.c |   19 +------------------
+ 1 file changed, 1 insertion(+), 18 deletions(-)
+
+--- a/drivers/ata/sata_nv.c
++++ b/drivers/ata/sata_nv.c
+@@ -309,8 +309,6 @@ static void nv_nf2_freeze(struct ata_por
+ static void nv_nf2_thaw(struct ata_port *ap);
+ static void nv_ck804_freeze(struct ata_port *ap);
+ static void nv_ck804_thaw(struct ata_port *ap);
+-static int nv_hardreset(struct ata_link *link, unsigned int *class,
+-                      unsigned long deadline);
+ static int nv_adma_slave_config(struct scsi_device *sdev);
+ static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
+ static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
+@@ -407,7 +405,7 @@ static struct scsi_host_template nv_swnc
+ static struct ata_port_operations nv_generic_ops = {
+       .inherits               = &ata_bmdma_port_ops,
+-      .hardreset              = nv_hardreset,
++      .hardreset              = ATA_OP_NULL,
+       .scr_read               = nv_scr_read,
+       .scr_write              = nv_scr_write,
+ };
+@@ -1588,21 +1586,6 @@ static void nv_mcp55_thaw(struct ata_por
+       ata_sff_thaw(ap);
+ }
+-static int nv_hardreset(struct ata_link *link, unsigned int *class,
+-                      unsigned long deadline)
+-{
+-      int rc;
+-
+-      /* SATA hardreset fails to retrieve proper device signature on
+-       * some controllers.  Request follow up SRST.  For more info,
+-       * see http://bugzilla.kernel.org/show_bug.cgi?id=3352
+-       */
+-      rc = sata_sff_hardreset(link, class, deadline);
+-      if (rc)
+-              return rc;
+-      return -EAGAIN;
+-}
+-
+ static void nv_adma_error_handler(struct ata_port *ap)
+ {
+       struct nv_adma_port_priv *pp = ap->private_data;
diff --git a/queue-2.6.26/sched-fix-process-time-monotonicity.patch b/queue-2.6.26/sched-fix-process-time-monotonicity.patch
new file mode 100644 (file)
index 0000000..05e144d
--- /dev/null
@@ -0,0 +1,198 @@
+From 49048622eae698e5c4ae61f7e71200f265ccc529 Mon Sep 17 00:00:00 2001
+From: Balbir Singh <balbir@linux.vnet.ibm.com>
+Date: Fri, 5 Sep 2008 18:12:23 +0200
+Subject: sched: fix process time monotonicity
+
+From: Balbir Singh <balbir@linux.vnet.ibm.com>
+
+commit 49048622eae698e5c4ae61f7e71200f265ccc529 upstream
+
+Spencer reported a problem where utime and stime were going negative despite
+the fixes in commit b27f03d4bdc145a09fb7b0c0e004b29f1ee555fa. The suspected
+reason is that signal_struct maintains its own utime and stime (of exited
+tasks); these are not updated using the new task_utime() routine, hence
+sig->utime can go backwards and cause the same problem to occur (sig->utime
+adds tsk->utime and not task_utime()). This patch fixes the problem.
+
+TODO: using max(task->prev_utime, derived utime) works for now, but a more
+generic solution is to implement cputime_max() and use the cputime_gt()
+function for comparison.
+
+Reported-by: spencer@bluehost.com
+Signed-off-by: Balbir Singh <balbir@linux.vnet.ibm.com>
+Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/proc/array.c       |   59 --------------------------------------------------
+ include/linux/sched.h |    4 +++
+ kernel/exit.c         |    6 ++---
+ kernel/sched.c        |   59 ++++++++++++++++++++++++++++++++++++++++++++++++++
+ 4 files changed, 66 insertions(+), 62 deletions(-)
+
+--- a/fs/proc/array.c
++++ b/fs/proc/array.c
+@@ -332,65 +332,6 @@ int proc_pid_status(struct seq_file *m, 
+       return 0;
+ }
+-/*
+- * Use precise platform statistics if available:
+- */
+-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+-static cputime_t task_utime(struct task_struct *p)
+-{
+-      return p->utime;
+-}
+-
+-static cputime_t task_stime(struct task_struct *p)
+-{
+-      return p->stime;
+-}
+-#else
+-static cputime_t task_utime(struct task_struct *p)
+-{
+-      clock_t utime = cputime_to_clock_t(p->utime),
+-              total = utime + cputime_to_clock_t(p->stime);
+-      u64 temp;
+-
+-      /*
+-       * Use CFS's precise accounting:
+-       */
+-      temp = (u64)nsec_to_clock_t(p->se.sum_exec_runtime);
+-
+-      if (total) {
+-              temp *= utime;
+-              do_div(temp, total);
+-      }
+-      utime = (clock_t)temp;
+-
+-      p->prev_utime = max(p->prev_utime, clock_t_to_cputime(utime));
+-      return p->prev_utime;
+-}
+-
+-static cputime_t task_stime(struct task_struct *p)
+-{
+-      clock_t stime;
+-
+-      /*
+-       * Use CFS's precise accounting. (we subtract utime from
+-       * the total, to make sure the total observed by userspace
+-       * grows monotonically - apps rely on that):
+-       */
+-      stime = nsec_to_clock_t(p->se.sum_exec_runtime) -
+-                      cputime_to_clock_t(task_utime(p));
+-
+-      if (stime >= 0)
+-              p->prev_stime = max(p->prev_stime, clock_t_to_cputime(stime));
+-
+-      return p->prev_stime;
+-}
+-#endif
+-
+-static cputime_t task_gtime(struct task_struct *p)
+-{
+-      return p->gtime;
+-}
+-
+ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
+                       struct pid *pid, struct task_struct *task, int whole)
+ {
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1477,6 +1477,10 @@ static inline void put_task_struct(struc
+               __put_task_struct(t);
+ }
++extern cputime_t task_utime(struct task_struct *p);
++extern cputime_t task_stime(struct task_struct *p);
++extern cputime_t task_gtime(struct task_struct *p);
++
+ /*
+  * Per process flags
+  */
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -111,9 +111,9 @@ static void __exit_signal(struct task_st
+                * We won't ever get here for the group leader, since it
+                * will have been the last reference on the signal_struct.
+                */
+-              sig->utime = cputime_add(sig->utime, tsk->utime);
+-              sig->stime = cputime_add(sig->stime, tsk->stime);
+-              sig->gtime = cputime_add(sig->gtime, tsk->gtime);
++              sig->utime = cputime_add(sig->utime, task_utime(tsk));
++              sig->stime = cputime_add(sig->stime, task_stime(tsk));
++              sig->gtime = cputime_add(sig->gtime, task_gtime(tsk));
+               sig->min_flt += tsk->min_flt;
+               sig->maj_flt += tsk->maj_flt;
+               sig->nvcsw += tsk->nvcsw;
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -3995,6 +3995,65 @@ void account_steal_time(struct task_stru
+ }
+ /*
++ * Use precise platform statistics if available:
++ */
++#ifdef CONFIG_VIRT_CPU_ACCOUNTING
++cputime_t task_utime(struct task_struct *p)
++{
++      return p->utime;
++}
++
++cputime_t task_stime(struct task_struct *p)
++{
++      return p->stime;
++}
++#else
++cputime_t task_utime(struct task_struct *p)
++{
++      clock_t utime = cputime_to_clock_t(p->utime),
++              total = utime + cputime_to_clock_t(p->stime);
++      u64 temp;
++
++      /*
++       * Use CFS's precise accounting:
++       */
++      temp = (u64)nsec_to_clock_t(p->se.sum_exec_runtime);
++
++      if (total) {
++              temp *= utime;
++              do_div(temp, total);
++      }
++      utime = (clock_t)temp;
++
++      p->prev_utime = max(p->prev_utime, clock_t_to_cputime(utime));
++      return p->prev_utime;
++}
++
++cputime_t task_stime(struct task_struct *p)
++{
++      clock_t stime;
++
++      /*
++       * Use CFS's precise accounting. (we subtract utime from
++       * the total, to make sure the total observed by userspace
++       * grows monotonically - apps rely on that):
++       */
++      stime = nsec_to_clock_t(p->se.sum_exec_runtime) -
++                      cputime_to_clock_t(task_utime(p));
++
++      if (stime >= 0)
++              p->prev_stime = max(p->prev_stime, clock_t_to_cputime(stime));
++
++      return p->prev_stime;
++}
++#endif
++
++inline cputime_t task_gtime(struct task_struct *p)
++{
++      return p->gtime;
++}
++
++/*
+  * This function gets called by the timer code, with HZ frequency.
+  * We call it with interrupts disabled.
+  *
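
A tiny userspace sketch (sample values are hypothetical) of the clamp that task_utime() relies on: the per-sample estimate derived from sum_exec_runtime can dip, but taking the max against the previously reported value keeps the figure userspace sees, and the sig->utime sum built from it, monotonic.

    #include <stdio.h>

    #define max(a, b) ((a) > (b) ? (a) : (b))

    int main(void)
    {
            /* Hypothetical utime estimates derived from sum_exec_runtime at
             * successive samples; the raw estimate is not monotonic. */
            unsigned long derived[] = { 100, 140, 135, 160 };
            unsigned long prev_utime = 0;   /* plays the role of p->prev_utime */

            for (int i = 0; i < 4; i++) {
                    /* Same clamp task_utime() applies before reporting. */
                    prev_utime = max(prev_utime, derived[i]);
                    printf("sample %d: derived=%lu reported=%lu\n",
                           i, derived[i], prev_utime);
            }
            return 0;
    }
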
diff --git a/queue-2.6.26/series b/queue-2.6.26/series
index d2427c37b2d328708310097b5d88ae943f43eda9..2f57a2e2b1275a35a51ecec410d31093b6c36d6e 100644 (file)
@@ -19,3 +19,11 @@ kvm-svm-fix-guest-global-tlb-flushes-with-npt.patch
 x86-64-clean-up-save-restore-i387-usage.patch
 x64-fpu-fix-possible-fpu-leakage-in-error-conditions.patch
 x86-fix-broken-ldt-access-in-vmi.patch
+block-submit_bh-inadvertently-discards-barrier-flag-on-a-sync-write.patch
+sata_nv-disable-hardreset-for-generic.patch
+sched-fix-process-time-monotonicity.patch
+apic-routing-fix.patch
+ocfs2-increment-the-reference-count-of-an-already-active-stack.patch
+sg-disable-interrupts-inside-sg_copy_buffer.patch
+x86-fix-27-rc-crash-on-vsmp-due-to-paravirt-during-module-load.patch
+rt2x00-use-ieee80211_hw-workqueue-again.patch
diff --git a/queue-2.6.26/sg-disable-interrupts-inside-sg_copy_buffer.patch b/queue-2.6.26/sg-disable-interrupts-inside-sg_copy_buffer.patch
new file mode 100644 (file)
index 0000000..ffe96b4
--- /dev/null
@@ -0,0 +1,54 @@
+From fujita.tomonori@lab.ntt.co.jp  Wed Oct  1 16:36:18 2008
+From: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
+Date: Sat, 13 Sep 2008 01:16:45 +0900
+Subject: sg: disable interrupts inside sg_copy_buffer
+To: stable@kernel.org
+Cc: James.Bottomley@HansenPartnership.com, jens.axboe@oracle.com
+Message-ID: <20080913011645M.fujita.tomonori@lab.ntt.co.jp>
+
+From: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
+
+This is the backport of the upstream commit 50bed2e2862a8f3a4f7d683d0d27292e71ef18b9
+
+The callers of sg_copy_buffer must disable interrupts before calling
+it (since it uses kmap_atomic). Some callers use it in already
+interrupt-disabled code, but others need to take the trouble to disable
+interrupts just for this. No wonder they forget about it and we hit a
+bug like:
+
+http://bugzilla.kernel.org/show_bug.cgi?id=11529
+
+James said that it might be better to disable interrupts inside the
+function rather than risk the callers getting it wrong.
+
+Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
+Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
+Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ lib/scatterlist.c |    5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/lib/scatterlist.c
++++ b/lib/scatterlist.c
+@@ -312,8 +312,9 @@ static size_t sg_copy_buffer(struct scat
+       struct scatterlist *sg;
+       size_t buf_off = 0;
+       int i;
++      unsigned long flags;
+-      WARN_ON(!irqs_disabled());
++      local_irq_save(flags);
+       for_each_sg(sgl, sg, nents, i) {
+               struct page *page;
+@@ -358,6 +359,8 @@ static size_t sg_copy_buffer(struct scat
+                       break;
+       }
++      local_irq_restore(flags);
++
+       return buf_off;
+ }
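
A userspace stand-in (simulated interrupt state; the real primitives are local_irq_save()/local_irq_restore()) for the design choice: saving and restoring the previous state inside the helper keeps it correct whether or not the caller already runs with interrupts disabled.

    #include <stdio.h>

    /* Simulated CPU interrupt state: 1 = enabled, 0 = disabled. */
    static int irqs_enabled = 1;

    static void sim_local_irq_save(int *flags)
    {
            *flags = irqs_enabled;  /* remember the caller's state */
            irqs_enabled = 0;       /* then disable */
    }

    static void sim_local_irq_restore(int flags)
    {
            irqs_enabled = flags;   /* put back exactly what the caller had */
    }

    /* Stand-in for sg_copy_buffer(): disable internally, restore on exit. */
    static void copy_buffer(void)
    {
            int flags;

            sim_local_irq_save(&flags);
            /* ... kmap_atomic() + memcpy() would run here with IRQs off ... */
            sim_local_irq_restore(flags);
    }

    int main(void)
    {
            copy_buffer();                  /* caller had IRQs enabled */
            printf("after plain call:  enabled=%d\n", irqs_enabled);

            irqs_enabled = 0;               /* caller already disabled them */
            copy_buffer();
            printf("after nested call: enabled=%d\n", irqs_enabled);
            return 0;
    }
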
diff --git a/queue-2.6.26/x86-fix-27-rc-crash-on-vsmp-due-to-paravirt-during-module-load.patch b/queue-2.6.26/x86-fix-27-rc-crash-on-vsmp-due-to-paravirt-during-module-load.patch
new file mode 100644 (file)
index 0000000..078f05a
--- /dev/null
@@ -0,0 +1,39 @@
+From kiran@scalex86.org  Wed Oct  1 16:37:31 2008
+From: Ravikiran Thirumalai <kiran@scalex86.org>
+Date: Tue, 23 Sep 2008 11:03:50 -0700
+Subject: x86: Fix 27-rc crash on vsmp due to paravirt during module load
+To: Ingo Molnar <mingo@elte.hu>, stable@kernel.org
+Cc: Glauber de Oliveira Costa <glommer@gmail.com>, linux-kernel@vger.kernel.org
+Message-ID: <20080923180350.GB26882@localdomain>
+Content-Disposition: inline
+
+From: Ravikiran Thirumalai <kiran@scalex86.org>
+
+commit 05e12e1c4c09cd35ac9f4e6af1e42b0036375d72 upstream.
+
+vsmp_patch has been marked __init ever since pvops; however, apply_paravirt
+can be called during module load, causing calls to a freed memory location.
+
+Since apply_paravirt can only be called during bootup and module load,
+mark vsmp_patch with "__init_or_module".
+
+Signed-off-by: Ravikiran Thirumalai <kiran@scalex86.org>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kernel/vsmp_64.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/vsmp_64.c
++++ b/arch/x86/kernel/vsmp_64.c
+@@ -58,7 +58,7 @@ static void vsmp_irq_enable(void)
+       native_restore_fl((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC));
+ }
+-static unsigned __init vsmp_patch(u8 type, u16 clobbers, void *ibuf,
++static unsigned __init_or_module vsmp_patch(u8 type, u16 clobbers, void *ibuf,
+                                 unsigned long addr, unsigned len)
+ {
+       switch (type) {
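
For context, a paraphrased sketch (not a verbatim copy of include/linux/init.h; exact definitions vary by kernel version) of the attribute distinction the fix depends on: __init text is discarded once boot finishes, while __init_or_module text is kept whenever modules can be loaded later.

    /* Paraphrased sketch of the init-section attributes; see
     * include/linux/init.h for the authoritative definitions. */
    #ifdef CONFIG_MODULES
    # define __init_or_module               /* kept: modules may still need it */
    #else
    # define __init_or_module __init        /* no modules: free it after boot */
    #endif

    /* With plain __init, vsmp_patch() lives in memory released after boot,
     * so apply_paravirt() calling it at module-load time jumps into freed
     * memory; __init_or_module keeps it resident when CONFIG_MODULES=y. */
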