--- /dev/null
+From 02a8f01b5a9f396d0327977af4c232d0f94c45fd Mon Sep 17 00:00:00 2001
+From: Justin TerAvest <teravest@google.com>
+Date: Wed, 9 Feb 2011 14:20:03 +0100
+Subject: cfq-iosched: Don't wait if queue already has requests.
+
+From: Justin TerAvest <teravest@google.com>
+
+commit 02a8f01b5a9f396d0327977af4c232d0f94c45fd upstream.
+
+Commit 7667aa0630407bc07dc38dcc79d29cc0a65553c1 added logic to wait for
+the last queue of the group to become busy (have at least one request),
+so that the group does not lose out for not being continuously
+backlogged. The commit did not check for the condition that the last
+queue already has some requests. As a result, if the queue already has
+requests, wait_busy is set. Later on, cfq_select_queue() checks the
+flag, and decides that since the queue has a request now and wait_busy
+is set, the queue is expired. This results in early expiration of the
+queue.
+
+This patch fixes the problem by adding a check to see if the queue
+already has requests. If it does, wait_busy is not set. As a result,
+time slices do not expire early.
+
+The queues with more than one request are usually buffered writers.
+Testing shows improvement in isolation between buffered writers.
+
+Signed-off-by: Justin TerAvest <teravest@google.com>
+Reviewed-by: Gui Jianfeng <guijianfeng@cn.fujitsu.com>
+Acked-by: Vivek Goyal <vgoyal@redhat.com>
+Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ block/cfq-iosched.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/block/cfq-iosched.c
++++ b/block/cfq-iosched.c
+@@ -3412,6 +3412,10 @@ static bool cfq_should_wait_busy(struct
+ {
+ struct cfq_io_context *cic = cfqd->active_cic;
+
++ /* If the queue already has requests, don't wait */
++ if (!RB_EMPTY_ROOT(&cfqq->sort_list))
++ return false;
++
+ /* If there are other queues in the group, don't wait */
+ if (cfqq->cfqg->nr_cfqq > 1)
+ return false;
--- /dev/null
+From 0781b909b5586f4db720b5d1838b78f9d8e42f14 Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <eric.dumazet@gmail.com>
+Date: Tue, 1 Feb 2011 15:52:35 -0800
+Subject: epoll: epoll_wait() should not use timespec_add_ns()
+
+From: Eric Dumazet <eric.dumazet@gmail.com>
+
+commit 0781b909b5586f4db720b5d1838b78f9d8e42f14 upstream.
+
+commit 95aac7b1cd224f ("epoll: make epoll_wait() use the hrtimer range
+feature") added a performance regression because it uses
+timespec_add_ns() with potentially very large 'ns' values.
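+
+To see why, here is a hedged paraphrase of what timespec_add_ns() boils
+down to (the real helper normalizes via __iter_div_u64_rem(); this is a
+sketch, not the literal kernel source):
+
+	static void timespec_add_ns(struct timespec *a, u64 ns)
+	{
+		ns += a->tv_nsec;
+		while (ns >= NSEC_PER_SEC) {
+			ns -= NSEC_PER_SEC;	/* one iteration per second */
+			a->tv_sec++;
+		}
+		a->tv_nsec = ns;
+	}
+
+A large millisecond timeout becomes billions of nanoseconds, and the
+loop above spins once for every whole second of it. The new
+ep_set_mstimeout() helper below instead computes the seconds with a
+single division and hands timespec_add_safe() two already-normalized
+values.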
+
+[akpm@linux-foundation.org: s/epoll_set_mstimeout/ep_set_mstimeout/, per Davide]
+Reported-by: Simon Kirby <sim@hostway.ca>
+Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
+Cc: Shawn Bohrer <shawn.bohrer@gmail.com>
+Acked-by: Davide Libenzi <davidel@xmailserver.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/eventpoll.c | 16 +++++++++++++---
+ 1 file changed, 13 insertions(+), 3 deletions(-)
+
+--- a/fs/eventpoll.c
++++ b/fs/eventpoll.c
+@@ -1111,6 +1111,17 @@ static int ep_send_events(struct eventpo
+ return ep_scan_ready_list(ep, ep_send_events_proc, &esed);
+ }
+
++static inline struct timespec ep_set_mstimeout(long ms)
++{
++ struct timespec now, ts = {
++ .tv_sec = ms / MSEC_PER_SEC,
++ .tv_nsec = NSEC_PER_MSEC * (ms % MSEC_PER_SEC),
++ };
++
++ ktime_get_ts(&now);
++ return timespec_add_safe(now, ts);
++}
++
+ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
+ int maxevents, long timeout)
+ {
+@@ -1118,12 +1129,11 @@ static int ep_poll(struct eventpoll *ep,
+ unsigned long flags;
+ long slack;
+ wait_queue_t wait;
+- struct timespec end_time;
+ ktime_t expires, *to = NULL;
+
+ if (timeout > 0) {
+- ktime_get_ts(&end_time);
+- timespec_add_ns(&end_time, (u64)timeout * NSEC_PER_MSEC);
++ struct timespec end_time = ep_set_mstimeout(timeout);
++
+ slack = select_estimate_accuracy(&end_time);
+ to = &expires;
+ *to = timespec_to_ktime(end_time);
--- /dev/null
+From 6044565af458e7fa6e748bff437ecc49dea88d79 Mon Sep 17 00:00:00 2001
+From: Stefan Richter <stefanr@s5r6.in-berlin.de>
+Date: Sat, 15 Jan 2011 18:19:48 +0100
+Subject: firewire: core: fix unstable I/O with Canon camcorder
+
+From: Stefan Richter <stefanr@s5r6.in-berlin.de>
+
+commit 6044565af458e7fa6e748bff437ecc49dea88d79 upstream.
+
+Regression since commit 10389536742c, "firewire: core: check for 1394a
+compliant IRM, fix inaccessibility of Sony camcorder":
+
+The camcorder Canon MV5i generates lots of bus resets when asynchronous
+requests are sent to it (e.g. Config ROM read requests or FCP Command
+write requests) if the camcorder is not root node. This causes drop-
+outs in videos or makes the camcorder entirely inaccessible.
+https://bugzilla.redhat.com/show_bug.cgi?id=633260
+
+Fix this by allowing any Canon device, even a pre-1394a IRM like the
+MV5i, to remain root node (if it is at least Cycle Master capable).
+With the FireWire controller cards that I tested, the MV5i always
+becomes root node when plugged in and left to its own devices.
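+
+For reference, quadlet 3 of the bus information block carries the node
+vendor ID in its top 24 bits, which is what the new check reads (a
+minimal sketch; config_rom[] is the host-order copy kept by the core):
+
+	u32 oui = irm_device->config_rom[3] >> 8;	/* 0x000085 = Canon */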
+
+Reported-by: Ralf Lange
+Signed-off-by: Stefan Richter <stefanr@s5r6.in-berlin.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/firewire/core-card.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+--- a/drivers/firewire/core-card.c
++++ b/drivers/firewire/core-card.c
+@@ -75,6 +75,8 @@ static size_t config_rom_length = 1 + 4
+ #define BIB_IRMC ((1) << 31)
+ #define NODE_CAPABILITIES 0x0c0083c0 /* per IEEE 1394 clause 8.3.2.6.5.2 */
+
++#define CANON_OUI 0x000085
++
+ static void generate_config_rom(struct fw_card *card, __be32 *config_rom)
+ {
+ struct fw_descriptor *desc;
+@@ -284,6 +286,7 @@ static void bm_work(struct work_struct *
+ bool root_device_is_running;
+ bool root_device_is_cmc;
+ bool irm_is_1394_1995_only;
++ bool keep_this_irm;
+
+ spin_lock_irq(&card->lock);
+
+@@ -305,6 +308,10 @@ static void bm_work(struct work_struct *
+ irm_is_1394_1995_only = irm_device && irm_device->config_rom &&
+ (irm_device->config_rom[2] & 0x000000f0) == 0;
+
++ /* Canon MV5i works unreliably if it is not root node. */
++ keep_this_irm = irm_device && irm_device->config_rom &&
++ irm_device->config_rom[3] >> 8 == CANON_OUI;
++
+ root_id = root_node->node_id;
+ irm_id = card->irm_node->node_id;
+ local_id = card->local_node->node_id;
+@@ -333,7 +340,7 @@ static void bm_work(struct work_struct *
+ goto pick_me;
+ }
+
+- if (irm_is_1394_1995_only) {
++ if (irm_is_1394_1995_only && !keep_this_irm) {
+ new_root_id = local_id;
+ fw_notify("%s, making local node (%02x) root.\n",
+ "IRM is not 1394a compliant", new_root_id);
+@@ -382,7 +389,7 @@ static void bm_work(struct work_struct *
+
+ spin_lock_irq(&card->lock);
+
+- if (rcode != RCODE_COMPLETE) {
++ if (rcode != RCODE_COMPLETE && !keep_this_irm) {
+ /*
+ * The lock request failed, maybe the IRM
+ * isn't really IRM capable after all. Let's
--- /dev/null
+From 91f78f36694b8748fda855b1f9e3614b027a744f Mon Sep 17 00:00:00 2001
+From: Ken Mills <ken.k.mills@intel.com>
+Date: Tue, 25 Jan 2011 14:17:45 +0000
+Subject: n_gsm: copy mtu over when configuring via ioctl interface
+
+From: Ken Mills <ken.k.mills@intel.com>
+
+commit 91f78f36694b8748fda855b1f9e3614b027a744f upstream.
+
+The mtu field of struct gsm_config is settable via the ioctl
+interface but did not get copied into the mux configuration, so the
+value supplied by user space was silently ignored.
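+
+A hedged user-space sketch of the affected path, using the ioctls and
+struct gsm_config from include/linux/gsmmux.h (the fd is assumed to
+already have the n_gsm line discipline attached; 1400 is an arbitrary
+example value):
+
+	struct gsm_config c;
+
+	ioctl(fd, GSMIOC_GETCONF, &c);
+	c.mtu = 1400;		/* accepted, but dropped before this fix */
+	ioctl(fd, GSMIOC_SETCONF, &c);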
+
+Signed-off-by: Ken Mills <ken.k.mills@intel.com>
+Signed-off-by: Alan Cox <alan@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/tty/n_gsm.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/tty/n_gsm.c
++++ b/drivers/tty/n_gsm.c
+@@ -2375,6 +2375,7 @@ static int gsmld_config(struct tty_struc
+
+ gsm->initiator = c->initiator;
+ gsm->mru = c->mru;
++ gsm->mtu = c->mtu;
+ gsm->encoding = c->encapsulation;
+ gsm->adaption = c->adaption;
+ gsm->n2 = c->n2;
--- /dev/null
+From b2e0861e51f2961954330dcafe6d148ee3ab5cff Mon Sep 17 00:00:00 2001
+From: Timur Tabi <timur@freescale.com>
+Date: Fri, 3 Dec 2010 10:52:14 -0600
+Subject: powerpc/85xx: fix compatible properties of the P1022DS DMA nodes used for audio
+
+From: Timur Tabi <timur@freescale.com>
+
+commit b2e0861e51f2961954330dcafe6d148ee3ab5cff upstream.
+
+In order to prevent the fsl_dma driver from claiming the DMA channels that the
+P1022DS audio driver needs, the compatible properties for those nodes must say
+"fsl,ssi-dma-channel" instead of "fsl,eloplus-dma-channel".
+
+Signed-off-by: Timur Tabi <timur@freescale.com>
+Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/powerpc/boot/dts/p1022ds.dts | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/powerpc/boot/dts/p1022ds.dts
++++ b/arch/powerpc/boot/dts/p1022ds.dts
+@@ -291,13 +291,13 @@
+ ranges = <0x0 0xc100 0x200>;
+ cell-index = <1>;
+ dma00: dma-channel@0 {
+- compatible = "fsl,eloplus-dma-channel";
++ compatible = "fsl,ssi-dma-channel";
+ reg = <0x0 0x80>;
+ cell-index = <0>;
+ interrupts = <76 2>;
+ };
+ dma01: dma-channel@80 {
+- compatible = "fsl,eloplus-dma-channel";
++ compatible = "fsl,ssi-dma-channel";
+ reg = <0x80 0x80>;
+ cell-index = <1>;
+ interrupts = <77 2>;
--- /dev/null
+From 57cdfdf829a850a317425ed93c6a576c9ee6329c Mon Sep 17 00:00:00 2001
+From: Anton Blanchard <anton@samba.org>
+Date: Thu, 21 Oct 2010 00:52:12 +0000
+Subject: powerpc: Fix hcall tracepoint recursion
+
+From: Anton Blanchard <anton@samba.org>
+
+commit 57cdfdf829a850a317425ed93c6a576c9ee6329c upstream.
+
+Spinlocks on shared processor partitions use H_YIELD to notify the
+hypervisor that we are waiting on another virtual CPU. Unfortunately
+this means the hcall tracepoints can recurse.
+
+The patch below adds a per-cpu depth counter and checks it on both the
+entry and exit hcall tracepoints.
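+
+Roughly, the recursion being guarded against looks like:
+
+	plpar_hcall()
+	  __trace_hcall_entry()
+	    trace_hcall_entry()		/* tracing code takes a lock */
+	      spin_lock()		/* contended on a shared LPAR */
+	        H_YIELD hcall		/* re-enters __trace_hcall_entry() */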
+
+Signed-off-by: Anton Blanchard <anton@samba.org>
+Acked-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/powerpc/platforms/pseries/lpar.c | 37 ++++++++++++++++++++++++++++++++++
+ 1 file changed, 37 insertions(+)
+
+--- a/arch/powerpc/platforms/pseries/lpar.c
++++ b/arch/powerpc/platforms/pseries/lpar.c
+@@ -701,6 +701,13 @@ EXPORT_SYMBOL(arch_free_page);
+ /* NB: reg/unreg are called while guarded with the tracepoints_mutex */
+ extern long hcall_tracepoint_refcount;
+
++/*
++ * Since the tracing code might execute hcalls we need to guard against
++ * recursion. One example of this are spinlocks calling H_YIELD on
++ * shared processor partitions.
++ */
++static DEFINE_PER_CPU(unsigned int, hcall_trace_depth);
++
+ void hcall_tracepoint_regfunc(void)
+ {
+ hcall_tracepoint_refcount++;
+@@ -713,12 +720,42 @@ void hcall_tracepoint_unregfunc(void)
+
+ void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
+ {
++ unsigned long flags;
++ unsigned int *depth;
++
++ local_irq_save(flags);
++
++ depth = &__get_cpu_var(hcall_trace_depth);
++
++ if (*depth)
++ goto out;
++
++ (*depth)++;
+ trace_hcall_entry(opcode, args);
++ (*depth)--;
++
++out:
++ local_irq_restore(flags);
+ }
+
+ void __trace_hcall_exit(long opcode, unsigned long retval,
+ unsigned long *retbuf)
+ {
++ unsigned long flags;
++ unsigned int *depth;
++
++ local_irq_save(flags);
++
++ depth = &__get_cpu_var(hcall_trace_depth);
++
++ if (*depth)
++ goto out;
++
++ (*depth)++;
+ trace_hcall_exit(opcode, retval, retbuf);
++ (*depth)--;
++
++out:
++ local_irq_restore(flags);
+ }
+ #endif
--- /dev/null
+From 1f1936ff3febf38d582177ea319eaa278f32c91f Mon Sep 17 00:00:00 2001
+From: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Date: Thu, 20 Jan 2011 20:35:23 +0000
+Subject: powerpc: Fix some 6xx/7xxx CPU setup functions
+
+From: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+
+commit 1f1936ff3febf38d582177ea319eaa278f32c91f upstream.
+
+Some of those functions try to adjust the CPU features, for example
+to remove NAP support on some revisions. However, they use r5 as a
+pointer to the CPU table entry, which might have been right a long
+time ago but no longer is: the caller passes the cpu_spec pointer in
+r4, so r4 is the right register to use. The link register is therefore
+saved in r5 instead, leaving r4 intact across the subroutine calls.
+
+This probably caused some odd behaviours on some PowerMac variants
+using 750cx or 7455 processor revisions.
+
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/powerpc/kernel/cpu_setup_6xx.S | 40 ++++++++++++++++++------------------
+ 1 file changed, 20 insertions(+), 20 deletions(-)
+
+--- a/arch/powerpc/kernel/cpu_setup_6xx.S
++++ b/arch/powerpc/kernel/cpu_setup_6xx.S
+@@ -18,7 +18,7 @@
+ #include <asm/mmu.h>
+
+ _GLOBAL(__setup_cpu_603)
+- mflr r4
++ mflr r5
+ BEGIN_MMU_FTR_SECTION
+ li r10,0
+ mtspr SPRN_SPRG_603_LRU,r10 /* init SW LRU tracking */
+@@ -27,60 +27,60 @@ BEGIN_FTR_SECTION
+ bl __init_fpu_registers
+ END_FTR_SECTION_IFCLR(CPU_FTR_FPU_UNAVAILABLE)
+ bl setup_common_caches
+- mtlr r4
++ mtlr r5
+ blr
+ _GLOBAL(__setup_cpu_604)
+- mflr r4
++ mflr r5
+ bl setup_common_caches
+ bl setup_604_hid0
+- mtlr r4
++ mtlr r5
+ blr
+ _GLOBAL(__setup_cpu_750)
+- mflr r4
++ mflr r5
+ bl __init_fpu_registers
+ bl setup_common_caches
+ bl setup_750_7400_hid0
+- mtlr r4
++ mtlr r5
+ blr
+ _GLOBAL(__setup_cpu_750cx)
+- mflr r4
++ mflr r5
+ bl __init_fpu_registers
+ bl setup_common_caches
+ bl setup_750_7400_hid0
+ bl setup_750cx
+- mtlr r4
++ mtlr r5
+ blr
+ _GLOBAL(__setup_cpu_750fx)
+- mflr r4
++ mflr r5
+ bl __init_fpu_registers
+ bl setup_common_caches
+ bl setup_750_7400_hid0
+ bl setup_750fx
+- mtlr r4
++ mtlr r5
+ blr
+ _GLOBAL(__setup_cpu_7400)
+- mflr r4
++ mflr r5
+ bl __init_fpu_registers
+ bl setup_7400_workarounds
+ bl setup_common_caches
+ bl setup_750_7400_hid0
+- mtlr r4
++ mtlr r5
+ blr
+ _GLOBAL(__setup_cpu_7410)
+- mflr r4
++ mflr r5
+ bl __init_fpu_registers
+ bl setup_7410_workarounds
+ bl setup_common_caches
+ bl setup_750_7400_hid0
+ li r3,0
+ mtspr SPRN_L2CR2,r3
+- mtlr r4
++ mtlr r5
+ blr
+ _GLOBAL(__setup_cpu_745x)
+- mflr r4
++ mflr r5
+ bl setup_common_caches
+ bl setup_745x_specifics
+- mtlr r4
++ mtlr r5
+ blr
+
+ /* Enable caches for 603's, 604, 750 & 7400 */
+@@ -194,10 +194,10 @@ setup_750cx:
+ cror 4*cr0+eq,4*cr0+eq,4*cr1+eq
+ cror 4*cr0+eq,4*cr0+eq,4*cr2+eq
+ bnelr
+- lwz r6,CPU_SPEC_FEATURES(r5)
++ lwz r6,CPU_SPEC_FEATURES(r4)
+ li r7,CPU_FTR_CAN_NAP
+ andc r6,r6,r7
+- stw r6,CPU_SPEC_FEATURES(r5)
++ stw r6,CPU_SPEC_FEATURES(r4)
+ blr
+
+ /* 750fx specific
+@@ -225,12 +225,12 @@ BEGIN_FTR_SECTION
+ andis. r11,r11,L3CR_L3E@h
+ beq 1f
+ END_FTR_SECTION_IFSET(CPU_FTR_L3CR)
+- lwz r6,CPU_SPEC_FEATURES(r5)
++ lwz r6,CPU_SPEC_FEATURES(r4)
+ andi. r0,r6,CPU_FTR_L3_DISABLE_NAP
+ beq 1f
+ li r7,CPU_FTR_CAN_NAP
+ andc r6,r6,r7
+- stw r6,CPU_SPEC_FEATURES(r5)
++ stw r6,CPU_SPEC_FEATURES(r4)
+ 1:
+ mfspr r11,SPRN_HID0
+
--- /dev/null
+From 429f4d8d20b91e4a6c239f951c06a56a6ac22957 Mon Sep 17 00:00:00 2001
+From: Anton Blanchard <anton@samba.org>
+Date: Sat, 29 Jan 2011 12:37:16 +0000
+Subject: powerpc/numa: Fix bug in unmap_cpu_from_node
+
+From: Anton Blanchard <anton@samba.org>
+
+commit 429f4d8d20b91e4a6c239f951c06a56a6ac22957 upstream.
+
+When converting to the new cpumask code I screwed up:
+
+- if (cpu_isset(cpu, numa_cpumask_lookup_table[node])) {
+- cpu_clear(cpu, numa_cpumask_lookup_table[node]);
++ if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
++ cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
+
+This was introduced in commit 25863de07af9 ("powerpc/cpumask: Convert
+NUMA code to new cpumask API").
+
+Fix it.
+
+Signed-off-by: Anton Blanchard <anton@samba.org>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/powerpc/mm/numa.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/powerpc/mm/numa.c
++++ b/arch/powerpc/mm/numa.c
+@@ -181,7 +181,7 @@ static void unmap_cpu_from_node(unsigned
+ dbg("removing cpu %lu from node %d\n", cpu, node);
+
+ if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
+- cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
++ cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
+ } else {
+ printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
+ cpu, node);
--- /dev/null
+From 068c5cc5ac7414a8e9eb7856b4bf3cc4d4744267 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Wed, 19 Jan 2011 12:26:11 +0100
+Subject: sched, cgroup: Use exit hook to avoid use-after-free crash
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit 068c5cc5ac7414a8e9eb7856b4bf3cc4d4744267 upstream.
+
+By not notifying the controller of the on-exit move back to
+init_css_set, we fail to move the task out of the previous
+cgroup's cfs_rq. This opens a window in which a cgroup destroy
+can come in and free the cgroup (there are no active tasks left
+in it, after all) while the not-quite-dead task is still enqueued
+on it.
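+
+Roughly, the use-after-free window (a hedged sketch):
+
+	do_exit()
+	  cgroup_exit()		/* task moved to init_css_set,
+				   scheduler never notified */
+	...			/* task still enqueued on the old
+				   group's cfs_rq */
+	rmdir <cgroup>		/* no tasks visible, cgroup freed */
+	...			/* scheduling the dead task touches
+				   the freed cfs_rq */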
+
+Reported-by: Miklos Vajna <vmiklos@frugalware.org>
+Fixed-by: Mike Galbraith <efault@gmx.de>
+Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Cc: Mike Galbraith <efault@gmx.de>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+LKML-Reference: <1293206353.29444.205.camel@laptop>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ kernel/sched.c | 18 ++++++++++++++++++
+ 1 file changed, 18 insertions(+)
+
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -607,6 +607,9 @@ static inline struct task_group *task_gr
+ {
+ struct cgroup_subsys_state *css;
+
++ if (p->flags & PF_EXITING)
++ return &root_task_group;
++
+ css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
+ lockdep_is_held(&task_rq(p)->lock));
+ return container_of(css, struct task_group, css);
+@@ -9178,6 +9181,20 @@ cpu_cgroup_attach(struct cgroup_subsys *
+ }
+ }
+
++static void
++cpu_cgroup_exit(struct cgroup_subsys *ss, struct task_struct *task)
++{
++ /*
++ * cgroup_exit() is called in the copy_process() failure path.
++ * Ignore this case since the task hasn't ran yet, this avoids
++ * trying to poke a half freed task state from generic code.
++ */
++ if (!(task->flags & PF_EXITING))
++ return;
++
++ sched_move_task(task);
++}
++
+ #ifdef CONFIG_FAIR_GROUP_SCHED
+ static int cpu_shares_write_u64(struct cgroup *cgrp, struct cftype *cftype,
+ u64 shareval)
+@@ -9250,6 +9267,7 @@ struct cgroup_subsys cpu_cgroup_subsys =
+ .destroy = cpu_cgroup_destroy,
+ .can_attach = cpu_cgroup_can_attach,
+ .attach = cpu_cgroup_attach,
++ .exit = cpu_cgroup_exit,
+ .populate = cpu_cgroup_populate,
+ .subsys_id = cpu_cgroup_subsys_id,
+ .early_init = 1,
--- /dev/null
+From 6bf4123760a5aece6e4829ce90b70b6ffd751d65 Mon Sep 17 00:00:00 2001
+From: NeilBrown <neilb@suse.de>
+Date: Wed, 5 Jan 2011 12:50:16 +1100
+Subject: sched: Change wait_for_completion_*_timeout() to return a signed long
+
+From: NeilBrown <neilb@suse.de>
+
+commit 6bf4123760a5aece6e4829ce90b70b6ffd751d65 upstream.
+
+wait_for_completion_*_timeout() can return:
+
+ 0: if the wait timed out
+ -ve: if the wait was interrupted
+ +ve: if the completion was completed.
+
+As they currently return an 'unsigned long', the last two cases
+are not easily distinguished, which can easily result in buggy
+code, as is the case for the recently added
+wait_for_completion_interruptible_timeout() call in
+net/sunrpc/cache.c.
+
+So change them both to return 'long'. As MAX_SCHEDULE_TIMEOUT
+is LONG_MAX, a large +ve return value should never overflow.
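+
+A hedged sketch of how a caller can now distinguish the three cases
+('done' standing in for some struct completion):
+
+	long ret = wait_for_completion_interruptible_timeout(&done, HZ);
+
+	if (ret < 0)		/* interrupted by a signal */
+		return ret;
+	if (ret == 0)		/* timed out */
+		return -ETIMEDOUT;
+	/* ret > 0: completed, with 'ret' jiffies of the timeout left */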
+
+Signed-off-by: NeilBrown <neilb@suse.de>
+Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Cc: J. Bruce Fields <bfields@fieldses.org>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+LKML-Reference: <20110105125016.64ccab0e@notabene.brown>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ include/linux/completion.h | 8 ++++----
+ kernel/sched.c | 4 ++--
+ 2 files changed, 6 insertions(+), 6 deletions(-)
+
+--- a/include/linux/completion.h
++++ b/include/linux/completion.h
+@@ -81,10 +81,10 @@ extern int wait_for_completion_interrupt
+ extern int wait_for_completion_killable(struct completion *x);
+ extern unsigned long wait_for_completion_timeout(struct completion *x,
+ unsigned long timeout);
+-extern unsigned long wait_for_completion_interruptible_timeout(
+- struct completion *x, unsigned long timeout);
+-extern unsigned long wait_for_completion_killable_timeout(
+- struct completion *x, unsigned long timeout);
++extern long wait_for_completion_interruptible_timeout(
++ struct completion *x, unsigned long timeout);
++extern long wait_for_completion_killable_timeout(
++ struct completion *x, unsigned long timeout);
+ extern bool try_wait_for_completion(struct completion *x);
+ extern bool completion_done(struct completion *x);
+
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -4526,7 +4526,7 @@ EXPORT_SYMBOL(wait_for_completion_interr
+ * This waits for either a completion of a specific task to be signaled or for a
+ * specified timeout to expire. It is interruptible. The timeout is in jiffies.
+ */
+-unsigned long __sched
++long __sched
+ wait_for_completion_interruptible_timeout(struct completion *x,
+ unsigned long timeout)
+ {
+@@ -4559,7 +4559,7 @@ EXPORT_SYMBOL(wait_for_completion_killab
+ * signaled or for a specified timeout to expire. It can be
+ * interrupted by a kill signal. The timeout is in jiffies.
+ */
+-unsigned long __sched
++long __sched
+ wait_for_completion_killable_timeout(struct completion *x,
+ unsigned long timeout)
+ {
--- /dev/null
+From 06c3bc655697b19521901f9254eb0bbb2c67e7e8 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Date: Wed, 2 Feb 2011 13:19:48 +0100
+Subject: sched: Fix update_curr_rt()
+
+From: Peter Zijlstra <a.p.zijlstra@chello.nl>
+
+commit 06c3bc655697b19521901f9254eb0bbb2c67e7e8 upstream.
+
+cpu_stopper_thread()
+  migration_cpu_stop()
+    __migrate_task()
+      deactivate_task()
+        dequeue_task()
+          dequeue_task_rt()
+            update_curr_rt()
+
+This will call update_curr_rt() on rq->curr, which at that time is
+rq->stop. The problem is that rq->stop->prio matches an RT prio, so
+update_curr_rt() falsely assumes it is an rt_sched_class task.
+
+Reported-Debugged-Tested-Acked-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+LKML-Reference: <new-submission>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ kernel/sched_rt.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/sched_rt.c
++++ b/kernel/sched_rt.c
+@@ -606,7 +606,7 @@ static void update_curr_rt(struct rq *rq
+ struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
+ u64 delta_exec;
+
+- if (!task_has_rt_policy(curr))
++ if (curr->sched_class != &rt_sched_class)
+ return;
+
+ delta_exec = rq->clock_task - curr->se.exec_start;
xen-platform-use-pci-interfaces-to-request-io-and-mem-resources.patch
mm-page-allocator-adjust-the-per-cpu-counter-threshold-when-memory-is-low.patch
klist-fix-object-alignment-on-64-bit.patch
+epoll-epoll_wait-should-not-use-timespec_add_ns.patch
+sched-change-wait_for_completion_-_timeout-to-return-a-signed-long.patch
+sched-cgroup-use-exit-hook-to-avoid-use-after-free-crash.patch
+sched-fix-update_curr_rt.patch
+cfq-iosched-don-t-wait-if-queue-already-has-requests.patch
+powerpc-85xx-fix-compatible-properties-of-the-p1022ds-dma-nodes-used-for-audio.patch
+powerpc-fix-hcall-tracepoint-recursion.patch
+powerpc-numa-fix-bug-in-unmap_cpu_from_node.patch
+powerpc-fix-some-6xx-7xxx-cpu-setup-functions.patch
+n_gsm-copy-mtu-over-when-configuring-via-ioctl-interface.patch
+firewire-core-fix-unstable-i-o-with-canon-camcorder.patch
+workqueue-relax-lockdep-annotation-on-flush_work.patch
--- /dev/null
+From e159489baa717dbae70f9903770a6a4990865887 Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Sun, 9 Jan 2011 23:32:15 +0100
+Subject: workqueue: relax lockdep annotation on flush_work()
+
+From: Tejun Heo <tj@kernel.org>
+
+commit e159489baa717dbae70f9903770a6a4990865887 upstream.
+
+Currently, the lockdep annotation in flush_work() requires exclusive
+access on the workqueue the target work is queued on and triggers
+warning if a work is trying to flush another work on the same
+workqueue; however, this is no longer true as workqueues can now
+execute multiple works concurrently.
+
+This patch adds lock_map_acquire_read() and makes process_one_work()
+hold read access to the workqueue while executing a work. It also makes
+start_flush_work() check for write access if the concurrency level is
+one or the workqueue has a rescuer (as only one execution resource -
+the rescuer - is guaranteed to be available under memory pressure),
+and for read access otherwise.
+
+This better represents what's going on and removes spurious lockdep
+warnings which are triggered by the fake dependency chain created
+through flush_work().
+
+* Peter pointed out that flushing another work from a WQ_MEM_RECLAIM
+ wq breaks forward progress guarantee under memory pressure.
+ Condition check accordingly updated.
+
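+A hedged illustration of a flush that used to trip the annotation
+(hypothetical work items sharing one workqueue with max_active > 1):
+
+	static void work_b_fn(struct work_struct *w) { /* ... */ }
+	static DECLARE_WORK(work_b, work_b_fn);
+
+	static void work_a_fn(struct work_struct *w)
+	{
+		/* Flushing another work on the same workqueue is
+		 * legitimate when max_active > 1, but lockdep warned. */
+		flush_work(&work_b);
+	}
+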
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Reported-by: "Rafael J. Wysocki" <rjw@sisk.pl>
+Tested-by: "Rafael J. Wysocki" <rjw@sisk.pl>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ include/linux/lockdep.h | 3 +++
+ kernel/workqueue.c | 14 ++++++++++++--
+ 2 files changed, 15 insertions(+), 2 deletions(-)
+
+--- a/include/linux/lockdep.h
++++ b/include/linux/lockdep.h
+@@ -522,12 +522,15 @@ static inline void print_irqtrace_events
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+ # ifdef CONFIG_PROVE_LOCKING
+ # define lock_map_acquire(l) lock_acquire(l, 0, 0, 0, 2, NULL, _THIS_IP_)
++# define lock_map_acquire_read(l) lock_acquire(l, 0, 0, 2, 2, NULL, _THIS_IP_)
+ # else
+ # define lock_map_acquire(l) lock_acquire(l, 0, 0, 0, 1, NULL, _THIS_IP_)
++# define lock_map_acquire_read(l) lock_acquire(l, 0, 0, 2, 1, NULL, _THIS_IP_)
+ # endif
+ # define lock_map_release(l) lock_release(l, 1, _THIS_IP_)
+ #else
+ # define lock_map_acquire(l) do { } while (0)
++# define lock_map_acquire_read(l) do { } while (0)
+ # define lock_map_release(l) do { } while (0)
+ #endif
+
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -1806,7 +1806,7 @@ __acquires(&gcwq->lock)
+ spin_unlock_irq(&gcwq->lock);
+
+ work_clear_pending(work);
+- lock_map_acquire(&cwq->wq->lockdep_map);
++ lock_map_acquire_read(&cwq->wq->lockdep_map);
+ lock_map_acquire(&lockdep_map);
+ trace_workqueue_execute_start(work);
+ f(work);
+@@ -2350,8 +2350,18 @@ static bool start_flush_work(struct work
+ insert_wq_barrier(cwq, barr, work, worker);
+ spin_unlock_irq(&gcwq->lock);
+
+- lock_map_acquire(&cwq->wq->lockdep_map);
++ /*
++ * If @max_active is 1 or rescuer is in use, flushing another work
++ * item on the same workqueue may lead to deadlock. Make sure the
++ * flusher is not running on the same workqueue by verifying write
++ * access.
++ */
++ if (cwq->wq->saved_max_active == 1 || cwq->wq->flags & WQ_RESCUER)
++ lock_map_acquire(&cwq->wq->lockdep_map);
++ else
++ lock_map_acquire_read(&cwq->wq->lockdep_map);
+ lock_map_release(&cwq->wq->lockdep_map);
++
+ return true;
+ already_gone:
+ spin_unlock_irq(&gcwq->lock);