--- /dev/null
+From d50235b7bc3ee0a0427984d763ea7534149531b4 Mon Sep 17 00:00:00 2001
+From: Jianpeng Ma <majianpeng@gmail.com>
+Date: Wed, 3 Jul 2013 13:25:24 +0200
+Subject: elevator: Fix a race in elevator switching
+
+From: Jianpeng Ma <majianpeng@gmail.com>
+
+commit d50235b7bc3ee0a0427984d763ea7534149531b4 upstream.
+
+There's a race between elevator switching and normal I/O operation.
+Because the allocations of struct elevator_queue and struct elevator_data
+are not done as one atomic operation, there is a window in which a NULL
+->elevator_data can be dereferenced.
+For example:
+
+ Thread A:                          Thread B:
+ blk_queue_bio                      elevator_switch
+   spin_lock_irq(q->queue_lock)       elevator_alloc
+   elv_merge                          elevator_init_fn
+
+Because elevator_alloc() cannot be called with queue_lock held, there is
+a moment where q->elevator has been set but ->elevator_data is still
+NULL.  If thread A calls elv_merge at that moment, it needs information
+from elevator_data, and the crash happens.
+
+Move the elevator_alloc() call into elevator_init_fn so that the two
+allocations become one atomic operation with respect to queue_lock.
+
+The bug is easy to reproduce with the following method:
+ 1: dd if=/dev/sdb of=/dev/null
+ 2: while true; do echo noop > scheduler; echo deadline > scheduler; done
+
+The fix was verified with the same method.
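+
+A rough userspace sketch of the idea behind the fix (illustrative only,
+not kernel code; the names here are made up): build the new object and
+its private data completely, and only then publish the pointer under the
+lock that readers take.
+
+ #include <pthread.h>
+ #include <stdio.h>
+ #include <stdlib.h>
+
+ struct elv {
+         int *data;                      /* stands in for ->elevator_data */
+ };
+
+ static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
+ static struct elv *cur;                 /* stands in for q->elevator */
+
+ static void elv_switch(void)
+ {
+         struct elv *e = malloc(sizeof(*e));
+
+         if (!e)
+                 return;
+         e->data = malloc(sizeof(*e->data));
+         if (!e->data) {
+                 free(e);
+                 return;
+         }
+         *e->data = 42;                  /* fully initialized first... */
+
+         pthread_mutex_lock(&queue_lock);
+         cur = e;                        /* ...then published under the lock */
+         pthread_mutex_unlock(&queue_lock);
+ }
+
+ static void *reader(void *arg)
+ {
+         (void)arg;
+         pthread_mutex_lock(&queue_lock);
+         if (cur)                        /* never sees cur->data == NULL */
+                 printf("reader sees %d\n", *cur->data);
+         pthread_mutex_unlock(&queue_lock);
+         return NULL;
+ }
+
+ int main(void)
+ {
+         pthread_t t;
+
+         elv_switch();
+         pthread_create(&t, NULL, reader, NULL);
+         pthread_join(t, NULL);
+         if (cur) {
+                 free(cur->data);
+                 free(cur);
+         }
+         return 0;
+ }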
+
+Signed-off-by: Jianpeng Ma <majianpeng@gmail.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Cc: Jonghwan Choi <jhbird.choi@samsung.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ block/cfq-iosched.c | 17 ++++++++++++++---
+ block/deadline-iosched.c | 16 +++++++++++++---
+ block/elevator.c | 25 +++++--------------------
+ block/noop-iosched.c | 17 ++++++++++++++---
+ include/linux/elevator.h | 6 +++++-
+ 5 files changed, 51 insertions(+), 30 deletions(-)
+
+--- a/block/cfq-iosched.c
++++ b/block/cfq-iosched.c
+@@ -4347,18 +4347,28 @@ static void cfq_exit_queue(struct elevat
+ kfree(cfqd);
+ }
+
+-static int cfq_init_queue(struct request_queue *q)
++static int cfq_init_queue(struct request_queue *q, struct elevator_type *e)
+ {
+ struct cfq_data *cfqd;
+ struct blkcg_gq *blkg __maybe_unused;
+ int i, ret;
++ struct elevator_queue *eq;
++
++ eq = elevator_alloc(q, e);
++ if (!eq)
++ return -ENOMEM;
+
+ cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
+- if (!cfqd)
++ if (!cfqd) {
++ kobject_put(&eq->kobj);
+ return -ENOMEM;
++ }
++ eq->elevator_data = cfqd;
+
+ cfqd->queue = q;
+- q->elevator->elevator_data = cfqd;
++ spin_lock_irq(q->queue_lock);
++ q->elevator = eq;
++ spin_unlock_irq(q->queue_lock);
+
+ /* Init root service tree */
+ cfqd->grp_service_tree = CFQ_RB_ROOT;
+@@ -4433,6 +4443,7 @@ static int cfq_init_queue(struct request
+
+ out_free:
+ kfree(cfqd);
++ kobject_put(&eq->kobj);
+ return ret;
+ }
+
+--- a/block/deadline-iosched.c
++++ b/block/deadline-iosched.c
+@@ -337,13 +337,21 @@ static void deadline_exit_queue(struct e
+ /*
+ * initialize elevator private data (deadline_data).
+ */
+-static int deadline_init_queue(struct request_queue *q)
++static int deadline_init_queue(struct request_queue *q, struct elevator_type *e)
+ {
+ struct deadline_data *dd;
++ struct elevator_queue *eq;
++
++ eq = elevator_alloc(q, e);
++ if (!eq)
++ return -ENOMEM;
+
+ dd = kmalloc_node(sizeof(*dd), GFP_KERNEL | __GFP_ZERO, q->node);
+- if (!dd)
++ if (!dd) {
++ kobject_put(&eq->kobj);
+ return -ENOMEM;
++ }
++ eq->elevator_data = dd;
+
+ INIT_LIST_HEAD(&dd->fifo_list[READ]);
+ INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
+@@ -355,7 +363,9 @@ static int deadline_init_queue(struct re
+ dd->front_merges = 1;
+ dd->fifo_batch = fifo_batch;
+
+- q->elevator->elevator_data = dd;
++ spin_lock_irq(q->queue_lock);
++ q->elevator = eq;
++ spin_unlock_irq(q->queue_lock);
+ return 0;
+ }
+
+--- a/block/elevator.c
++++ b/block/elevator.c
+@@ -150,7 +150,7 @@ void __init load_default_elevator_module
+
+ static struct kobj_type elv_ktype;
+
+-static struct elevator_queue *elevator_alloc(struct request_queue *q,
++struct elevator_queue *elevator_alloc(struct request_queue *q,
+ struct elevator_type *e)
+ {
+ struct elevator_queue *eq;
+@@ -170,6 +170,7 @@ err:
+ elevator_put(e);
+ return NULL;
+ }
++EXPORT_SYMBOL(elevator_alloc);
+
+ static void elevator_release(struct kobject *kobj)
+ {
+@@ -221,16 +222,7 @@ int elevator_init(struct request_queue *
+ }
+ }
+
+- q->elevator = elevator_alloc(q, e);
+- if (!q->elevator)
+- return -ENOMEM;
+-
+- err = e->ops.elevator_init_fn(q);
+- if (err) {
+- kobject_put(&q->elevator->kobj);
+- return err;
+- }
+-
++ err = e->ops.elevator_init_fn(q, e);
+ return 0;
+ }
+ EXPORT_SYMBOL(elevator_init);
+@@ -935,17 +927,10 @@ static int elevator_switch(struct reques
+ spin_unlock_irq(q->queue_lock);
+
+ /* allocate, init and register new elevator */
+- err = -ENOMEM;
+- q->elevator = elevator_alloc(q, new_e);
+- if (!q->elevator)
++ err = new_e->ops.elevator_init_fn(q, new_e);
++ if (err)
+ goto fail_init;
+
+- err = new_e->ops.elevator_init_fn(q);
+- if (err) {
+- kobject_put(&q->elevator->kobj);
+- goto fail_init;
+- }
+-
+ if (registered) {
+ err = elv_register_queue(q);
+ if (err)
+--- a/block/noop-iosched.c
++++ b/block/noop-iosched.c
+@@ -59,16 +59,27 @@ noop_latter_request(struct request_queue
+ return list_entry(rq->queuelist.next, struct request, queuelist);
+ }
+
+-static int noop_init_queue(struct request_queue *q)
++static int noop_init_queue(struct request_queue *q, struct elevator_type *e)
+ {
+ struct noop_data *nd;
++ struct elevator_queue *eq;
++
++ eq = elevator_alloc(q, e);
++ if (!eq)
++ return -ENOMEM;
+
+ nd = kmalloc_node(sizeof(*nd), GFP_KERNEL, q->node);
+- if (!nd)
++ if (!nd) {
++ kobject_put(&eq->kobj);
+ return -ENOMEM;
++ }
++ eq->elevator_data = nd;
+
+ INIT_LIST_HEAD(&nd->queue);
+- q->elevator->elevator_data = nd;
++
++ spin_lock_irq(q->queue_lock);
++ q->elevator = eq;
++ spin_unlock_irq(q->queue_lock);
+ return 0;
+ }
+
+--- a/include/linux/elevator.h
++++ b/include/linux/elevator.h
+@@ -7,6 +7,7 @@
+ #ifdef CONFIG_BLOCK
+
+ struct io_cq;
++struct elevator_type;
+
+ typedef int (elevator_merge_fn) (struct request_queue *, struct request **,
+ struct bio *);
+@@ -35,7 +36,8 @@ typedef void (elevator_put_req_fn) (stru
+ typedef void (elevator_activate_req_fn) (struct request_queue *, struct request *);
+ typedef void (elevator_deactivate_req_fn) (struct request_queue *, struct request *);
+
+-typedef int (elevator_init_fn) (struct request_queue *);
++typedef int (elevator_init_fn) (struct request_queue *,
++ struct elevator_type *e);
+ typedef void (elevator_exit_fn) (struct elevator_queue *);
+
+ struct elevator_ops
+@@ -155,6 +157,8 @@ extern int elevator_init(struct request_
+ extern void elevator_exit(struct elevator_queue *);
+ extern int elevator_change(struct request_queue *, const char *);
+ extern bool elv_rq_merge_ok(struct request *, struct bio *);
++extern struct elevator_queue *elevator_alloc(struct request_queue *,
++ struct elevator_type *);
+
+ /*
+ * Helper functions.
--- /dev/null
+From 8c8296223f3abb142be8fc31711b18a704c0e7d8 Mon Sep 17 00:00:00 2001
+From: yonghua zheng <younghua.zheng@gmail.com>
+Date: Tue, 13 Aug 2013 16:01:03 -0700
+Subject: fs/proc/task_mmu.c: fix buffer overflow in add_page_map()
+
+From: yonghua zheng <younghua.zheng@gmail.com>
+
+commit 8c8296223f3abb142be8fc31711b18a704c0e7d8 upstream.
+
+Recently we met quite a lot of random kernel panic issues after enabling
+CONFIG_PROC_PAGE_MONITOR.  After debugging we found they have something
+to do with the following bug in pagemap:
+
+In struct pagemapread:
+
+ struct pagemapread {
+ int pos, len;
+ pagemap_entry_t *buffer;
+ bool v2;
+ };
+
+pos is the number of PM_ENTRY_BYTES-sized entries in buffer, but len is
+the size of buffer in bytes, so it is a mistake to compare pos and len
+in add_page_map() to check whether the buffer is full; this can lead to
+a buffer overflow and random kernel panics.
+
+Correct len to be the total number of PM_ENTRY_BYTES-sized entries that
+fit in buffer.
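+
+A rough userspace sketch of the unit mix-up (illustrative only; struct
+reader and add_entry are made-up names, not kernel code): when pos counts
+entries but len counts bytes, the fullness check fires far too late;
+keeping both in entry units makes the bound correct.
+
+ #include <stdint.h>
+ #include <stdio.h>
+ #include <stdlib.h>
+
+ typedef struct { uint64_t pme; } entry_t;       /* like pagemap_entry_t */
+
+ struct reader {
+         int pos, len;           /* both in units of entries, not bytes */
+         entry_t *buffer;
+ };
+
+ static int add_entry(struct reader *r, entry_t e)
+ {
+         if (r->pos >= r->len)   /* same units on both sides of the check */
+                 return -1;
+         r->buffer[r->pos++] = e;
+         return 0;
+ }
+
+ int main(void)
+ {
+         struct reader r = { .pos = 0, .len = 512 };
+         int i;
+
+         r.buffer = malloc(r.len * sizeof(entry_t));     /* bytes only here */
+         if (!r.buffer)
+                 return 1;
+         for (i = 0; i < 100000; i++)
+                 if (add_entry(&r, (entry_t){ .pme = i }))
+                         break;
+         printf("stored %d entries\n", r.pos);           /* prints 512 */
+         free(r.buffer);
+         return 0;
+ }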
+
+[akpm@linux-foundation.org: document pagemapread.pos and .len units, fix PM_ENTRY_BYTES definition]
+Signed-off-by: Yonghua Zheng <younghua.zheng@gmail.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/proc/task_mmu.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -792,14 +792,14 @@ typedef struct {
+ } pagemap_entry_t;
+
+ struct pagemapread {
+- int pos, len;
++ int pos, len; /* units: PM_ENTRY_BYTES, not bytes */
+ pagemap_entry_t *buffer;
+ };
+
+ #define PAGEMAP_WALK_SIZE (PMD_SIZE)
+ #define PAGEMAP_WALK_MASK (PMD_MASK)
+
+-#define PM_ENTRY_BYTES sizeof(u64)
++#define PM_ENTRY_BYTES sizeof(pagemap_entry_t)
+ #define PM_STATUS_BITS 3
+ #define PM_STATUS_OFFSET (64 - PM_STATUS_BITS)
+ #define PM_STATUS_MASK (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET)
+@@ -1038,8 +1038,8 @@ static ssize_t pagemap_read(struct file
+ if (!count)
+ goto out_task;
+
+- pm.len = PM_ENTRY_BYTES * (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
+- pm.buffer = kmalloc(pm.len, GFP_TEMPORARY);
++ pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
++ pm.buffer = kmalloc(pm.len * PM_ENTRY_BYTES, GFP_TEMPORARY);
+ ret = -ENOMEM;
+ if (!pm.buffer)
+ goto out_task;
--- /dev/null
+From 3e6b11df245180949938734bc192eaf32f3a06b3 Mon Sep 17 00:00:00 2001
+From: Andrey Vagin <avagin@openvz.org>
+Date: Tue, 13 Aug 2013 16:00:47 -0700
+Subject: memcg: don't initialize kmem-cache destroying work for root caches
+
+From: Andrey Vagin <avagin@openvz.org>
+
+commit 3e6b11df245180949938734bc192eaf32f3a06b3 upstream.
+
+struct memcg_cache_params has a union. Different parts of this union
+are used for root and non-root caches.  The part with the destroying
+work is used only for non-root caches.
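+
+A rough userspace sketch of the union pitfall (illustrative only; the
+struct and field names are made up): initializing a member that belongs
+to one variant scribbles over storage the other variant owns, so
+per-variant setup has to stay inside the branch for that variant.
+
+ #include <stdio.h>
+ #include <string.h>
+
+ struct cache_params {
+         int is_root_cache;
+         union {
+                 struct { char link_name[32]; } root;    /* root caches only */
+                 struct { long destroy_work; } child;    /* non-root only */
+         } u;
+ };
+
+ static void register_cache(struct cache_params *p, int is_root)
+ {
+         memset(p, 0, sizeof(*p));
+         if (is_root) {
+                 p->is_root_cache = 1;
+                 strcpy(p->u.root.link_name, "root-cache");
+         } else {
+                 /* only the non-root variant owns destroy_work */
+                 p->u.child.destroy_work = 1;
+         }
+ }
+
+ int main(void)
+ {
+         struct cache_params root, child;
+
+         register_cache(&root, 1);
+         register_cache(&child, 0);
+         printf("%s / %ld\n", root.u.root.link_name, child.u.child.destroy_work);
+         return 0;
+ }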
+
+I fixed the same problem in another place (v3.9-rc1-16204-gf101a94), but
+didn't notice this one.
+
+This patch fixes the kernel panic:
+
+[ 46.848187] BUG: unable to handle kernel paging request at 000000fffffffeb8
+[ 46.849026] IP: [<ffffffff811a484c>] kmem_cache_destroy_memcg_children+0x6c/0xc0
+[ 46.849092] PGD 0
+[ 46.849092] Oops: 0000 [#1] SMP
+...
+
+Signed-off-by: Andrey Vagin <avagin@openvz.org>
+Cc: Glauber Costa <glommer@openvz.org>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Acked-by: Michal Hocko <mhocko@suse.cz>
+Cc: Balbir Singh <bsingharora@gmail.com>
+Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
+Cc: Konstantin Khlebnikov <khlebnikov@openvz.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/memcontrol.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -3186,11 +3186,11 @@ int memcg_register_cache(struct mem_cgro
+ if (!s->memcg_params)
+ return -ENOMEM;
+
+- INIT_WORK(&s->memcg_params->destroy,
+- kmem_cache_destroy_work_func);
+ if (memcg) {
+ s->memcg_params->memcg = memcg;
+ s->memcg_params->root_cache = root_cache;
++ INIT_WORK(&s->memcg_params->destroy,
++ kmem_cache_destroy_work_func);
+ } else
+ s->memcg_params->is_root_cache = true;
+
--- /dev/null
+From dfa9771a7c4784bafd0673bc7abcee3813088b77 Mon Sep 17 00:00:00 2001
+From: Michal Simek <michal.simek@xilinx.com>
+Date: Tue, 13 Aug 2013 16:00:53 -0700
+Subject: microblaze: fix clone syscall
+
+From: Michal Simek <michal.simek@xilinx.com>
+
+commit dfa9771a7c4784bafd0673bc7abcee3813088b77 upstream.
+
+Fix inadvertent breakage in the clone syscall ABI for Microblaze that
+was introduced in commit f3268edbe6fe ("microblaze: switch to generic
+fork/vfork/clone").
+
+The Microblaze syscall ABI for clone takes the parent tid address in the
+4th argument; the third argument slot is used for the stack size. The
+incorrectly-used CLONE_BACKWARDS type assigned parent tid to the 3rd
+slot.
+
+This commit restores the original ABI so that existing userspace libc
+code will work correctly.
+
+All kernel versions from v3.8-rc1 were affected.
+
+Signed-off-by: Michal Simek <michal.simek@xilinx.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/Kconfig | 6 ++++++
+ arch/microblaze/Kconfig | 2 +-
+ include/linux/syscalls.h | 5 +++++
+ kernel/fork.c | 6 ++++++
+ 4 files changed, 18 insertions(+), 1 deletion(-)
+
+--- a/arch/Kconfig
++++ b/arch/Kconfig
+@@ -404,6 +404,12 @@ config CLONE_BACKWARDS2
+ help
+ Architecture has the first two arguments of clone(2) swapped.
+
++config CLONE_BACKWARDS3
++ bool
++ help
++ Architecture has tls passed as the 3rd argument of clone(2),
++ not the 5th one.
++
+ config ODD_RT_SIGACTION
+ bool
+ help
+--- a/arch/microblaze/Kconfig
++++ b/arch/microblaze/Kconfig
+@@ -28,7 +28,7 @@ config MICROBLAZE
+ select GENERIC_CLOCKEVENTS
+ select GENERIC_IDLE_POLL_SETUP
+ select MODULES_USE_ELF_RELA
+- select CLONE_BACKWARDS
++ select CLONE_BACKWARDS3
+
+ config SWAP
+ def_bool n
+--- a/include/linux/syscalls.h
++++ b/include/linux/syscalls.h
+@@ -802,9 +802,14 @@ asmlinkage long sys_vfork(void);
+ asmlinkage long sys_clone(unsigned long, unsigned long, int __user *, int,
+ int __user *);
+ #else
++#ifdef CONFIG_CLONE_BACKWARDS3
++asmlinkage long sys_clone(unsigned long, unsigned long, int, int __user *,
++ int __user *, int);
++#else
+ asmlinkage long sys_clone(unsigned long, unsigned long, int __user *,
+ int __user *, int);
+ #endif
++#endif
+
+ asmlinkage long sys_execve(const char __user *filename,
+ const char __user *const __user *argv,
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -1675,6 +1675,12 @@ SYSCALL_DEFINE5(clone, unsigned long, ne
+ int __user *, parent_tidptr,
+ int __user *, child_tidptr,
+ int, tls_val)
++#elif defined(CONFIG_CLONE_BACKWARDS3)
++SYSCALL_DEFINE6(clone, unsigned long, clone_flags, unsigned long, newsp,
++ int, stack_size,
++ int __user *, parent_tidptr,
++ int __user *, child_tidptr,
++ int, tls_val)
+ #else
+ SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
+ int __user *, parent_tidptr,
--- /dev/null
+From b88a2595b6d8aedbd275c07dfa784657b4f757eb Mon Sep 17 00:00:00 2001
+From: Stephen Boyd <sboyd@codeaurora.org>
+Date: Wed, 7 Aug 2013 16:18:08 -0700
+Subject: perf/arm: Fix armpmu_map_hw_event()
+
+From: Stephen Boyd <sboyd@codeaurora.org>
+
+commit b88a2595b6d8aedbd275c07dfa784657b4f757eb upstream.
+
+Fix constraint check in armpmu_map_hw_event().
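+
+A rough userspace sketch of the pattern being fixed (illustrative only;
+the names and table size are made up): reject an out-of-range config
+before using it to index the event map.
+
+ #include <stdint.h>
+ #include <stdio.h>
+
+ #define HW_MAX          4
+ #define UNSUPPORTED     0xffff
+
+ static const unsigned event_map[HW_MAX] = { 0x11, UNSUPPORTED, 0x24, 0x08 };
+
+ static int map_hw_event(uint64_t config)
+ {
+         unsigned mapping;
+
+         if (config >= HW_MAX)           /* reject the index before using it */
+                 return -1;
+
+         mapping = event_map[config];
+         return mapping == UNSUPPORTED ? -1 : (int)mapping;
+ }
+
+ int main(void)
+ {
+         /* valid, unsupported, and wildly out-of-range config values */
+         printf("%d %d %d\n", map_hw_event(0), map_hw_event(1),
+                map_hw_event(1ULL << 40));
+         return 0;
+ }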
+
+Reported-and-tested-by: Vince Weaver <vincent.weaver@maine.edu>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/kernel/perf_event.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/arch/arm/kernel/perf_event.c
++++ b/arch/arm/kernel/perf_event.c
+@@ -53,7 +53,12 @@ armpmu_map_cache_event(const unsigned (*
+ static int
+ armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
+ {
+- int mapping = (*event_map)[config];
++ int mapping;
++
++ if (config >= PERF_COUNT_HW_MAX)
++ return -ENOENT;
++
++ mapping = (*event_map)[config];
+ return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
+ }
+
--- /dev/null
+From c9601247f8f3fdc18aed7ed7e490e8dfcd07f122 Mon Sep 17 00:00:00 2001
+From: Vince Weaver <vincent.weaver@maine.edu>
+Date: Fri, 2 Aug 2013 10:47:34 -0400
+Subject: perf/x86: Fix intel QPI uncore event definitions
+
+From: Vince Weaver <vincent.weaver@maine.edu>
+
+commit c9601247f8f3fdc18aed7ed7e490e8dfcd07f122 upstream.
+
+John McCalpin reports that the "drs_data" and "ncb_data" QPI
+uncore events are missing the "extra bit" and always return zero
+values unless the bit is properly set.
+
+More details from him:
+
+ According to the Xeon E5-2600 Product Family Uncore Performance
+ Monitoring Guide, Table 2-94, about 1/2 of the QPI Link Layer events
+ (including the ones that "perf" calls "drs_data" and "ncb_data") require
+ that the "extra bit" be set.
+
+ This was confusing for a while -- a note at the bottom of page 94 says
+ that the "extra bit" is bit 16 of the control register.
+ Unfortunately, Table 2-86 clearly says that bit 16 is reserved and must
+ be zero. Looking around a bit, I found that bit 21 appears to be the
+ correct "extra bit", and further investigation shows that "perf" actually
+ agrees with me:
+ [root@c560-003.stampede]# cat /sys/bus/event_source/devices/uncore_qpi_0/format/event
+ config:0-7,21
+
+ So the command
+ # perf -e "uncore_qpi_0/event=drs_data/"
+ Is the same as
+ # perf -e "uncore_qpi_0/event=0x02,umask=0x08/"
+ While it should be
+ # perf -e "uncore_qpi_0/event=0x102,umask=0x08/"
+
+ I confirmed that this last version gives results that agree with the
+ amount of data that I expected the STREAM benchmark to move across the QPI
+ link in the second (cross-chip) test of the original script.
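+
+A rough sketch of why event=0x102 works (illustrative only; encode_event
+is a made-up helper, not perf code): with the sysfs format string
+config:0-7,21, the low eight bits of the event value land in config bits
+0-7 and the ninth bit lands in config bit 21, so 0x102 is event code 0x02
+with the "extra bit" set.
+
+ #include <stdint.h>
+ #include <stdio.h>
+
+ /* pack an "event" field described as config:0-7,21 into the config word */
+ static uint64_t encode_event(uint64_t event)
+ {
+         uint64_t config = 0;
+
+         config |= event & 0xff;                 /* value bits 0-7 -> config 0-7 */
+         config |= ((event >> 8) & 0x1) << 21;   /* value bit 8    -> config 21  */
+         return config;
+ }
+
+ int main(void)
+ {
+         /* 0x02 leaves bit 21 clear; 0x102 also sets the "extra bit" */
+         printf("0x%llx 0x%llx\n",
+                (unsigned long long)encode_event(0x02),
+                (unsigned long long)encode_event(0x102));
+         return 0;
+ }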
+
+Reported-by: John McCalpin <mccalpin@tacc.utexas.edu>
+Signed-off-by: Vince Weaver <vincent.weaver@maine.edu>
+Cc: zheng.z.yan@intel.com
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
+Cc: Paul Mackerras <paulus@samba.org>
+Link: http://lkml.kernel.org/r/alpine.DEB.2.10.1308021037280.26119@vincent-weaver-1.um.maine.edu
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/cpu/perf_event_intel_uncore.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
++++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+@@ -314,8 +314,8 @@ static struct uncore_event_desc snbep_un
+ static struct uncore_event_desc snbep_uncore_qpi_events[] = {
+ INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x14"),
+ INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
+- INTEL_UNCORE_EVENT_DESC(drs_data, "event=0x02,umask=0x08"),
+- INTEL_UNCORE_EVENT_DESC(ncb_data, "event=0x03,umask=0x04"),
++ INTEL_UNCORE_EVENT_DESC(drs_data, "event=0x102,umask=0x08"),
++ INTEL_UNCORE_EVENT_DESC(ncb_data, "event=0x103,umask=0x04"),
+ { /* end: all zeroes */ },
+ };
+
--- /dev/null
+From bf0bd948d1682e3996adc093b43021ed391983e6 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Fri, 26 Jul 2013 23:48:42 +0200
+Subject: sched: Ensure update_cfs_shares() is called for parents of continuously-running tasks
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit bf0bd948d1682e3996adc093b43021ed391983e6 upstream.
+
+We typically update a task_group's shares within the dequeue/enqueue
+path. However, continuously running tasks sharing a CPU are not
+subject to these updates as they are only put/picked. Unfortunately,
+when we reverted f269ae046 (in 17bc14b7), we lost the augmenting
+periodic update that was supposed to account for this; resulting in a
+potential loss of fairness.
+
+To fix this, re-introduce the explicit update in
+update_cfs_rq_blocked_load() [called via entity_tick()].
+
+Reported-by: Max Hailperin <max@gustavus.edu>
+Signed-off-by: Peter Zijlstra <peterz@infradead.org>
+Reviewed-by: Paul Turner <pjt@google.com>
+Link: http://lkml.kernel.org/n/tip-9545m3apw5d93ubyrotrj31y@git.kernel.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/sched/fair.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -1984,6 +1984,7 @@ entity_tick(struct cfs_rq *cfs_rq, struc
+ */
+ update_entity_load_avg(curr, 1);
+ update_cfs_rq_blocked_load(cfs_rq, 1);
++ update_cfs_shares(cfs_rq);
+
+ #ifdef CONFIG_SCHED_HRTICK
+ /*
--- /dev/null
+perf-x86-fix-intel-qpi-uncore-event-definitions.patch
+perf-arm-fix-armpmu_map_hw_event.patch
+memcg-don-t-initialize-kmem-cache-destroying-work-for-root-caches.patch
+microblaze-fix-clone-syscall.patch
+x86-get_unmapped_area-use-proper-mmap-base-for-bottom-up-direction.patch
+fs-proc-task_mmu.c-fix-buffer-overflow-in-add_page_map.patch
+sched-ensure-update_cfs_shares-is-called-for-parents-of-continuously-running-tasks.patch
+elevator-fix-a-race-in-elevator-switching.patch
--- /dev/null
+From df54d6fa54275ce59660453e29d1228c2b45a826 Mon Sep 17 00:00:00 2001
+From: Radu Caragea <sinaelgl@gmail.com>
+Date: Tue, 13 Aug 2013 16:00:59 -0700
+Subject: x86 get_unmapped_area(): use proper mmap base for bottom-up direction
+
+From: Radu Caragea <sinaelgl@gmail.com>
+
+commit df54d6fa54275ce59660453e29d1228c2b45a826 upstream.
+
+When the stack is set to unlimited, the bottom-up direction is used for
+mmappings, but the mmap_base is not used, which effectively renders ASLR
+for mmappings, along with PIE, useless.
+
+Cc: Michel Lespinasse <walken@google.com>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Reviewed-by: Rik van Riel <riel@redhat.com>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Cc: Adrian Sendroiu <molecula2788@gmail.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/sys_x86_64.c | 2 +-
+ arch/x86/mm/mmap.c | 2 +-
+ include/linux/sched.h | 1 +
+ 3 files changed, 3 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kernel/sys_x86_64.c
++++ b/arch/x86/kernel/sys_x86_64.c
+@@ -101,7 +101,7 @@ static void find_start_end(unsigned long
+ *begin = new_begin;
+ }
+ } else {
+- *begin = TASK_UNMAPPED_BASE;
++ *begin = mmap_legacy_base();
+ *end = TASK_SIZE;
+ }
+ }
+--- a/arch/x86/mm/mmap.c
++++ b/arch/x86/mm/mmap.c
+@@ -98,7 +98,7 @@ static unsigned long mmap_base(void)
+ * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
+ * does, but not when emulating X86_32
+ */
+-static unsigned long mmap_legacy_base(void)
++unsigned long mmap_legacy_base(void)
+ {
+ if (mmap_is_ia32())
+ return TASK_UNMAPPED_BASE;
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -314,6 +314,7 @@ struct nsproxy;
+ struct user_namespace;
+
+ #ifdef CONFIG_MMU
++extern unsigned long mmap_legacy_base(void);
+ extern void arch_pick_mmap_layout(struct mm_struct *mm);
+ extern unsigned long
+ arch_get_unmapped_area(struct file *, unsigned long, unsigned long,