--- /dev/null
+From 965278dcb8ab0b1f666cc47937933c4be4aea48d Mon Sep 17 00:00:00 2001
+From: Mark Rutland <mark.rutland@arm.com>
+Date: Wed, 13 May 2015 15:07:54 +0100
+Subject: ARM: 8356/1: mm: handle non-pmd-aligned end of RAM
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+commit 965278dcb8ab0b1f666cc47937933c4be4aea48d upstream.
+
+At boot time we round the memblock limit down to section size in an
+attempt to ensure that we will have mapped this RAM with section
+mappings prior to allocating from it. When mapping RAM we iterate over
+PMD-sized chunks, creating these section mappings.
+
+Section mappings are only created when the end of a chunk is aligned to
+section size. Unfortunately, with classic page tables (where PMD_SIZE is
+2 * SECTION_SIZE) this means that if a chunk is between 1M and 2M in
+size the first 1M will not be mapped despite having been accounted for
+in the memblock limit. This has been observed to result in page tables
+being allocated from unmapped memory, causing boot-time hangs.
+
+This patch modifies the memblock limit rounding to always round down to
+PMD_SIZE instead of SECTION_SIZE. For classic MMU this means that we
+will round the memblock limit down to a 2M boundary, matching the limits
+on section mappings, and preventing allocations from unmapped memory.
+For LPAE there should be no change as PMD_SIZE == SECTION_SIZE.
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Reported-by: Stefan Agner <stefan@agner.ch>
+Tested-by: Stefan Agner <stefan@agner.ch>
+Acked-by: Laura Abbott <labbott@redhat.com>
+Tested-by: Hans de Goede <hdegoede@redhat.com>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Steve Capper <steve.capper@linaro.org>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/mm/mmu.c | 20 ++++++++++----------
+ 1 file changed, 10 insertions(+), 10 deletions(-)
+
+--- a/arch/arm/mm/mmu.c
++++ b/arch/arm/mm/mmu.c
+@@ -1112,22 +1112,22 @@ void __init sanity_check_meminfo(void)
+ }
+
+ /*
+- * Find the first non-section-aligned page, and point
++ * Find the first non-pmd-aligned page, and point
+ * memblock_limit at it. This relies on rounding the
+- * limit down to be section-aligned, which happens at
+- * the end of this function.
++ * limit down to be pmd-aligned, which happens at the
++ * end of this function.
+ *
+ * With this algorithm, the start or end of almost any
+- * bank can be non-section-aligned. The only exception
+- * is that the start of the bank 0 must be section-
++ * bank can be non-pmd-aligned. The only exception is
++ * that the start of the bank 0 must be section-
+ * aligned, since otherwise memory would need to be
+ * allocated when mapping the start of bank 0, which
+ * occurs before any free memory is mapped.
+ */
+ if (!memblock_limit) {
+- if (!IS_ALIGNED(block_start, SECTION_SIZE))
++ if (!IS_ALIGNED(block_start, PMD_SIZE))
+ memblock_limit = block_start;
+- else if (!IS_ALIGNED(block_end, SECTION_SIZE))
++ else if (!IS_ALIGNED(block_end, PMD_SIZE))
+ memblock_limit = arm_lowmem_limit;
+ }
+
+@@ -1137,12 +1137,12 @@ void __init sanity_check_meminfo(void)
+ high_memory = __va(arm_lowmem_limit - 1) + 1;
+
+ /*
+- * Round the memblock limit down to a section size. This
++ * Round the memblock limit down to a pmd size. This
+ * helps to ensure that we will allocate memory from the
+- * last full section, which should be mapped.
++ * last full pmd, which should be mapped.
+ */
+ if (memblock_limit)
+- memblock_limit = round_down(memblock_limit, SECTION_SIZE);
++ memblock_limit = round_down(memblock_limit, PMD_SIZE);
+ if (!memblock_limit)
+ memblock_limit = arm_lowmem_limit;
+
--- /dev/null
+From e46b5a6470a5e2c8e1096f8f60887ac19949055b Mon Sep 17 00:00:00 2001
+From: Shawn Guo <shawn.guo@linaro.org>
+Date: Tue, 19 May 2015 22:06:41 +0800
+Subject: ARM: dts: fix imx27 dtb build rule
+
+From: Shawn Guo <shawn.guo@linaro.org>
+
+commit e46b5a6470a5e2c8e1096f8f60887ac19949055b upstream.
+
+The i.MX27 dtb build should be controlled by CONFIG_SOC_IMX27 rather
+than CONFIG_SOC_IMX31.
+
+Signed-off-by: Shawn Guo <shawn.guo@linaro.org>
+Fixes: cb612390e546 ("ARM: dts: Only build dtb if associated Arch and/or SoC is enabled")
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/boot/dts/Makefile | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm/boot/dts/Makefile
++++ b/arch/arm/boot/dts/Makefile
+@@ -215,7 +215,7 @@ dtb-$(CONFIG_SOC_IMX25) += \
+ imx25-eukrea-mbimxsd25-baseboard-dvi-vga.dtb \
+ imx25-karo-tx25.dtb \
+ imx25-pdk.dtb
+-dtb-$(CONFIG_SOC_IMX31) += \
++dtb-$(CONFIG_SOC_IMX27) += \
+ imx27-apf27.dtb \
+ imx27-apf27dev.dtb \
+ imx27-eukrea-mbimxsd27-baseboard.dtb \
--- /dev/null
+From a29ef819f3f34f89a1b9b6a939b4c1cdfe1e85ce Mon Sep 17 00:00:00 2001
+From: Philippe Reynes <tremyfr@gmail.com>
+Date: Wed, 13 May 2015 00:18:26 +0200
+Subject: ARM: dts: imx27: only map 4 Kbyte for fec registers
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Philippe Reynes <tremyfr@gmail.com>
+
+commit a29ef819f3f34f89a1b9b6a939b4c1cdfe1e85ce upstream.
+
+According to the imx27 documentation, fec has a 4 Kbyte
+memory space map. Moreover, the actual 16 Kbyte mapping
+overlaps the SCC (Security Controller) memory register
+space. So, we reduce the memory register space to 4 Kbyte.
+
+Signed-off-by: Philippe Reynes <tremyfr@gmail.com>
+Acked-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+Fixes: 9f0749e3eb88 ("ARM i.MX27: Add devicetree support")
+Signed-off-by: Shawn Guo <shawn.guo@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/boot/dts/imx27.dtsi | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm/boot/dts/imx27.dtsi
++++ b/arch/arm/boot/dts/imx27.dtsi
+@@ -531,7 +531,7 @@
+
+ fec: ethernet@1002b000 {
+ compatible = "fsl,imx27-fec";
+- reg = <0x1002b000 0x4000>;
++ reg = <0x1002b000 0x1000>;
+ interrupts = <50>;
+ clocks = <&clks IMX27_CLK_FEC_IPG_GATE>,
+ <&clks IMX27_CLK_FEC_AHB_GATE>;
--- /dev/null
+From 242ddf04297f2c4768bd8eb7593ab911910c5f76 Mon Sep 17 00:00:00 2001
+From: Inki Dae <inki.dae@samsung.com>
+Date: Sat, 23 May 2015 11:46:55 +0900
+Subject: ARM: dts: set display clock correctly for exynos4412-trats2
+
+From: Inki Dae <inki.dae@samsung.com>
+
+commit 242ddf04297f2c4768bd8eb7593ab911910c5f76 upstream.
+
+This patch sets display clock correctly. If Display clock isn't set
+correctly then you would find below messages and Display controller
+doesn't work correctly.
+
+ exynos-drm: No connectors reported connected with modes
+ [drm] Cannot find any crtc or sizes - going 1024x768
+
+Fixes: abc0b1447d49 ("drm: Perform basic sanity checks on probed modes")
+Signed-off-by: Inki Dae <inki.dae@samsung.com>
+Reviewed-by: Krzysztof Kozlowski <k.kozlowski@samsung.com>
+Tested-by: Krzysztof Kozlowski <k.kozlowski@samsung.com>
+Signed-off-by: Kukjin Kim <kgene@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/boot/dts/exynos4412-trats2.dts | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm/boot/dts/exynos4412-trats2.dts
++++ b/arch/arm/boot/dts/exynos4412-trats2.dts
+@@ -736,7 +736,7 @@
+
+ display-timings {
+ timing-0 {
+- clock-frequency = <0>;
++ clock-frequency = <57153600>;
+ hactive = <720>;
+ vactive = <1280>;
+ hfront-porch = <5>;
--- /dev/null
+From 0b7dc0ff95237a53287e52f1aab7408ebf1c4085 Mon Sep 17 00:00:00 2001
+From: Krzysztof Kozlowski <k.kozlowski@samsung.com>
+Date: Wed, 13 May 2015 17:45:52 +0900
+Subject: ARM: EXYNOS: Fix dereference of ERR_PTR returned by of_genpd_get_from_provider
+
+From: Krzysztof Kozlowski <k.kozlowski@samsung.com>
+
+commit 0b7dc0ff95237a53287e52f1aab7408ebf1c4085 upstream.
+
+ERR_PTR was dereferenced during sub domain parsing, if parent domain
+could not be obtained (because of invalid phandle or deferred
+registration of parent domain).
+
+The Exynos power domain code checked whether
+of_genpd_get_from_provider() returned NULL and in that case it skipped
+that power domain node. However this function returns ERR_PTR or valid
+pointer, not NULL.
+
+Fixes: 0f7807518fe1 ("ARM: EXYNOS: add support for sub-power domains")
+Signed-off-by: Krzysztof Kozlowski <k.kozlowski@samsung.com>
+Signed-off-by: Kukjin Kim <kgene@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/mach-exynos/pm_domains.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/arm/mach-exynos/pm_domains.c
++++ b/arch/arm/mach-exynos/pm_domains.c
+@@ -169,7 +169,7 @@ no_clk:
+ args.np = np;
+ args.args_count = 0;
+ child_domain = of_genpd_get_from_provider(&args);
+- if (!child_domain)
++ if (IS_ERR(child_domain))
+ continue;
+
+ if (of_parse_phandle_with_args(np, "power-domains",
+@@ -177,7 +177,7 @@ no_clk:
+ continue;
+
+ parent_domain = of_genpd_get_from_provider(&args);
+- if (!parent_domain)
++ if (IS_ERR(parent_domain))
+ continue;
+
+ if (pm_genpd_add_subdomain(parent_domain, child_domain))
--- /dev/null
+From 1b97937246d8b97c0760d16d8992c7937bdf5e6a Mon Sep 17 00:00:00 2001
+From: Russell King <rmk+kernel@arm.linux.org.uk>
+Date: Fri, 15 May 2015 11:02:23 +0100
+Subject: ARM: fix missing syscall trace exit
+
+From: Russell King <rmk+kernel@arm.linux.org.uk>
+
+commit 1b97937246d8b97c0760d16d8992c7937bdf5e6a upstream.
+
+Josh Stone reports:
+
+ I've discovered a case where both arm and arm64 will miss a ptrace
+ syscall-exit that they should report. If the syscall is entered
+ without TIF_SYSCALL_TRACE set, then it goes on the fast path. It's
+ then possible to have TIF_SYSCALL_TRACE added in the middle of the
+ syscall, but ret_fast_syscall doesn't check this flag again.
+
+Fix this by always checking for a syscall trace in the fast exit path.
+
+Reported-by: Josh Stone <jistone@redhat.com>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/kernel/entry-common.S | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/arch/arm/kernel/entry-common.S
++++ b/arch/arm/kernel/entry-common.S
+@@ -33,7 +33,9 @@ ret_fast_syscall:
+ UNWIND(.fnstart )
+ UNWIND(.cantunwind )
+ disable_irq @ disable interrupts
+- ldr r1, [tsk, #TI_FLAGS]
++ ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
++ tst r1, #_TIF_SYSCALL_WORK
++ bne __sys_trace_return
+ tst r1, #_TIF_WORK_MASK
+ bne fast_work_pending
+ asm_trace_hardirqs_on
--- /dev/null
+From 8f4fc071b1926d0b20336e2b3f8ab85c94c734c5 Mon Sep 17 00:00:00 2001
+From: Vladimir Davydov <vdavydov@parallels.com>
+Date: Thu, 14 May 2015 15:16:55 -0700
+Subject: gfp: add __GFP_NOACCOUNT
+
+From: Vladimir Davydov <vdavydov@parallels.com>
+
+commit 8f4fc071b1926d0b20336e2b3f8ab85c94c734c5 upstream.
+
+Not all kmem allocations should be accounted to memcg. The following
+patch gives an example when accounting of a certain type of allocations to
+memcg can effectively result in a memory leak. This patch adds the
+__GFP_NOACCOUNT flag which if passed to kmalloc and friends will force the
+allocation to go through the root cgroup. It will be used by the next
+patch.
+
+Note, since in case of kmemleak enabled each kmalloc implies yet another
+allocation from the kmemleak_object cache, we add __GFP_NOACCOUNT to
+gfp_kmemleak_mask.
+
+Alternatively, we could introduce a per kmem cache flag disabling
+accounting for all allocations of a particular kind, but (a) we would not
+be able to bypass accounting for kmalloc then and (b) a kmem cache with
+this flag set could not be merged with a kmem cache without this flag,
+which would increase the number of global caches and therefore
+fragmentation even if the memory cgroup controller is not used.
+
+Despite its generic name, currently __GFP_NOACCOUNT disables accounting
+only for kmem allocations while user page allocations are always charged.
+To catch abusing of this flag, a warning is issued on an attempt of
+passing it to mem_cgroup_try_charge.
+
+Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
+Cc: Tejun Heo <tj@kernel.org>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Cc: Michal Hocko <mhocko@suse.cz>
+Cc: Christoph Lameter <cl@linux.com>
+Cc: Pekka Enberg <penberg@kernel.org>
+Cc: David Rientjes <rientjes@google.com>
+Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
+Cc: Greg Thelen <gthelen@google.com>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/gfp.h | 2 ++
+ include/linux/memcontrol.h | 4 ++++
+ mm/kmemleak.c | 3 ++-
+ 3 files changed, 8 insertions(+), 1 deletion(-)
+
+--- a/include/linux/gfp.h
++++ b/include/linux/gfp.h
+@@ -30,6 +30,7 @@ struct vm_area_struct;
+ #define ___GFP_HARDWALL 0x20000u
+ #define ___GFP_THISNODE 0x40000u
+ #define ___GFP_RECLAIMABLE 0x80000u
++#define ___GFP_NOACCOUNT 0x100000u
+ #define ___GFP_NOTRACK 0x200000u
+ #define ___GFP_NO_KSWAPD 0x400000u
+ #define ___GFP_OTHER_NODE 0x800000u
+@@ -85,6 +86,7 @@ struct vm_area_struct;
+ #define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL) /* Enforce hardwall cpuset memory allocs */
+ #define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE)/* No fallback, no policies */
+ #define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) /* Page is reclaimable */
++#define __GFP_NOACCOUNT ((__force gfp_t)___GFP_NOACCOUNT) /* Don't account to kmemcg */
+ #define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK) /* Don't track with kmemcheck */
+
+ #define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD)
+--- a/include/linux/memcontrol.h
++++ b/include/linux/memcontrol.h
+@@ -463,6 +463,8 @@ memcg_kmem_newpage_charge(gfp_t gfp, str
+ if (!memcg_kmem_enabled())
+ return true;
+
++ if (gfp & __GFP_NOACCOUNT)
++ return true;
+ /*
+ * __GFP_NOFAIL allocations will move on even if charging is not
+ * possible. Therefore we don't even try, and have this allocation
+@@ -522,6 +524,8 @@ memcg_kmem_get_cache(struct kmem_cache *
+ {
+ if (!memcg_kmem_enabled())
+ return cachep;
++ if (gfp & __GFP_NOACCOUNT)
++ return cachep;
+ if (gfp & __GFP_NOFAIL)
+ return cachep;
+ if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
+--- a/mm/kmemleak.c
++++ b/mm/kmemleak.c
+@@ -115,7 +115,8 @@
+ #define BYTES_PER_POINTER sizeof(void *)
+
+ /* GFP bitmask for kmemleak internal allocations */
+-#define gfp_kmemleak_mask(gfp) (((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
++#define gfp_kmemleak_mask(gfp) (((gfp) & (GFP_KERNEL | GFP_ATOMIC | \
++ __GFP_NOACCOUNT)) | \
+ __GFP_NORETRY | __GFP_NOMEMALLOC | \
+ __GFP_NOWARN)
+
--- /dev/null
+From 499611ed451508a42d1d7d1faff10177827755d5 Mon Sep 17 00:00:00 2001
+From: Vladimir Davydov <vdavydov@parallels.com>
+Date: Thu, 14 May 2015 15:16:58 -0700
+Subject: kernfs: do not account ino_ida allocations to memcg
+
+From: Vladimir Davydov <vdavydov@parallels.com>
+
+commit 499611ed451508a42d1d7d1faff10177827755d5 upstream.
+
+root->ino_ida is used for kernfs inode number allocations. Since IDA has
+a layered structure, different IDs can reside on the same layer, which
+is currently accounted to some memory cgroup. The problem is that each
+kmem cache of a memory cgroup has its own directory on sysfs (under
+/sys/fs/kernel/<cache-name>/cgroup). If the inode number of such a
+directory or any file in it gets allocated from a layer accounted to the
+cgroup which the cache is created for, the cgroup will get pinned for
+good, because one has to free all kmem allocations accounted to a cgroup
+in order to release it and destroy all its kmem caches. That said we
+must not account layers of ino_ida to any memory cgroup.
+
+Since per net init operations may create new sysfs entries directly
+(e.g. lo device) or indirectly (nf_conntrack creates a new kmem cache
+per each namespace, which, in turn, creates new sysfs entries), an easy
+way to reproduce this issue is by creating network namespace(s) from
+inside a kmem-active memory cgroup.
+
+Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
+Acked-by: Tejun Heo <tj@kernel.org>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Cc: Michal Hocko <mhocko@suse.cz>
+Cc: Christoph Lameter <cl@linux.com>
+Cc: Pekka Enberg <penberg@kernel.org>
+Cc: David Rientjes <rientjes@google.com>
+Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
+Cc: Greg Thelen <gthelen@google.com>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/kernfs/dir.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+--- a/fs/kernfs/dir.c
++++ b/fs/kernfs/dir.c
+@@ -518,7 +518,14 @@ static struct kernfs_node *__kernfs_new_
+ if (!kn)
+ goto err_out1;
+
+- ret = ida_simple_get(&root->ino_ida, 1, 0, GFP_KERNEL);
++ /*
++ * If the ino of the sysfs entry created for a kmem cache gets
++ * allocated from an ida layer, which is accounted to the memcg that
++ * owns the cache, the memcg will get pinned forever. So do not account
++ * ino ida allocations.
++ */
++ ret = ida_simple_get(&root->ino_ida, 1, 0,
++ GFP_KERNEL | __GFP_NOACCOUNT);
+ if (ret < 0)
+ goto err_out2;
+ kn->ino = ret;
--- /dev/null
+From b0dc2b9bb4ab782115b964310518ee0b17784277 Mon Sep 17 00:00:00 2001
+From: Mel Gorman <mgorman@suse.de>
+Date: Thu, 14 May 2015 15:17:09 -0700
+Subject: mm, numa: really disable NUMA balancing by default on single node machines
+
+From: Mel Gorman <mgorman@suse.de>
+
+commit b0dc2b9bb4ab782115b964310518ee0b17784277 upstream.
+
+NUMA balancing is meant to be disabled by default on UMA machines but
+the check is using nr_node_ids (highest node) instead of
+num_online_nodes (online nodes).
+
+The consequences are that a UMA machine with a node ID of 1 or higher
+will enable NUMA balancing. This will incur useless overhead due to
+minor faults with the impact depending on the workload. These are the
+impact on the stats when running a kernel build on a single node machine
+whose node ID happened to be 1:
+
+ vanilla patched
+ NUMA base PTE updates 5113158 0
+ NUMA huge PMD updates 643 0
+ NUMA page range updates 5442374 0
+ NUMA hint faults 2109622 0
+ NUMA hint local faults 2109622 0
+ NUMA hint local percent 100 100
+ NUMA pages migrated 0 0
+
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+Reviewed-by: Rik van Riel <riel@redhat.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/mempolicy.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -2517,7 +2517,7 @@ static void __init check_numabalancing_e
+ if (numabalancing_override)
+ set_numabalancing_state(numabalancing_override == 1);
+
+- if (nr_node_ids > 1 && !numabalancing_override) {
++ if (num_online_nodes() > 1 && !numabalancing_override) {
+ pr_info("%s automatic NUMA balancing. "
+ "Configure with numa_balancing= or the "
+ "kernel.numa_balancing sysctl",
--- /dev/null
+From d045c77c1a69703143a36169c224429c48b9eecd Mon Sep 17 00:00:00 2001
+From: Helge Deller <deller@gmx.de>
+Date: Mon, 11 May 2015 22:01:27 +0200
+Subject: parisc,metag: Fix crashes due to stack randomization on stack-grows-upwards architectures
+
+From: Helge Deller <deller@gmx.de>
+
+commit d045c77c1a69703143a36169c224429c48b9eecd upstream.
+
+On architectures where the stack grows upwards (CONFIG_STACK_GROWSUP=y,
+currently parisc and metag only) stack randomization sometimes leads to crashes
+when the stack ulimit is set to lower values than STACK_RND_MASK (which is 8 MB
+by default if not defined in arch-specific headers).
+
+The problem is, that when the stack vm_area_struct is set up in fs/exec.c, the
+additional space needed for the stack randomization (as defined by the value of
+STACK_RND_MASK) was not taken into account yet and as such, when the stack
+randomization code added a random offset to the stack start, the stack
+effectively got smaller than what the user defined via rlimit_max(RLIMIT_STACK)
+which then sometimes leads to out-of-stack situations and crashes.
+
+This patch fixes it by adding the maximum possible amount of memory (based on
+STACK_RND_MASK) which theoretically could be added by the stack randomization
+code to the initial stack size. That way, the user-defined stack size is always
+guaranteed to be at minimum what is defined via rlimit_max(RLIMIT_STACK).
+
+This bug is currently not visible on the metag architecture, because on metag
+STACK_RND_MASK is defined to 0 which effectively disables stack randomization.
+
+The changes to fs/exec.c are inside an "#ifdef CONFIG_STACK_GROWSUP"
+section, so it does not affect other platforms beside those where the
+stack grows upwards (parisc and metag).
+
+Signed-off-by: Helge Deller <deller@gmx.de>
+Cc: linux-parisc@vger.kernel.org
+Cc: James Hogan <james.hogan@imgtec.com>
+Cc: linux-metag@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/parisc/include/asm/elf.h | 4 ++++
+ arch/parisc/kernel/sys_parisc.c | 3 +++
+ fs/exec.c | 3 +++
+ 3 files changed, 10 insertions(+)
+
+--- a/arch/parisc/include/asm/elf.h
++++ b/arch/parisc/include/asm/elf.h
+@@ -348,6 +348,10 @@ struct pt_regs; /* forward declaration..
+
+ #define ELF_HWCAP 0
+
++#define STACK_RND_MASK (is_32bit_task() ? \
++ 0x7ff >> (PAGE_SHIFT - 12) : \
++ 0x3ffff >> (PAGE_SHIFT - 12))
++
+ struct mm_struct;
+ extern unsigned long arch_randomize_brk(struct mm_struct *);
+ #define arch_randomize_brk arch_randomize_brk
+--- a/arch/parisc/kernel/sys_parisc.c
++++ b/arch/parisc/kernel/sys_parisc.c
+@@ -77,6 +77,9 @@ static unsigned long mmap_upper_limit(vo
+ if (stack_base > STACK_SIZE_MAX)
+ stack_base = STACK_SIZE_MAX;
+
++ /* Add space for stack randomization. */
++ stack_base += (STACK_RND_MASK << PAGE_SHIFT);
++
+ return PAGE_ALIGN(STACK_TOP - stack_base);
+ }
+
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -659,6 +659,9 @@ int setup_arg_pages(struct linux_binprm
+ if (stack_base > STACK_SIZE_MAX)
+ stack_base = STACK_SIZE_MAX;
+
++ /* Add space for stack randomization. */
++ stack_base += (STACK_RND_MASK << PAGE_SHIFT);
++
+ /* Make sure we didn't let the argument array grow too large. */
+ if (vma->vm_end - vma->vm_start > stack_base)
+ return -ENOMEM;
s390-mm-correct-return-value-of-pmd_pfn.patch
sched-handle-priority-boosted-tasks-proper-in-setscheduler.patch
sched-always-use-blk_schedule_flush_plug-in-io_schedule_out.patch
+arm-8356-1-mm-handle-non-pmd-aligned-end-of-ram.patch
+arm-exynos-fix-dereference-of-err_ptr-returned-by-of_genpd_get_from_provider.patch
+arm-dts-imx27-only-map-4-kbyte-for-fec-registers.patch
+arm-dts-fix-imx27-dtb-build-rule.patch
+arm-dts-set-display-clock-correctly-for-exynos4412-trats2.patch
+arm-fix-missing-syscall-trace-exit.patch
+parisc-metag-fix-crashes-due-to-stack-randomization-on-stack-grows-upwards-architectures.patch
+gfp-add-__gfp_noaccount.patch
+kernfs-do-not-account-ino_ida-allocations-to-memcg.patch
+tools-vm-fix-page-flags-build.patch
+mm-numa-really-disable-numa-balancing-by-default-on-single-node-machines.patch
--- /dev/null
+From 4933f55fe72c86e57efc454dd6e673c7f17af5a3 Mon Sep 17 00:00:00 2001
+From: Andi Kleen <ak@linux.intel.com>
+Date: Thu, 14 May 2015 15:16:53 -0700
+Subject: tools/vm: fix page-flags build
+
+From: Andi Kleen <ak@linux.intel.com>
+
+commit 4933f55fe72c86e57efc454dd6e673c7f17af5a3 upstream.
+
+libabikfs.a doesn't exist anymore, so we now need to link with libapi.a.
+
+Signed-off-by: Andi Kleen <ak@linux.intel.com>
+Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ tools/vm/Makefile | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/tools/vm/Makefile
++++ b/tools/vm/Makefile
+@@ -3,7 +3,7 @@
+ TARGETS=page-types slabinfo page_owner_sort
+
+ LIB_DIR = ../lib/api
+-LIBS = $(LIB_DIR)/libapikfs.a
++LIBS = $(LIB_DIR)/libapi.a
+
+ CC = $(CROSS_COMPILE)gcc
+ CFLAGS = -Wall -Wextra -I../lib/