--- /dev/null
+From 1813908986e36119228c158aae1c6a0267c99e77 Mon Sep 17 00:00:00 2001
+From: Jeff Mahoney <jeffm@suse.com>
+Date: Tue, 12 Aug 2014 13:46:09 -0700
+Subject: drivers/mfd/rtsx_usb.c: export device table
+
+From: Jeff Mahoney <jeffm@suse.com>
+
+commit 1813908986e36119228c158aae1c6a0267c99e77 upstream.
+
+The rtsx_usb driver contains the table for the devices it supports but
+doesn't export it. As a result, no alias is generated and it doesn't
+get loaded automatically.
+
+Via https://bugzilla.novell.com/show_bug.cgi?id=890096
+
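+For reference, the mechanism looks like this (a minimal sketch with a
+hypothetical driver and an illustrative ID, not Realtek's full list):
+
+	#include <linux/module.h>
+	#include <linux/usb.h>
+
+	static const struct usb_device_id example_usb_ids[] = {
+		{ USB_DEVICE(0x0bda, 0x0129) },	/* vendor, product */
+		{ }				/* terminating entry */
+	};
+	/*
+	 * Emits a usb:v0BDAp0129... alias into the module, so udev can
+	 * modprobe the driver when a matching device appears.
+	 */
+	MODULE_DEVICE_TABLE(usb, example_usb_ids);
+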
+Signed-off-by: Jeff Mahoney <jeffm@suse.com>
+Reported-by: Marcel Witte <wittemar@googlemail.com>
+Cc: Roger Tseng <rogerable@realtek.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mfd/rtsx_usb.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/mfd/rtsx_usb.c
++++ b/drivers/mfd/rtsx_usb.c
+@@ -744,6 +744,7 @@ static struct usb_device_id rtsx_usb_usb
+ { USB_DEVICE(0x0BDA, 0x0140) },
+ { }
+ };
++MODULE_DEVICE_TABLE(usb, rtsx_usb_usb_ids);
+
+ static struct usb_driver rtsx_usb_driver = {
+ .name = "rtsx_usb",
--- /dev/null
+From 618fde872163e782183ce574c77f1123e2be8887 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sasha.levin@oracle.com>
+Date: Wed, 6 Aug 2014 16:08:14 -0700
+Subject: kernel/smp.c:on_each_cpu_cond(): fix warning in fallback path
+
+From: Sasha Levin <sasha.levin@oracle.com>
+
+commit 618fde872163e782183ce574c77f1123e2be8887 upstream.
+
+The rarely-executed memory-allocation-failed callback path generates a
+WARN_ON_ONCE() when smp_call_function_single() succeeds. Presumably
+it's supposed to warn on failures.
+
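+The convention, sketched: smp_call_function_single() returns 0 on
+success and a negative errno on failure, so the warning has to fire on
+a non-zero return:
+
+	ret = smp_call_function_single(cpu, func, info, wait);
+	WARN_ON_ONCE(!ret);	/* wrong: warns when the call succeeded */
+	WARN_ON_ONCE(ret);	/* right: warns when the call failed */
+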
+Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
+Cc: Christoph Lameter <cl@gentwo.org>
+Cc: Gilad Ben-Yossef <gilad@benyossef.com>
+Cc: David Rientjes <rientjes@google.com>
+Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
+Cc: Tejun Heo <htejun@gmail.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/smp.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/smp.c
++++ b/kernel/smp.c
+@@ -661,7 +661,7 @@ void on_each_cpu_cond(bool (*cond_func)(
+ if (cond_func(cpu, info)) {
+ ret = smp_call_function_single(cpu, func,
+ info, wait);
+- WARN_ON_ONCE(!ret);
++ WARN_ON_ONCE(ret);
+ }
+ preempt_enable();
+ }
--- /dev/null
+From 46de8ff8e80a6546aa3d2fdf58c6776666301a0c Mon Sep 17 00:00:00 2001
+From: Michael Welling <mwelling@emacinc.com>
+Date: Mon, 28 Jul 2014 18:01:04 -0500
+Subject: mfd: omap-usb-host: Fix improper mask use.
+
+From: Michael Welling <mwelling@emacinc.com>
+
+commit 46de8ff8e80a6546aa3d2fdf58c6776666301a0c upstream.
+
+single-ulpi-bypass is a flag used for older OMAP3 silicon.
+
+When set, the flag can exercise code that improperly uses the
+OMAP_UHH_HOSTCONFIG_ULPI_BYPASS define to clear the corresponding bit.
+Instead of clearing only that bit, it clears all of the other bits,
+disabling all of the ports in the process.
+
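+In other words (an illustrative sketch, not the driver code):
+
+	/* Wrong: keeps only the bypass bit and zeroes every other bit,
+	 * including the bits that keep the ports enabled: */
+	reg &= OMAP_UHH_HOSTCONFIG_ULPI_BYPASS;
+
+	/* Right: clears only the bypass bit, leaving the rest intact: */
+	reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_BYPASS;
+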
+Signed-off-by: Michael Welling <mwelling@emacinc.com>
+Signed-off-by: Lee Jones <lee.jones@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mfd/omap-usb-host.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/mfd/omap-usb-host.c
++++ b/drivers/mfd/omap-usb-host.c
+@@ -445,7 +445,7 @@ static unsigned omap_usbhs_rev1_hostconf
+
+ for (i = 0; i < omap->nports; i++) {
+ if (is_ehci_phy_mode(pdata->port_mode[i])) {
+- reg &= OMAP_UHH_HOSTCONFIG_ULPI_BYPASS;
++ reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_BYPASS;
+ break;
+ }
+ }
--- /dev/null
+From daebabd578647440d41fc9b48d8c7a88dc2f7ab5 Mon Sep 17 00:00:00 2001
+From: Tony Lindgren <tony@atomide.com>
+Date: Tue, 19 Aug 2014 08:24:05 -0700
+Subject: mfd: twl4030-power: Fix PM idle pin configuration to not conflict with regulators
+
+From: Tony Lindgren <tony@atomide.com>
+
+commit daebabd578647440d41fc9b48d8c7a88dc2f7ab5 upstream.
+
+Commit 43fef47f94a1 (mfd: twl4030-power: Add a configuration to turn
+off oscillator during off-idle) added support for configuring the PMIC
+to cut off resources during deeper idle states to save power.
+
+This however caused regression for n900 display power that needed the
+PMIC configuration to be disabled with commit d937678ab625 (ARM: dts:
+Revert enabling of twl configuration for n900).
+
+It turns out the root cause of the problem is that we must use
+TWL4030_RESCONFIG_UNDEF instead of DEV_GRP_NULL to avoid disabling
+regulators that may have been enabled before the init function
+for twl4030-power.c runs. With TWL4030_RESCONFIG_UNDEF we let the
+regulator framework control the regulators like it should. Here we
+only need to configure the sys_clken and sys_off_mode triggers for
+the regulators, which cannot be done by the regulator framework as
+it is not running at that point.
+
+This allows us to enable the PMIC configuration for n900.
+
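+The difference, sketched against the TWL_REMAP_SLEEP(res, devgroup,
+type, type2) entries changed below:
+
+	/* DEV_GRP_NULL rewrites the resource's device-group assignment,
+	 * switching off a regulator the framework may have enabled: */
+	TWL_REMAP_SLEEP(RES_VAUX1, DEV_GRP_NULL, 0, 0),
+
+	/* TWL4030_RESCONFIG_UNDEF leaves the group assignment untouched
+	 * and only sets up the sleep/off remap triggers: */
+	TWL_REMAP_SLEEP(RES_VAUX1, TWL4030_RESCONFIG_UNDEF, 0, 0),
+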
+Fixes: 43fef47f94a1 (mfd: twl4030-power: Add a configuration to turn off oscillator during off-idle)
+
+Signed-off-by: Tony Lindgren <tony@atomide.com>
+Tested-by: Aaro Koskinen <aaro.koskinen@iki.fi>
+Signed-off-by: Lee Jones <lee.jones@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/boot/dts/omap3-n900.dts | 2 +-
+ drivers/mfd/twl4030-power.c | 20 ++++++++++----------
+ 2 files changed, 11 insertions(+), 11 deletions(-)
+
+--- a/arch/arm/boot/dts/omap3-n900.dts
++++ b/arch/arm/boot/dts/omap3-n900.dts
+@@ -353,7 +353,7 @@
+ };
+
+ twl_power: power {
+- compatible = "ti,twl4030-power-n900";
++ compatible = "ti,twl4030-power-n900", "ti,twl4030-power-idle-osc-off";
+ ti,use_poweroff;
+ };
+ };
+--- a/drivers/mfd/twl4030-power.c
++++ b/drivers/mfd/twl4030-power.c
+@@ -724,24 +724,24 @@ static struct twl4030_script *omap3_idle
+ * above.
+ */
+ static struct twl4030_resconfig omap3_idle_rconfig[] = {
+- TWL_REMAP_SLEEP(RES_VAUX1, DEV_GRP_NULL, 0, 0),
+- TWL_REMAP_SLEEP(RES_VAUX2, DEV_GRP_NULL, 0, 0),
+- TWL_REMAP_SLEEP(RES_VAUX3, DEV_GRP_NULL, 0, 0),
+- TWL_REMAP_SLEEP(RES_VAUX4, DEV_GRP_NULL, 0, 0),
+- TWL_REMAP_SLEEP(RES_VMMC1, DEV_GRP_NULL, 0, 0),
+- TWL_REMAP_SLEEP(RES_VMMC2, DEV_GRP_NULL, 0, 0),
++ TWL_REMAP_SLEEP(RES_VAUX1, TWL4030_RESCONFIG_UNDEF, 0, 0),
++ TWL_REMAP_SLEEP(RES_VAUX2, TWL4030_RESCONFIG_UNDEF, 0, 0),
++ TWL_REMAP_SLEEP(RES_VAUX3, TWL4030_RESCONFIG_UNDEF, 0, 0),
++ TWL_REMAP_SLEEP(RES_VAUX4, TWL4030_RESCONFIG_UNDEF, 0, 0),
++ TWL_REMAP_SLEEP(RES_VMMC1, TWL4030_RESCONFIG_UNDEF, 0, 0),
++ TWL_REMAP_SLEEP(RES_VMMC2, TWL4030_RESCONFIG_UNDEF, 0, 0),
+ TWL_REMAP_OFF(RES_VPLL1, DEV_GRP_P1, 3, 1),
+ TWL_REMAP_SLEEP(RES_VPLL2, DEV_GRP_P1, 0, 0),
+- TWL_REMAP_SLEEP(RES_VSIM, DEV_GRP_NULL, 0, 0),
+- TWL_REMAP_SLEEP(RES_VDAC, DEV_GRP_NULL, 0, 0),
++ TWL_REMAP_SLEEP(RES_VSIM, TWL4030_RESCONFIG_UNDEF, 0, 0),
++ TWL_REMAP_SLEEP(RES_VDAC, TWL4030_RESCONFIG_UNDEF, 0, 0),
+ TWL_REMAP_SLEEP(RES_VINTANA1, TWL_DEV_GRP_P123, 1, 2),
+ TWL_REMAP_SLEEP(RES_VINTANA2, TWL_DEV_GRP_P123, 0, 2),
+ TWL_REMAP_SLEEP(RES_VINTDIG, TWL_DEV_GRP_P123, 1, 2),
+ TWL_REMAP_SLEEP(RES_VIO, TWL_DEV_GRP_P123, 2, 2),
+ TWL_REMAP_OFF(RES_VDD1, DEV_GRP_P1, 4, 1),
+ TWL_REMAP_OFF(RES_VDD2, DEV_GRP_P1, 3, 1),
+- TWL_REMAP_SLEEP(RES_VUSB_1V5, DEV_GRP_NULL, 0, 0),
+- TWL_REMAP_SLEEP(RES_VUSB_1V8, DEV_GRP_NULL, 0, 0),
++ TWL_REMAP_SLEEP(RES_VUSB_1V5, TWL4030_RESCONFIG_UNDEF, 0, 0),
++ TWL_REMAP_SLEEP(RES_VUSB_1V8, TWL4030_RESCONFIG_UNDEF, 0, 0),
+ TWL_REMAP_SLEEP(RES_VUSB_3V1, TWL_DEV_GRP_P123, 0, 0),
+ /* Resource #20 USB charge pump skipped */
+ TWL_REMAP_SLEEP(RES_REGEN, TWL_DEV_GRP_P123, 2, 1),
--- /dev/null
+From d0177639310d23c7739500df3c6ce6fdfe34acec Mon Sep 17 00:00:00 2001
+From: Li Zhong <zhong@linux.vnet.ibm.com>
+Date: Wed, 6 Aug 2014 16:07:56 -0700
+Subject: mm: fix potential infinite loop in dissolve_free_huge_pages()
+
+From: Li Zhong <zhong@linux.vnet.ibm.com>
+
+commit d0177639310d23c7739500df3c6ce6fdfe34acec upstream.
+
+It is possible for some platforms, such as powerpc, to set HPAGE_SHIFT
+to 0 to indicate that huge pages are not supported.
+
+When this is the case, hugetlbfs is disabled during boot with:
+hugetlbfs: disabling because there are no supported hugepage sizes
+
+Then in dissolve_free_huge_pages(), order is kept at its maximum (64 on
+64-bit) and the for loop below never terminates:
+
+	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order)
+
+As suggested by Naoya, the fix below checks hugepages_supported()
+before calling dissolve_free_huge_pages().
+
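+A sketch of the fixed function (simplified from the upstream code):
+
+	void dissolve_free_huge_pages(unsigned long start_pfn,
+				      unsigned long end_pfn)
+	{
+		unsigned int order = 8 * sizeof(void *);  /* 64 on 64-bit */
+		unsigned long pfn;
+		struct hstate *h;
+
+		/* The fix: with no registered hstates, order would never
+		 * be lowered by the loop below. */
+		if (!hugepages_supported())
+			return;
+
+		/* Set scan step to minimum hugepage size */
+		for_each_hstate(h)
+			if (order > huge_page_order(h))
+				order = huge_page_order(h);
+
+		for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order)
+			dissolve_free_huge_page(pfn_to_page(pfn));
+	}
+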
+[rientjes@google.com: no legitimate reason to call dissolve_free_huge_pages() when !hugepages_supported()]
+Signed-off-by: Li Zhong <zhong@linux.vnet.ibm.com>
+Acked-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
+Acked-by: David Rientjes <rientjes@google.com>
+Signed-off-by: David Rientjes <rientjes@google.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/hugetlb.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -1089,6 +1089,9 @@ void dissolve_free_huge_pages(unsigned l
+ unsigned long pfn;
+ struct hstate *h;
+
++ if (!hugepages_supported())
++ return;
++
+ /* Set scan step to minimum hugepage size */
+ for_each_hstate(h)
+ if (order > huge_page_order(h))
--- /dev/null
+From 95707d852856aec1cbdad1873ff2dc5161a5cb91 Mon Sep 17 00:00:00 2001
+From: Vaidyanathan Srinivasan <svaidy@linux.vnet.ibm.com>
+Date: Sun, 3 Aug 2014 13:23:08 +0530
+Subject: powerpc/cpuidle: Fix parsing of idle state flags from device-tree
+
+From: Vaidyanathan Srinivasan <svaidy@linux.vnet.ibm.com>
+
+commit 95707d852856aec1cbdad1873ff2dc5161a5cb91 upstream.
+
+Flags from the device tree need to be parsed with endian accessors so
+that the correct value is interpreted on little-endian kernels.
+
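+A sketch of the pattern (count_nap_states() is a hypothetical helper;
+device-tree cells are stored big-endian, so every value must pass
+through be32_to_cpu() before use):
+
+	static int count_nap_states(struct device_node *power_mgt)
+	{
+		const __be32 *flags;
+		int i, len, naps = 0;
+
+		flags = of_get_property(power_mgt,
+					"ibm,cpu-idle-state-flags", &len);
+		if (!flags)
+			return 0;
+
+		for (i = 0; i < len / sizeof(u32); i++)
+			/* convert before testing bits: correct on both
+			 * little- and big-endian kernels */
+			if (be32_to_cpu(flags[i]) & IDLE_USE_INST_NAP)
+				naps++;
+		return naps;
+	}
+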
+Signed-off-by: Vaidyanathan Srinivasan <svaidy@linux.vnet.ibm.com>
+Reviewed-by: Preeti U. Murthy <preeti@linux.vnet.ibm.com>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/cpuidle/cpuidle-powernv.c | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+--- a/drivers/cpuidle/cpuidle-powernv.c
++++ b/drivers/cpuidle/cpuidle-powernv.c
+@@ -160,10 +160,10 @@ static int powernv_cpuidle_driver_init(v
+ static int powernv_add_idle_states(void)
+ {
+ struct device_node *power_mgt;
+- struct property *prop;
+ int nr_idle_states = 1; /* Snooze */
+ int dt_idle_states;
+- u32 *flags;
++ const __be32 *idle_state_flags;
++ u32 len_flags, flags;
+ int i;
+
+ /* Currently we have snooze statically defined */
+@@ -174,18 +174,18 @@ static int powernv_add_idle_states(void)
+ return nr_idle_states;
+ }
+
+- prop = of_find_property(power_mgt, "ibm,cpu-idle-state-flags", NULL);
+- if (!prop) {
++ idle_state_flags = of_get_property(power_mgt, "ibm,cpu-idle-state-flags", &len_flags);
++ if (!idle_state_flags) {
+ pr_warn("DT-PowerMgmt: missing ibm,cpu-idle-state-flags\n");
+ return nr_idle_states;
+ }
+
+- dt_idle_states = prop->length / sizeof(u32);
+- flags = (u32 *) prop->value;
++ dt_idle_states = len_flags / sizeof(u32);
+
+ for (i = 0; i < dt_idle_states; i++) {
+
+- if (flags[i] & IDLE_USE_INST_NAP) {
++ flags = be32_to_cpu(idle_state_flags[i]);
++ if (flags & IDLE_USE_INST_NAP) {
+ /* Add NAP state */
+ strcpy(powernv_states[nr_idle_states].name, "Nap");
+ strcpy(powernv_states[nr_idle_states].desc, "Nap");
+@@ -196,7 +196,7 @@ static int powernv_add_idle_states(void)
+ nr_idle_states++;
+ }
+
+- if (flags[i] & IDLE_USE_INST_SLEEP) {
++ if (flags & IDLE_USE_INST_SLEEP) {
+ /* Add FASTSLEEP state */
+ strcpy(powernv_states[nr_idle_states].name, "FastSleep");
+ strcpy(powernv_states[nr_idle_states].desc, "FastSleep");
--- /dev/null
+From b00fc6ec1f24f9d7af9b8988b6a198186eb3408c Mon Sep 17 00:00:00 2001
+From: Andrey Utkin <andrey.krieger.utkin@gmail.com>
+Date: Mon, 4 Aug 2014 23:13:10 +0300
+Subject: powerpc/mm/numa: Fix break placement
+
+From: Andrey Utkin <andrey.krieger.utkin@gmail.com>
+
+commit b00fc6ec1f24f9d7af9b8988b6a198186eb3408c upstream.
+
+In the CPU_UP_CANCELED case the break was placed before the
+"ret = NOTIFY_OK" assignment, leaving that assignment dead and the
+notifier returning NOTIFY_DONE. Move the break after the assignment.
+
+Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=81631
+Reported-by: David Binderman <dcb314@hotmail.com>
+Signed-off-by: Andrey Utkin <andrey.krieger.utkin@gmail.com>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/mm/numa.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/powerpc/mm/numa.c
++++ b/arch/powerpc/mm/numa.c
+@@ -611,8 +611,8 @@ static int cpu_numa_callback(struct noti
+ case CPU_UP_CANCELED:
+ case CPU_UP_CANCELED_FROZEN:
+ unmap_cpu_from_node(lcpu);
+- break;
+ ret = NOTIFY_OK;
++ break;
+ #endif
+ }
+ return ret;
--- /dev/null
+From 85c1fafd7262e68ad821ee1808686b1392b1167d Mon Sep 17 00:00:00 2001
+From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
+Date: Wed, 13 Aug 2014 12:32:03 +0530
+Subject: powerpc/mm: Use read barrier when creating real_pte
+
+From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
+
+commit 85c1fafd7262e68ad821ee1808686b1392b1167d upstream.
+
+On ppc64 we support 4K hash ptes with a 64K page size. That requires
+us to track the hash pte slot information on a per-4K basis. We do that
+by storing the slot details in the second half of the pte page. The pte
+bit _PAGE_COMBO indicates whether the second half needs to be looked at
+while building the real_pte. We need a read memory barrier there so
+that the load of hidx is not reordered w.r.t. the _PAGE_COMBO check. On
+the store side we already do an lwsync in __hash_page_4K.
+
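+The pairing, sketched (the store-side helper names are illustrative,
+not the kernel's):
+
+	/* store side, done in __hash_page_4K: */
+	store_hidx(ptep + PTRS_PER_PTE, hidx);
+	smp_wmb();		/* lwsync: publish hidx first */
+	set_pte_combo(ptep);	/* then set _PAGE_COMBO */
+
+	/* load side, in __real_pte below: */
+	if (pte_val(pte) & _PAGE_COMBO) {
+		smp_rmb();	/* don't load hidx before the check */
+		rpte.hidx = pte_val(*(ptep + PTRS_PER_PTE));
+	}
+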
+Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/include/asm/pte-hash64-64k.h | 30 +++++++++++++++++++++++++-----
+ 1 file changed, 25 insertions(+), 5 deletions(-)
+
+--- a/arch/powerpc/include/asm/pte-hash64-64k.h
++++ b/arch/powerpc/include/asm/pte-hash64-64k.h
+@@ -46,11 +46,31 @@
+ * in order to deal with 64K made of 4K HW pages. Thus we override the
+ * generic accessors and iterators here
+ */
+-#define __real_pte(e,p) ((real_pte_t) { \
+- (e), (pte_val(e) & _PAGE_COMBO) ? \
+- (pte_val(*((p) + PTRS_PER_PTE))) : 0 })
+-#define __rpte_to_hidx(r,index) ((pte_val((r).pte) & _PAGE_COMBO) ? \
+- (((r).hidx >> ((index)<<2)) & 0xf) : ((pte_val((r).pte) >> 12) & 0xf))
++#define __real_pte __real_pte
++static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep)
++{
++ real_pte_t rpte;
++
++ rpte.pte = pte;
++ rpte.hidx = 0;
++ if (pte_val(pte) & _PAGE_COMBO) {
++ /*
++ * Make sure we order the hidx load against the _PAGE_COMBO
++ * check. The store side ordering is done in __hash_page_4K
++ */
++ smp_rmb();
++ rpte.hidx = pte_val(*((ptep) + PTRS_PER_PTE));
++ }
++ return rpte;
++}
++
++static inline unsigned long __rpte_to_hidx(real_pte_t rpte, unsigned long index)
++{
++ if ((pte_val(rpte.pte) & _PAGE_COMBO))
++ return (rpte.hidx >> (index<<2)) & 0xf;
++ return (pte_val(rpte.pte) >> 12) & 0xf;
++}
++
+ #define __rpte_to_pte(r) ((r).pte)
+ #define __rpte_sub_valid(rpte, index) \
+ (pte_val(rpte.pte) & (_PAGE_HPTE_SUB0 >> (index)))
--- /dev/null
+From 5efbabe09d986f25c02d19954660238fcd7f008a Mon Sep 17 00:00:00 2001
+From: Gavin Shan <gwshan@linux.vnet.ibm.com>
+Date: Mon, 11 Aug 2014 19:16:20 +1000
+Subject: powerpc/pseries: Avoid deadlock on removing ddw
+
+From: Gavin Shan <gwshan@linux.vnet.ibm.com>
+
+commit 5efbabe09d986f25c02d19954660238fcd7f008a upstream.
+
+Function remove_ddw() can be called from of_reconfig_notifier, where
+we potentially remove the dynamic DMA window property, which invokes
+of_reconfig_notifier again. Eventually, it leads to the deadlock that
+the following backtrace shows.
+
+The patch fixes the above issue by deferring releasing the dynamic
+DMA window property while releasing the device node.
+
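+Condensed, the recursive path in the lockdep report below is:
+
+	of_detach_node()                  /* takes of_reconfig_chain rwsem */
+	  -> iommu_reconfig_notifier()
+	    -> remove_ddw()
+	      -> of_remove_property()
+	        -> of_reconfig_notify()   /* takes the same rwsem again */
+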
+=============================================
+[ INFO: possible recursive locking detected ]
+3.16.0+ #428 Tainted: G W
+---------------------------------------------
+drmgr/2273 is trying to acquire lock:
+ ((of_reconfig_chain).rwsem){.+.+..}, at: [<c000000000091890>] \
+ .__blocking_notifier_call_chain+0x40/0x78
+
+but task is already holding lock:
+ ((of_reconfig_chain).rwsem){.+.+..}, at: [<c000000000091890>] \
+ .__blocking_notifier_call_chain+0x40/0x78
+
+other info that might help us debug this:
+ Possible unsafe locking scenario:
+
+ CPU0
+ ----
+ lock((of_reconfig_chain).rwsem);
+ lock((of_reconfig_chain).rwsem);
+ *** DEADLOCK ***
+
+ May be due to missing lock nesting notation
+
+2 locks held by drmgr/2273:
+ #0: (sb_writers#4){.+.+.+}, at: [<c0000000001cbe70>] \
+ .vfs_write+0xb0/0x1f8
+ #1: ((of_reconfig_chain).rwsem){.+.+..}, at: [<c000000000091890>] \
+ .__blocking_notifier_call_chain+0x40/0x78
+
+stack backtrace:
+CPU: 17 PID: 2273 Comm: drmgr Tainted: G W 3.16.0+ #428
+Call Trace:
+[c0000000137e7000] [c000000000013d9c] .show_stack+0x88/0x148 (unreliable)
+[c0000000137e70b0] [c00000000083cd34] .dump_stack+0x7c/0x9c
+[c0000000137e7130] [c0000000000b8afc] .__lock_acquire+0x128c/0x1c68
+[c0000000137e7280] [c0000000000b9a4c] .lock_acquire+0xe8/0x104
+[c0000000137e7350] [c00000000083588c] .down_read+0x4c/0x90
+[c0000000137e73e0] [c000000000091890] .__blocking_notifier_call_chain+0x40/0x78
+[c0000000137e7490] [c000000000091900] .blocking_notifier_call_chain+0x38/0x48
+[c0000000137e7520] [c000000000682a28] .of_reconfig_notify+0x34/0x5c
+[c0000000137e75b0] [c000000000682a9c] .of_property_notify+0x4c/0x54
+[c0000000137e7650] [c000000000682bf0] .of_remove_property+0x30/0xd4
+[c0000000137e76f0] [c000000000052a44] .remove_ddw+0x144/0x168
+[c0000000137e7790] [c000000000053204] .iommu_reconfig_notifier+0x30/0xe0
+[c0000000137e7820] [c00000000009137c] .notifier_call_chain+0x6c/0xb4
+[c0000000137e78c0] [c0000000000918ac] .__blocking_notifier_call_chain+0x5c/0x78
+[c0000000137e7970] [c000000000091900] .blocking_notifier_call_chain+0x38/0x48
+[c0000000137e7a00] [c000000000682a28] .of_reconfig_notify+0x34/0x5c
+[c0000000137e7a90] [c000000000682e14] .of_detach_node+0x44/0x1fc
+[c0000000137e7b40] [c0000000000518e4] .ofdt_write+0x3ac/0x688
+[c0000000137e7c20] [c000000000238430] .proc_reg_write+0xb8/0xd4
+[c0000000137e7cd0] [c0000000001cbeac] .vfs_write+0xec/0x1f8
+[c0000000137e7d70] [c0000000001cc3b0] .SyS_write+0x58/0xa0
+[c0000000137e7e30] [c00000000000a064] syscall_exit+0x0/0x98
+
+Signed-off-by: Gavin Shan <gwshan@linux.vnet.ibm.com>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/platforms/pseries/iommu.c | 20 ++++++++++++++------
+ 1 file changed, 14 insertions(+), 6 deletions(-)
+
+--- a/arch/powerpc/platforms/pseries/iommu.c
++++ b/arch/powerpc/platforms/pseries/iommu.c
+@@ -721,13 +721,13 @@ static int __init disable_ddw_setup(char
+
+ early_param("disable_ddw", disable_ddw_setup);
+
+-static void remove_ddw(struct device_node *np)
++static void remove_ddw(struct device_node *np, bool remove_prop)
+ {
+ struct dynamic_dma_window_prop *dwp;
+ struct property *win64;
+ const u32 *ddw_avail;
+ u64 liobn;
+- int len, ret;
++ int len, ret = 0;
+
+ ddw_avail = of_get_property(np, "ibm,ddw-applicable", &len);
+ win64 = of_find_property(np, DIRECT64_PROPNAME, NULL);
+@@ -761,7 +761,8 @@ static void remove_ddw(struct device_nod
+ np->full_name, ret, ddw_avail[2], liobn);
+
+ delprop:
+- ret = of_remove_property(np, win64);
++ if (remove_prop)
++ ret = of_remove_property(np, win64);
+ if (ret)
+ pr_warning("%s: failed to remove direct window property: %d\n",
+ np->full_name, ret);
+@@ -805,7 +806,7 @@ static int find_existing_ddw_windows(voi
+ window = kzalloc(sizeof(*window), GFP_KERNEL);
+ if (!window || len < sizeof(struct dynamic_dma_window_prop)) {
+ kfree(window);
+- remove_ddw(pdn);
++ remove_ddw(pdn, true);
+ continue;
+ }
+
+@@ -1045,7 +1046,7 @@ out_free_window:
+ kfree(window);
+
+ out_clear_window:
+- remove_ddw(pdn);
++ remove_ddw(pdn, true);
+
+ out_free_prop:
+ kfree(win64->name);
+@@ -1255,7 +1256,14 @@ static int iommu_reconfig_notifier(struc
+
+ switch (action) {
+ case OF_RECONFIG_DETACH_NODE:
+- remove_ddw(np);
++ /*
++ * Removing the property will invoke the reconfig
++ * notifier again, which causes dead-lock on the
++ * read-write semaphore of the notifier chain. So
++ * we have to remove the property when releasing
++ * the device node.
++ */
++ remove_ddw(np, false);
+ if (pci && pci->iommu_table)
+ iommu_free_table(pci->iommu_table, np->full_name);
+
--- /dev/null
+From f1b3929c232784580e5d8ee324b6bc634e709575 Mon Sep 17 00:00:00 2001
+From: Gavin Shan <gwshan@linux.vnet.ibm.com>
+Date: Mon, 11 Aug 2014 19:16:19 +1000
+Subject: powerpc/pseries: Failure on removing device node
+
+From: Gavin Shan <gwshan@linux.vnet.ibm.com>
+
+commit f1b3929c232784580e5d8ee324b6bc634e709575 upstream.
+
+While running the command "drmgr -c phb -r -s 'PHB 528'", the
+following backtrace jumped out because the target device node isn't
+marked with OF_DETACHED by of_detach_node(). This is caused by the
+error returned from the memory hotplug related reconfig notifier when
+CONFIG_MEMORY_HOTREMOVE is disabled. The patch fixes it.
+
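+A sketch of why returning 0 is the right thing to do (the reasoning as
+a comment on the one-liner changed below):
+
+	static inline int pseries_remove_mem_node(struct device_node *np)
+	{
+		/* With CONFIG_MEMORY_HOTREMOVE=n there is nothing to tear
+		 * down. Returning an error here propagates through the
+		 * reconfig notifier chain and makes of_detach_node() bail
+		 * out before it can mark the node OF_DETACHED, so report
+		 * success instead. */
+		return 0;
+	}
+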
+ERROR: Bad of_node_put() on /pci@800000020000210/ethernet@0
+CPU: 14 PID: 2252 Comm: drmgr Tainted: G W 3.16.0+ #427
+Call Trace:
+[c000000012a776a0] [c000000000013d9c] .show_stack+0x88/0x148 (unreliable)
+[c000000012a77750] [c00000000083cd34] .dump_stack+0x7c/0x9c
+[c000000012a777d0] [c0000000006807c4] .of_node_release+0x58/0xe0
+[c000000012a77860] [c00000000038a7d0] .kobject_release+0x174/0x1b8
+[c000000012a77900] [c00000000038a884] .kobject_put+0x70/0x78
+[c000000012a77980] [c000000000681680] .of_node_put+0x28/0x34
+[c000000012a77a00] [c000000000681ea8] .__of_get_next_child+0x64/0x70
+[c000000012a77a90] [c000000000682138] .of_find_node_by_path+0x1b8/0x20c
+[c000000012a77b40] [c000000000051840] .ofdt_write+0x308/0x688
+[c000000012a77c20] [c000000000238430] .proc_reg_write+0xb8/0xd4
+[c000000012a77cd0] [c0000000001cbeac] .vfs_write+0xec/0x1f8
+[c000000012a77d70] [c0000000001cc3b0] .SyS_write+0x58/0xa0
+[c000000012a77e30] [c00000000000a064] syscall_exit+0x0/0x98
+
+Signed-off-by: Gavin Shan <gwshan@linux.vnet.ibm.com>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/platforms/pseries/hotplug-memory.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
++++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
+@@ -146,7 +146,7 @@ static inline int pseries_remove_membloc
+ }
+ static inline int pseries_remove_mem_node(struct device_node *np)
+ {
+- return -EOPNOTSUPP;
++ return 0;
+ }
+ #endif /* CONFIG_MEMORY_HOTREMOVE */
+
--- /dev/null
+From b0aa44a3dfae3d8f45bd1264349aa87f87b7774f Mon Sep 17 00:00:00 2001
+From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
+Date: Wed, 13 Aug 2014 12:31:57 +0530
+Subject: powerpc/thp: Add write barrier after updating the valid bit
+
+From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
+
+commit b0aa44a3dfae3d8f45bd1264349aa87f87b7774f upstream.
+
+With hugepages, we store the hpte valid information in the pte page
+whose address is stored in the second half of the PMD. Use a
+write barrier to make sure that clearing the pmd busy bit and updating
+the hpte valid info are ordered properly.
+
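+The required ordering, sketched (simplified from the hunk below):
+
+	mark_hpte_slot_valid(hpte_slot_array, index, slot); /* store 1 */
+	smp_wmb();		/* store 1 visible before store 2 */
+	*pmdp = __pmd(new_pmd & ~_PAGE_BUSY);		    /* store 2 */
+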
+Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/mm/hugepage-hash64.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/arch/powerpc/mm/hugepage-hash64.c
++++ b/arch/powerpc/mm/hugepage-hash64.c
+@@ -172,8 +172,11 @@ repeat:
+ mark_hpte_slot_valid(hpte_slot_array, index, slot);
+ }
+ /*
+- * No need to use ldarx/stdcx here
++ * The hpte valid is stored in the pgtable whose address is in the
++ * second half of the PMD. Order this against clearing of the busy bit in
++ * huge pmd.
+ */
++ smp_wmb();
+ *pmdp = __pmd(new_pmd & ~_PAGE_BUSY);
+ return 0;
+ }
--- /dev/null
+From fa1f8ae80f8bb996594167ff4750a0b0a5a5bb5d Mon Sep 17 00:00:00 2001
+From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
+Date: Wed, 13 Aug 2014 12:31:58 +0530
+Subject: powerpc/thp: Don't recompute vsid and ssize in loop on invalidate
+
+From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
+
+commit fa1f8ae80f8bb996594167ff4750a0b0a5a5bb5d upstream.
+
+The segment identifier and segment size will remain the same in
+the loop, so we can compute them outside of it. We also change the
+hugepage_invalidate interface so that we can use it in a later patch.
+
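+The shape of the change, sketched: only what actually varies with the
+address stays inside the loop:
+
+	vsid = get_vsid(mm->context.id, s_addr, ssize);	/* loop-invariant */
+	for (i = 0; i < max_hpte_count; i++) {
+		addr = s_addr + (i * (1ul << shift));
+		vpn = hpt_vpn(addr, vsid, ssize);	/* varies per entry */
+	}
+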
+Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/include/asm/machdep.h | 6 +++---
+ arch/powerpc/mm/hash_native_64.c | 19 +++++--------------
+ arch/powerpc/mm/pgtable_64.c | 24 ++++++++++++------------
+ arch/powerpc/platforms/pseries/lpar.c | 20 ++++++--------------
+ 4 files changed, 26 insertions(+), 43 deletions(-)
+
+--- a/arch/powerpc/include/asm/machdep.h
++++ b/arch/powerpc/include/asm/machdep.h
+@@ -57,10 +57,10 @@ struct machdep_calls {
+ void (*hpte_removebolted)(unsigned long ea,
+ int psize, int ssize);
+ void (*flush_hash_range)(unsigned long number, int local);
+- void (*hugepage_invalidate)(struct mm_struct *mm,
++ void (*hugepage_invalidate)(unsigned long vsid,
++ unsigned long addr,
+ unsigned char *hpte_slot_array,
+- unsigned long addr, int psize);
+-
++ int psize, int ssize);
+ /* special for kexec, to be called in real mode, linear mapping is
+ * destroyed as well */
+ void (*hpte_clear_all)(void);
+--- a/arch/powerpc/mm/hash_native_64.c
++++ b/arch/powerpc/mm/hash_native_64.c
+@@ -412,18 +412,18 @@ static void native_hpte_invalidate(unsig
+ local_irq_restore(flags);
+ }
+
+-static void native_hugepage_invalidate(struct mm_struct *mm,
++static void native_hugepage_invalidate(unsigned long vsid,
++ unsigned long addr,
+ unsigned char *hpte_slot_array,
+- unsigned long addr, int psize)
++ int psize, int ssize)
+ {
+- int ssize = 0, i;
+- int lock_tlbie;
++ int i, lock_tlbie;
+ struct hash_pte *hptep;
+ int actual_psize = MMU_PAGE_16M;
+ unsigned int max_hpte_count, valid;
+ unsigned long flags, s_addr = addr;
+ unsigned long hpte_v, want_v, shift;
+- unsigned long hidx, vpn = 0, vsid, hash, slot;
++ unsigned long hidx, vpn = 0, hash, slot;
+
+ shift = mmu_psize_defs[psize].shift;
+ max_hpte_count = 1U << (PMD_SHIFT - shift);
+@@ -437,15 +437,6 @@ static void native_hugepage_invalidate(s
+
+ /* get the vpn */
+ addr = s_addr + (i * (1ul << shift));
+- if (!is_kernel_addr(addr)) {
+- ssize = user_segment_size(addr);
+- vsid = get_vsid(mm->context.id, addr, ssize);
+- WARN_ON(vsid == 0);
+- } else {
+- vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
+- ssize = mmu_kernel_ssize;
+- }
+-
+ vpn = hpt_vpn(addr, vsid, ssize);
+ hash = hpt_hash(vpn, shift, ssize);
+ if (hidx & _PTEIDX_SECONDARY)
+--- a/arch/powerpc/mm/pgtable_64.c
++++ b/arch/powerpc/mm/pgtable_64.c
+@@ -745,12 +745,21 @@ void hpte_do_hugepage_flush(struct mm_st
+ if (!hpte_slot_array)
+ return;
+
+- /* get the base page size */
++ /* get the base page size,vsid and segment size */
+ psize = get_slice_psize(mm, s_addr);
++ if (!is_kernel_addr(s_addr)) {
++ ssize = user_segment_size(s_addr);
++ vsid = get_vsid(mm->context.id, s_addr, ssize);
++ WARN_ON(vsid == 0);
++ } else {
++ vsid = get_kernel_vsid(s_addr, mmu_kernel_ssize);
++ ssize = mmu_kernel_ssize;
++ }
+
+ if (ppc_md.hugepage_invalidate)
+- return ppc_md.hugepage_invalidate(mm, hpte_slot_array,
+- s_addr, psize);
++ return ppc_md.hugepage_invalidate(vsid, s_addr,
++ hpte_slot_array,
++ psize, ssize);
+ /*
+ * No bluk hpte removal support, invalidate each entry
+ */
+@@ -768,15 +777,6 @@ void hpte_do_hugepage_flush(struct mm_st
+
+ /* get the vpn */
+ addr = s_addr + (i * (1ul << shift));
+- if (!is_kernel_addr(addr)) {
+- ssize = user_segment_size(addr);
+- vsid = get_vsid(mm->context.id, addr, ssize);
+- WARN_ON(vsid == 0);
+- } else {
+- vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
+- ssize = mmu_kernel_ssize;
+- }
+-
+ vpn = hpt_vpn(addr, vsid, ssize);
+ hash = hpt_hash(vpn, shift, ssize);
+ if (hidx & _PTEIDX_SECONDARY)
+--- a/arch/powerpc/platforms/pseries/lpar.c
++++ b/arch/powerpc/platforms/pseries/lpar.c
+@@ -430,16 +430,17 @@ static void __pSeries_lpar_hugepage_inva
+ spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
+ }
+
+-static void pSeries_lpar_hugepage_invalidate(struct mm_struct *mm,
+- unsigned char *hpte_slot_array,
+- unsigned long addr, int psize)
++static void pSeries_lpar_hugepage_invalidate(unsigned long vsid,
++ unsigned long addr,
++ unsigned char *hpte_slot_array,
++ int psize, int ssize)
+ {
+- int ssize = 0, i, index = 0;
++ int i, index = 0;
+ unsigned long s_addr = addr;
+ unsigned int max_hpte_count, valid;
+ unsigned long vpn_array[PPC64_HUGE_HPTE_BATCH];
+ unsigned long slot_array[PPC64_HUGE_HPTE_BATCH];
+- unsigned long shift, hidx, vpn = 0, vsid, hash, slot;
++ unsigned long shift, hidx, vpn = 0, hash, slot;
+
+ shift = mmu_psize_defs[psize].shift;
+ max_hpte_count = 1U << (PMD_SHIFT - shift);
+@@ -452,15 +453,6 @@ static void pSeries_lpar_hugepage_invali
+
+ /* get the vpn */
+ addr = s_addr + (i * (1ul << shift));
+- if (!is_kernel_addr(addr)) {
+- ssize = user_segment_size(addr);
+- vsid = get_vsid(mm->context.id, addr, ssize);
+- WARN_ON(vsid == 0);
+- } else {
+- vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
+- ssize = mmu_kernel_ssize;
+- }
+-
+ vpn = hpt_vpn(addr, vsid, ssize);
+ hash = hpt_hash(vpn, shift, ssize);
+ if (hidx & _PTEIDX_SECONDARY)
--- /dev/null
+From fc0479557572375100ef16c71170b29a98e0d69a Mon Sep 17 00:00:00 2001
+From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
+Date: Wed, 13 Aug 2014 12:32:00 +0530
+Subject: powerpc/thp: Handle combo pages in invalidate
+
+From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
+
+commit fc0479557572375100ef16c71170b29a98e0d69a upstream.
+
+If we change the base page size of the segment, either via
+sub_page_protect or via remap_4k_pfn, we do a demote_segment, which
+doesn't flush the hash table entries. We instead do a lazy hash page
+table flush for all mapped pages in the demoted segment; this happens
+when we handle a hash page fault for these pages.
+
+We use the _PAGE_COMBO bit along with _PAGE_HASHPTE to indicate whether
+a pte is backed by 4K hash ptes. If we find _PAGE_COMBO not set on the
+pte, that implies we could possibly have older 64K hash pte entries in
+the hash page table, and we need to invalidate those entries.
+
+Use _PAGE_COMBO to determine the page size with which we should
+invalidate the hash table entries on unmap.
+
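+The page-size decision, sketched with the reasoning as comments
+(mirroring the hunk in hpte_do_hugepage_flush() below):
+
+	if (old_pmd & _PAGE_COMBO)
+		/* segment was demoted: the hash ptes were inserted with
+		 * a 4K base page size */
+		psize = MMU_PAGE_4K;
+	else
+		/* still backed by 64K base pages */
+		psize = MMU_PAGE_64K;
+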
+Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/include/asm/pgtable-ppc64.h | 2 +-
+ arch/powerpc/mm/pgtable_64.c | 14 +++++++++++---
+ arch/powerpc/mm/tlb_hash64.c | 2 +-
+ 3 files changed, 13 insertions(+), 5 deletions(-)
+
+--- a/arch/powerpc/include/asm/pgtable-ppc64.h
++++ b/arch/powerpc/include/asm/pgtable-ppc64.h
+@@ -413,7 +413,7 @@ static inline char *get_hpte_slot_array(
+ }
+
+ extern void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
+- pmd_t *pmdp);
++ pmd_t *pmdp, unsigned long old_pmd);
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ extern pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot);
+ extern pmd_t mk_pmd(struct page *page, pgprot_t pgprot);
+--- a/arch/powerpc/mm/pgtable_64.c
++++ b/arch/powerpc/mm/pgtable_64.c
+@@ -538,7 +538,7 @@ unsigned long pmd_hugepage_update(struct
+ *pmdp = __pmd((old & ~clr) | set);
+ #endif
+ if (old & _PAGE_HASHPTE)
+- hpte_do_hugepage_flush(mm, addr, pmdp);
++ hpte_do_hugepage_flush(mm, addr, pmdp, old);
+ return old;
+ }
+
+@@ -645,7 +645,7 @@ void pmdp_splitting_flush(struct vm_area
+ if (!(old & _PAGE_SPLITTING)) {
+ /* We need to flush the hpte */
+ if (old & _PAGE_HASHPTE)
+- hpte_do_hugepage_flush(vma->vm_mm, address, pmdp);
++ hpte_do_hugepage_flush(vma->vm_mm, address, pmdp, old);
+ }
+ /*
+ * This ensures that generic code that rely on IRQ disabling
+@@ -723,7 +723,7 @@ void pmdp_invalidate(struct vm_area_stru
+ * neesd to be flushed.
+ */
+ void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
+- pmd_t *pmdp)
++ pmd_t *pmdp, unsigned long old_pmd)
+ {
+ int ssize, i;
+ unsigned long s_addr;
+@@ -746,7 +746,15 @@ void hpte_do_hugepage_flush(struct mm_st
+ return;
+
+ /* get the base page size,vsid and segment size */
++#ifdef CONFIG_DEBUG_VM
+ psize = get_slice_psize(mm, s_addr);
++ BUG_ON(psize == MMU_PAGE_16M);
++#endif
++ if (old_pmd & _PAGE_COMBO)
++ psize = MMU_PAGE_4K;
++ else
++ psize = MMU_PAGE_64K;
++
+ if (!is_kernel_addr(s_addr)) {
+ ssize = user_segment_size(s_addr);
+ vsid = get_vsid(mm->context.id, s_addr, ssize);
+--- a/arch/powerpc/mm/tlb_hash64.c
++++ b/arch/powerpc/mm/tlb_hash64.c
+@@ -216,7 +216,7 @@ void __flush_hash_table_range(struct mm_
+ if (!(pte & _PAGE_HASHPTE))
+ continue;
+ if (unlikely(hugepage_shift && pmd_trans_huge(*(pmd_t *)pte)))
+- hpte_do_hugepage_flush(mm, start, (pmd_t *)pte);
++ hpte_do_hugepage_flush(mm, start, (pmd_t *)ptep, pte);
+ else
+ hpte_need_flush(mm, start, ptep, pte, 0);
+ }
--- /dev/null
+From 629149fae478f0ac6bf705a535708b192e9c6b59 Mon Sep 17 00:00:00 2001
+From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
+Date: Wed, 13 Aug 2014 12:31:59 +0530
+Subject: powerpc/thp: Invalidate old 64K based hash page mapping before insert of 4k pte
+
+From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
+
+commit 629149fae478f0ac6bf705a535708b192e9c6b59 upstream.
+
+If we change the base page size of the segment, either via
+sub_page_protect or via remap_4k_pfn, we do a demote_segment, which
+doesn't flush the hash table entries. We instead do a lazy hash page
+table flush for all mapped pages in the demoted segment; this happens
+when we handle a hash page fault for these pages.
+
+We use the _PAGE_COMBO bit along with _PAGE_HASHPTE to indicate whether
+a pte is backed by 4K hash ptes. If we find _PAGE_COMBO not set on the
+pte, that implies we could possibly have older 64K hash pte entries in
+the hash page table, and we need to invalidate those entries.
+
+Handle this correctly for 16M pages.
+
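+The trigger condition, sketched (mirroring the hunk added to
+__hash_page_thp() below):
+
+	if (psize == MMU_PAGE_4K &&
+	    (old_pmd & _PAGE_HASHPTE) && !(old_pmd & _PAGE_COMBO))
+		/* hashed earlier with a 64K base page size and never
+		 * flushed: invalidate those entries first */
+		invalidate_old_hpte(vsid, ea, pmdp, MMU_PAGE_64K, ssize);
+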
+Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/mm/hugepage-hash64.c | 79 +++++++++++++++++++++++++++++++++-----
+ 1 file changed, 70 insertions(+), 9 deletions(-)
+
+--- a/arch/powerpc/mm/hugepage-hash64.c
++++ b/arch/powerpc/mm/hugepage-hash64.c
+@@ -18,6 +18,57 @@
+ #include <linux/mm.h>
+ #include <asm/machdep.h>
+
++static void invalidate_old_hpte(unsigned long vsid, unsigned long addr,
++ pmd_t *pmdp, unsigned int psize, int ssize)
++{
++ int i, max_hpte_count, valid;
++ unsigned long s_addr;
++ unsigned char *hpte_slot_array;
++ unsigned long hidx, shift, vpn, hash, slot;
++
++ s_addr = addr & HPAGE_PMD_MASK;
++ hpte_slot_array = get_hpte_slot_array(pmdp);
++ /*
++ * IF we try to do a HUGE PTE update after a withdraw is done.
++ * we will find the below NULL. This happens when we do
++ * split_huge_page_pmd
++ */
++ if (!hpte_slot_array)
++ return;
++
++ if (ppc_md.hugepage_invalidate)
++ return ppc_md.hugepage_invalidate(vsid, s_addr, hpte_slot_array,
++ psize, ssize);
++ /*
++ * No bluk hpte removal support, invalidate each entry
++ */
++ shift = mmu_psize_defs[psize].shift;
++ max_hpte_count = HPAGE_PMD_SIZE >> shift;
++ for (i = 0; i < max_hpte_count; i++) {
++ /*
++ * 8 bits per each hpte entries
++ * 000| [ secondary group (one bit) | hidx (3 bits) | valid bit]
++ */
++ valid = hpte_valid(hpte_slot_array, i);
++ if (!valid)
++ continue;
++ hidx = hpte_hash_index(hpte_slot_array, i);
++
++ /* get the vpn */
++ addr = s_addr + (i * (1ul << shift));
++ vpn = hpt_vpn(addr, vsid, ssize);
++ hash = hpt_hash(vpn, shift, ssize);
++ if (hidx & _PTEIDX_SECONDARY)
++ hash = ~hash;
++
++ slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
++ slot += hidx & _PTEIDX_GROUP_IX;
++ ppc_md.hpte_invalidate(slot, vpn, psize,
++ MMU_PAGE_16M, ssize, 0);
++ }
++}
++
++
+ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
+ pmd_t *pmdp, unsigned long trap, int local, int ssize,
+ unsigned int psize)
+@@ -85,6 +136,15 @@ int __hash_page_thp(unsigned long ea, un
+ vpn = hpt_vpn(ea, vsid, ssize);
+ hash = hpt_hash(vpn, shift, ssize);
+ hpte_slot_array = get_hpte_slot_array(pmdp);
++ if (psize == MMU_PAGE_4K) {
++ /*
++ * invalidate the old hpte entry if we have that mapped via 64K
++ * base page size. This is because demote_segment won't flush
++ * hash page table entries.
++ */
++ if ((old_pmd & _PAGE_HASHPTE) && !(old_pmd & _PAGE_COMBO))
++ invalidate_old_hpte(vsid, ea, pmdp, MMU_PAGE_64K, ssize);
++ }
+
+ valid = hpte_valid(hpte_slot_array, index);
+ if (valid) {
+@@ -107,11 +167,8 @@ int __hash_page_thp(unsigned long ea, un
+ * safely update this here.
+ */
+ valid = 0;
+- new_pmd &= ~_PAGE_HPTEFLAGS;
+ hpte_slot_array[index] = 0;
+- } else
+- /* clear the busy bits and set the hash pte bits */
+- new_pmd = (new_pmd & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;
++ }
+ }
+
+ if (!valid) {
+@@ -119,11 +176,7 @@ int __hash_page_thp(unsigned long ea, un
+
+ /* insert new entry */
+ pa = pmd_pfn(__pmd(old_pmd)) << PAGE_SHIFT;
+-repeat:
+- hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
+-
+- /* clear the busy bits and set the hash pte bits */
+- new_pmd = (new_pmd & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;
++ new_pmd |= _PAGE_HASHPTE;
+
+ /* Add in WIMG bits */
+ rflags |= (new_pmd & (_PAGE_WRITETHRU | _PAGE_NO_CACHE |
+@@ -132,6 +185,8 @@ repeat:
+ * enable the memory coherence always
+ */
+ rflags |= HPTE_R_M;
++repeat:
++ hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
+
+ /* Insert into the hash table, primary slot */
+ slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0,
+@@ -172,6 +227,12 @@ repeat:
+ mark_hpte_slot_valid(hpte_slot_array, index, slot);
+ }
+ /*
++ * Mark the pte with _PAGE_COMBO, if we are trying to hash it with
++ * base page size 4k.
++ */
++ if (psize == MMU_PAGE_4K)
++ new_pmd |= _PAGE_COMBO;
++ /*
+ * The hpte valid is stored in the pgtable whose address is in the
+ * second half of the PMD. Order this against clearing of the busy bit in
+ * huge pmd.
--- /dev/null
+From 969b7b208f7408712a3526856e4ae60ad13f6928 Mon Sep 17 00:00:00 2001
+From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
+Date: Wed, 13 Aug 2014 12:32:01 +0530
+Subject: powerpc/thp: Invalidate with vpn in loop
+
+From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
+
+commit 969b7b208f7408712a3526856e4ae60ad13f6928 upstream.
+
+As per the ISA, for a 4k base page size we compare bits 14..65 of the
+VA with the entry_VA in the tlb. That implies we need to make sure we
+do a tlbie with every possible 4k va that was used to access the 16MB
+hugepage. With a 64k base page size we compare bits 14..57 of the VA.
+Hence we cannot ignore the lower 24 bits of the va while doing a tlbie.
+We also cannot invalidate a 16MB tlb entry with just one tlbie
+instruction, because we don't track which va was used to instantiate
+the tlb entry.
+
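+Worked out: with a 4k base page size, a 16MB hugepage spans
+1 << (24 - 12) = 4096 distinct 4k virtual addresses, so in the worst
+case every valid slot needs its own tlbie with its own vpn, which is
+why the invalidation moves inside the per-slot loop below.
+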
+Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/mm/hash_native_64.c | 23 +++++++----------------
+ 1 file changed, 7 insertions(+), 16 deletions(-)
+
+--- a/arch/powerpc/mm/hash_native_64.c
++++ b/arch/powerpc/mm/hash_native_64.c
+@@ -417,7 +417,7 @@ static void native_hugepage_invalidate(u
+ unsigned char *hpte_slot_array,
+ int psize, int ssize)
+ {
+- int i, lock_tlbie;
++ int i;
+ struct hash_pte *hptep;
+ int actual_psize = MMU_PAGE_16M;
+ unsigned int max_hpte_count, valid;
+@@ -456,22 +456,13 @@ static void native_hugepage_invalidate(u
+ else
+ /* Invalidate the hpte. NOTE: this also unlocks it */
+ hptep->v = 0;
++ /*
++ * We need to do tlb invalidate for all the address, tlbie
++ * instruction compares entry_VA in tlb with the VA specified
++ * here
++ */
++ tlbie(vpn, psize, actual_psize, ssize, 0);
+ }
+- /*
+- * Since this is a hugepage, we just need a single tlbie.
+- * use the last vpn.
+- */
+- lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
+- if (lock_tlbie)
+- raw_spin_lock(&native_tlbie_lock);
+-
+- asm volatile("ptesync":::"memory");
+- __tlbie(vpn, psize, actual_psize, ssize);
+- asm volatile("eieio; tlbsync; ptesync":::"memory");
+-
+- if (lock_tlbie)
+- raw_spin_unlock(&native_tlbie_lock);
+-
+ local_irq_restore(flags);
+ }
+
--- /dev/null
+From 7e467245bf5226db34c4b12d3cbacfa2f7a15a8b Mon Sep 17 00:00:00 2001
+From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
+Date: Wed, 13 Aug 2014 12:32:02 +0530
+Subject: powerpc/thp: Use ACCESS_ONCE when loading pmdp
+
+From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
+
+commit 7e467245bf5226db34c4b12d3cbacfa2f7a15a8b upstream.
+
+We would get wrong results if the compiler recomputed old_pmd. Avoid
+that by using ACCESS_ONCE.
+
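+The hazard, sketched (cmpxchg_ok() stands in for the actual atomic
+update): old_pmd is tested and then reused, so the compiler must not
+be allowed to reload *pmdp in between:
+
+	do {
+		pmd_t pmd = ACCESS_ONCE(*pmdp);	/* exactly one load */
+
+		old_pmd = pmd_val(pmd);
+		/* every later use of old_pmd sees this same snapshot */
+		if (unlikely(old_pmd & _PAGE_BUSY))
+			return 0;
+	} while (!cmpxchg_ok(pmdp, old_pmd, old_pmd | _PAGE_BUSY));
+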
+Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/mm/hugepage-hash64.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/arch/powerpc/mm/hugepage-hash64.c
++++ b/arch/powerpc/mm/hugepage-hash64.c
+@@ -84,7 +84,9 @@ int __hash_page_thp(unsigned long ea, un
+ * atomically mark the linux large page PMD busy and dirty
+ */
+ do {
+- old_pmd = pmd_val(*pmdp);
++ pmd_t pmd = ACCESS_ONCE(*pmdp);
++
++ old_pmd = pmd_val(pmd);
+ /* If PMD busy, retry the access */
+ if (unlikely(old_pmd & _PAGE_BUSY))
+ return 0;
--- /dev/null
+From 5b919f3ebb533cbe400664837e24f66a0836b907 Mon Sep 17 00:00:00 2001
+From: Nikesh Oswal <nikesh@opensource.wolfsonmicro.com>
+Date: Fri, 4 Jul 2014 09:55:16 +0100
+Subject: regulator: arizona-ldo1: remove bypass functionality
+
+From: Nikesh Oswal <nikesh@opensource.wolfsonmicro.com>
+
+commit 5b919f3ebb533cbe400664837e24f66a0836b907 upstream.
+
+WM5110/8280 devices do not support bypass mode for LDO1, so remove
+the bypass callbacks registered with the regulator core.
+
+Signed-off-by: Nikesh Oswal <nikesh@opensource.wolfsonmicro.com>
+Signed-off-by: Mark Brown <broonie@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/regulator/arizona-ldo1.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/drivers/regulator/arizona-ldo1.c
++++ b/drivers/regulator/arizona-ldo1.c
+@@ -143,8 +143,6 @@ static struct regulator_ops arizona_ldo1
+ .map_voltage = regulator_map_voltage_linear,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+- .get_bypass = regulator_get_bypass_regmap,
+- .set_bypass = regulator_set_bypass_regmap,
+ };
+
+ static const struct regulator_desc arizona_ldo1 = {
--- /dev/null
+From 42ab0f3915f22728f54bb1f3c0dcf38ab2335b5b Mon Sep 17 00:00:00 2001
+From: Felipe Balbi <balbi@ti.com>
+Date: Tue, 8 Jul 2014 14:09:12 -0500
+Subject: regulator: tps65218: fix DCDC4 linear voltage range
+
+From: Felipe Balbi <balbi@ti.com>
+
+commit 42ab0f3915f22728f54bb1f3c0dcf38ab2335b5b upstream.
+
+The second range of this particular regulator starts at 1.60V,
+not at 1.55V as was originally implied by the code.
+
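+Worked out: REGULATOR_LINEAR_RANGE(min_uV, min_sel, max_sel, step_uV)
+maps selector s to min_uV + (s - min_sel) * step_uV. The first range
+tops out at 1175000 + 0xf * 25000 = 1550000 uV, and with this fix the
+second range runs from 1600000 uV at selector 0x10 up to
+1600000 + (0x34 - 0x10) * 50000 = 3400000 uV.
+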
+Signed-off-by: Felipe Balbi <balbi@ti.com>
+Signed-off-by: Mark Brown <broonie@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/regulator/tps65218-regulator.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/regulator/tps65218-regulator.c
++++ b/drivers/regulator/tps65218-regulator.c
+@@ -68,7 +68,7 @@ static const struct regulator_linear_ran
+
+ static const struct regulator_linear_range dcdc4_ranges[] = {
+ REGULATOR_LINEAR_RANGE(1175000, 0x0, 0xf, 25000),
+- REGULATOR_LINEAR_RANGE(1550000, 0x10, 0x34, 50000),
++ REGULATOR_LINEAR_RANGE(1600000, 0x10, 0x34, 50000),
+ };
+
+ static struct tps_info tps65218_pmic_regs[] = {
fanotify-fix-double-free-of-pending-permission-events.patch
ocfs2-do-not-write-error-flag-to-user-structure-we-cannot-copy-from-to.patch
sh-fix-build-error-by-adding-generic-ioport_-map-unmap.patch
+mm-fix-potential-infinite-loop-in-dissolve_free_huge_pages.patch
+kernel-smp.c-on_each_cpu_cond-fix-warning-in-fallback-path.patch
+mfd-omap-usb-host-fix-improper-mask-use.patch
+drivers-mfd-rtsx_usb.c-export-device-table.patch
+mfd-twl4030-power-fix-pm-idle-pin-configuration-to-not-conflict-with-regulators.patch
+regulator-arizona-ldo1-remove-bypass-functionality.patch
+regulator-tps65218-fix-dcdc4-linear-voltage-range.patch
+powerpc-cpuidle-fix-parsing-of-idle-state-flags-from-device-tree.patch
+powerpc-mm-numa-fix-break-placement.patch
+powerpc-mm-use-read-barrier-when-creating-real_pte.patch
+powerpc-pseries-failure-on-removing-device-node.patch
+powerpc-pseries-avoid-deadlock-on-removing-ddw.patch
+powerpc-thp-add-write-barrier-after-updating-the-valid-bit.patch
+powerpc-thp-don-t-recompute-vsid-and-ssize-in-loop-on-invalidate.patch
+powerpc-thp-invalidate-old-64k-based-hash-page-mapping-before-insert-of-4k-pte.patch
+powerpc-thp-handle-combo-pages-in-invalidate.patch
+powerpc-thp-invalidate-with-vpn-in-loop.patch
+powerpc-thp-use-access_once-when-loading-pmdp.patch