3.14-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 13 Sep 2014 00:01:59 +0000 (17:01 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 13 Sep 2014 00:01:59 +0000 (17:01 -0700)
added patches:
kernel-smp.c-on_each_cpu_cond-fix-warning-in-fallback-path.patch
mfd-omap-usb-host-fix-improper-mask-use.patch
powerpc-mm-numa-fix-break-placement.patch
powerpc-mm-use-read-barrier-when-creating-real_pte.patch
powerpc-pseries-avoid-deadlock-on-removing-ddw.patch
powerpc-pseries-failure-on-removing-device-node.patch
powerpc-thp-add-write-barrier-after-updating-the-valid-bit.patch
powerpc-thp-don-t-recompute-vsid-and-ssize-in-loop-on-invalidate.patch
powerpc-thp-handle-combo-pages-in-invalidate.patch
powerpc-thp-invalidate-old-64k-based-hash-page-mapping-before-insert-of-4k-pte.patch
powerpc-thp-invalidate-with-vpn-in-loop.patch
powerpc-thp-use-access_once-when-loading-pmdp.patch
regulator-arizona-ldo1-remove-bypass-functionality.patch

14 files changed:
queue-3.14/kernel-smp.c-on_each_cpu_cond-fix-warning-in-fallback-path.patch [new file with mode: 0644]
queue-3.14/mfd-omap-usb-host-fix-improper-mask-use.patch [new file with mode: 0644]
queue-3.14/powerpc-mm-numa-fix-break-placement.patch [new file with mode: 0644]
queue-3.14/powerpc-mm-use-read-barrier-when-creating-real_pte.patch [new file with mode: 0644]
queue-3.14/powerpc-pseries-avoid-deadlock-on-removing-ddw.patch [new file with mode: 0644]
queue-3.14/powerpc-pseries-failure-on-removing-device-node.patch [new file with mode: 0644]
queue-3.14/powerpc-thp-add-write-barrier-after-updating-the-valid-bit.patch [new file with mode: 0644]
queue-3.14/powerpc-thp-don-t-recompute-vsid-and-ssize-in-loop-on-invalidate.patch [new file with mode: 0644]
queue-3.14/powerpc-thp-handle-combo-pages-in-invalidate.patch [new file with mode: 0644]
queue-3.14/powerpc-thp-invalidate-old-64k-based-hash-page-mapping-before-insert-of-4k-pte.patch [new file with mode: 0644]
queue-3.14/powerpc-thp-invalidate-with-vpn-in-loop.patch [new file with mode: 0644]
queue-3.14/powerpc-thp-use-access_once-when-loading-pmdp.patch [new file with mode: 0644]
queue-3.14/regulator-arizona-ldo1-remove-bypass-functionality.patch [new file with mode: 0644]
queue-3.14/series

diff --git a/queue-3.14/kernel-smp.c-on_each_cpu_cond-fix-warning-in-fallback-path.patch b/queue-3.14/kernel-smp.c-on_each_cpu_cond-fix-warning-in-fallback-path.patch
new file mode 100644
index 0000000..b1e49ff
--- /dev/null
@@ -0,0 +1,38 @@
+From 618fde872163e782183ce574c77f1123e2be8887 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sasha.levin@oracle.com>
+Date: Wed, 6 Aug 2014 16:08:14 -0700
+Subject: kernel/smp.c:on_each_cpu_cond(): fix warning in fallback path
+
+From: Sasha Levin <sasha.levin@oracle.com>
+
+commit 618fde872163e782183ce574c77f1123e2be8887 upstream.
+
+The rarely-executed memory-allocation-failed callback path generates a
+WARN_ON_ONCE() when smp_call_function_single() succeeds.  Presumably
+it's supposed to warn on failures.
+
+Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
+Cc: Christoph Lameter <cl@gentwo.org>
+Cc: Gilad Ben-Yossef <gilad@benyossef.com>
+Cc: David Rientjes <rientjes@google.com>
+Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
+Cc: Tejun Heo <htejun@gmail.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/smp.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/smp.c
++++ b/kernel/smp.c
+@@ -617,7 +617,7 @@ void on_each_cpu_cond(bool (*cond_func)(
+                       if (cond_func(cpu, info)) {
+                               ret = smp_call_function_single(cpu, func,
+                                                               info, wait);
+-                              WARN_ON_ONCE(!ret);
++                              WARN_ON_ONCE(ret);
+                       }
+               preempt_enable();
+       }
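
The one-character fix relies on the kernel convention that smp_call_function_single() returns 0 on success and a negative error code on failure, so the warning must fire on a nonzero return, not on zero. A minimal userspace sketch of that convention (the helper, values, and simplified WARN_ON_ONCE are hypothetical stand-ins, with the warn-once behavior elided):

#include <stdio.h>

/* Simplified stand-in for the kernel macro (warns every time, not once). */
#define WARN_ON_ONCE(cond) \
	do { if (cond) fprintf(stderr, "WARNING: %s\n", #cond); } while (0)

/* Hypothetical helper: returns 0 on success, negative errno on failure. */
static int call_on_cpu(int cpu)
{
	return (cpu >= 0 && cpu < 4) ? 0 : -22;	/* -EINVAL analogue */
}

int main(void)
{
	int ret = call_on_cpu(1);	/* succeeds: ret == 0 */
	WARN_ON_ONCE(!ret);		/* buggy check: warns on success */
	WARN_ON_ONCE(ret);		/* fixed check: stays silent here */

	ret = call_on_cpu(99);		/* fails: ret == -22 */
	WARN_ON_ONCE(ret);		/* fixed check: warning fires */
	return 0;
}
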
diff --git a/queue-3.14/mfd-omap-usb-host-fix-improper-mask-use.patch b/queue-3.14/mfd-omap-usb-host-fix-improper-mask-use.patch
new file mode 100644
index 0000000..bcf6f67
--- /dev/null
@@ -0,0 +1,35 @@
+From 46de8ff8e80a6546aa3d2fdf58c6776666301a0c Mon Sep 17 00:00:00 2001
+From: Michael Welling <mwelling@emacinc.com>
+Date: Mon, 28 Jul 2014 18:01:04 -0500
+Subject: mfd: omap-usb-host: Fix improper mask use.
+
+From: Michael Welling <mwelling@emacinc.com>
+
+commit 46de8ff8e80a6546aa3d2fdf58c6776666301a0c upstream.
+
+single-ulpi-bypass is a flag used for older OMAP3 silicon.
+
+When set, this flag can trigger code that improperly uses the
+OMAP_UHH_HOSTCONFIG_ULPI_BYPASS define to clear the corresponding bit.
+Instead of clearing just that bit, it clears all of the other bits,
+disabling all of the ports in the process.
+
+Signed-off-by: Michael Welling <mwelling@emacinc.com>
+Signed-off-by: Lee Jones <lee.jones@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mfd/omap-usb-host.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/mfd/omap-usb-host.c
++++ b/drivers/mfd/omap-usb-host.c
+@@ -445,7 +445,7 @@ static unsigned omap_usbhs_rev1_hostconf
+               for (i = 0; i < omap->nports; i++) {
+                       if (is_ehci_phy_mode(pdata->port_mode[i])) {
+-                              reg &= OMAP_UHH_HOSTCONFIG_ULPI_BYPASS;
++                              reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_BYPASS;
+                               break;
+                       }
+               }
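
The fix turns a mask-keep into a mask-clear: "reg &= MASK" keeps only the masked bit and wipes everything else, while "reg &= ~MASK" clears only that bit. A standalone sketch with a hypothetical bit value:

#include <stdio.h>

#define ULPI_BYPASS 0x01u	/* illustrative bit, not the real register layout */

int main(void)
{
	unsigned int reg = 0xffu;

	unsigned int buggy = reg & ULPI_BYPASS;		/* 0x01: all other bits wiped */
	unsigned int fixed = reg & ~ULPI_BYPASS;	/* 0xfe: only bypass bit cleared */

	printf("buggy=0x%02x fixed=0x%02x\n", buggy, fixed);
	return 0;
}
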
diff --git a/queue-3.14/powerpc-mm-numa-fix-break-placement.patch b/queue-3.14/powerpc-mm-numa-fix-break-placement.patch
new file mode 100644
index 0000000..d27ef27
--- /dev/null
@@ -0,0 +1,31 @@
+From b00fc6ec1f24f9d7af9b8988b6a198186eb3408c Mon Sep 17 00:00:00 2001
+From: Andrey Utkin <andrey.krieger.utkin@gmail.com>
+Date: Mon, 4 Aug 2014 23:13:10 +0300
+Subject: powerpc/mm/numa: Fix break placement
+
+From: Andrey Utkin <andrey.krieger.utkin@gmail.com>
+
+commit b00fc6ec1f24f9d7af9b8988b6a198186eb3408c upstream.
+
+Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=81631
+Reported-by: David Binderman <dcb314@hotmail.com>
+Signed-off-by: Andrey Utkin <andrey.krieger.utkin@gmail.com>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/mm/numa.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/powerpc/mm/numa.c
++++ b/arch/powerpc/mm/numa.c
+@@ -610,8 +610,8 @@ static int cpu_numa_callback(struct noti
+       case CPU_UP_CANCELED:
+       case CPU_UP_CANCELED_FROZEN:
+               unmap_cpu_from_node(lcpu);
+-              break;
+               ret = NOTIFY_OK;
++              break;
+ #endif
+       }
+       return ret;
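
The bug is purely control flow: with "break" placed before the assignment, the assignment is dead code and the notifier keeps returning its initial value. A minimal sketch (return values are stand-ins, not the kernel's notifier constants):

#include <stdio.h>

static int cpu_callback(int action)
{
	int ret = 0;	/* "not handled" */

	switch (action) {
	case 1:
		/* buggy order was:
		 *	break;
		 *	ret = 1;	<- never reached
		 */
		ret = 1;	/* fixed: assign first... */
		break;		/* ...then break */
	}
	return ret;
}

int main(void)
{
	printf("ret = %d\n", cpu_callback(1));	/* prints 1 with the fix */
	return 0;
}
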
diff --git a/queue-3.14/powerpc-mm-use-read-barrier-when-creating-real_pte.patch b/queue-3.14/powerpc-mm-use-read-barrier-when-creating-real_pte.patch
new file mode 100644
index 0000000..24d419a
--- /dev/null
@@ -0,0 +1,64 @@
+From 85c1fafd7262e68ad821ee1808686b1392b1167d Mon Sep 17 00:00:00 2001
+From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
+Date: Wed, 13 Aug 2014 12:32:03 +0530
+Subject: powerpc/mm: Use read barrier when creating real_pte
+
+From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
+
+commit 85c1fafd7262e68ad821ee1808686b1392b1167d upstream.
+
+On ppc64 we support 4K hash ptes with a 64K page size. That requires
+us to track the hash pte slot information on a per-4K basis. We do that
+by storing the slot details in the second half of the pte page. The pte
+bit _PAGE_COMBO indicates whether the second half needs to be looked at
+while building the real_pte. We need a read memory barrier there so that
+the load of hidx is not reordered w.r.t. the _PAGE_COMBO check. On the
+store side we already do an lwsync in __hash_page_4K.
+
+Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/include/asm/pte-hash64-64k.h |   30 +++++++++++++++++++++++++-----
+ 1 file changed, 25 insertions(+), 5 deletions(-)
+
+--- a/arch/powerpc/include/asm/pte-hash64-64k.h
++++ b/arch/powerpc/include/asm/pte-hash64-64k.h
+@@ -46,11 +46,31 @@
+  * in order to deal with 64K made of 4K HW pages. Thus we override the
+  * generic accessors and iterators here
+  */
+-#define __real_pte(e,p)       ((real_pte_t) { \
+-                      (e), (pte_val(e) & _PAGE_COMBO) ? \
+-                              (pte_val(*((p) + PTRS_PER_PTE))) : 0 })
+-#define __rpte_to_hidx(r,index)       ((pte_val((r).pte) & _PAGE_COMBO) ? \
+-        (((r).hidx >> ((index)<<2)) & 0xf) : ((pte_val((r).pte) >> 12) & 0xf))
++#define __real_pte __real_pte
++static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep)
++{
++      real_pte_t rpte;
++
++      rpte.pte = pte;
++      rpte.hidx = 0;
++      if (pte_val(pte) & _PAGE_COMBO) {
++              /*
++               * Make sure we order the hidx load against the _PAGE_COMBO
++               * check. The store side ordering is done in __hash_page_4K
++               */
++              smp_rmb();
++              rpte.hidx = pte_val(*((ptep) + PTRS_PER_PTE));
++      }
++      return rpte;
++}
++
++static inline unsigned long __rpte_to_hidx(real_pte_t rpte, unsigned long index)
++{
++      if ((pte_val(rpte.pte) & _PAGE_COMBO))
++              return (rpte.hidx >> (index<<2)) & 0xf;
++      return (pte_val(rpte.pte) >> 12) & 0xf;
++}
++
+ #define __rpte_to_pte(r)      ((r).pte)
+ #define __rpte_sub_valid(rpte, index) \
+       (pte_val(rpte.pte) & (_PAGE_HPTE_SUB0 >> (index)))
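
The ordering the commit message describes can be sketched in userspace with C11 fences standing in for lwsync (store side) and smp_rmb() (load side); the flag and payload below are analogues of _PAGE_COMBO and hidx, not the kernel's code:

#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

static int hidx;		/* payload (hidx analogue) */
static atomic_int combo;	/* flag (_PAGE_COMBO analogue) */

static void *writer(void *arg)
{
	hidx = 42;					/* store payload */
	atomic_thread_fence(memory_order_release);	/* lwsync analogue */
	atomic_store_explicit(&combo, 1, memory_order_relaxed);
	return NULL;
}

static void *reader(void *arg)
{
	if (atomic_load_explicit(&combo, memory_order_relaxed)) {
		atomic_thread_fence(memory_order_acquire);	/* smp_rmb() analogue */
		printf("hidx = %d\n", hidx);	/* guaranteed to see 42 */
	}
	return NULL;
}

int main(void)
{
	pthread_t w, r;
	pthread_create(&w, NULL, writer, NULL);
	pthread_create(&r, NULL, reader, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	return 0;
}
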
diff --git a/queue-3.14/powerpc-pseries-avoid-deadlock-on-removing-ddw.patch b/queue-3.14/powerpc-pseries-avoid-deadlock-on-removing-ddw.patch
new file mode 100644
index 0000000..7ce31a0
--- /dev/null
@@ -0,0 +1,142 @@
+From 5efbabe09d986f25c02d19954660238fcd7f008a Mon Sep 17 00:00:00 2001
+From: Gavin Shan <gwshan@linux.vnet.ibm.com>
+Date: Mon, 11 Aug 2014 19:16:20 +1000
+Subject: powerpc/pseries: Avoid deadlock on removing ddw
+
+From: Gavin Shan <gwshan@linux.vnet.ibm.com>
+
+commit 5efbabe09d986f25c02d19954660238fcd7f008a upstream.
+
+Function remove_ddw() can be called from an of_reconfig_notifier, where
+we potentially remove the dynamic DMA window property, which invokes
+the of_reconfig_notifier chain again. Eventually, this leads to the
+deadlock the following backtrace shows.
+
+The patch fixes the issue by deferring the release of the dynamic
+DMA window property until the device node itself is released.
+
+=============================================
+[ INFO: possible recursive locking detected ]
+3.16.0+ #428 Tainted: G        W
+---------------------------------------------
+drmgr/2273 is trying to acquire lock:
+ ((of_reconfig_chain).rwsem){.+.+..}, at: [<c000000000091890>] \
+ .__blocking_notifier_call_chain+0x40/0x78
+
+but task is already holding lock:
+ ((of_reconfig_chain).rwsem){.+.+..}, at: [<c000000000091890>] \
+ .__blocking_notifier_call_chain+0x40/0x78
+
+other info that might help us debug this:
+ Possible unsafe locking scenario:
+
+       CPU0
+       ----
+  lock((of_reconfig_chain).rwsem);
+  lock((of_reconfig_chain).rwsem);
+ *** DEADLOCK ***
+
+ May be due to missing lock nesting notation
+
+2 locks held by drmgr/2273:
+ #0:  (sb_writers#4){.+.+.+}, at: [<c0000000001cbe70>] \
+      .vfs_write+0xb0/0x1f8
+ #1:  ((of_reconfig_chain).rwsem){.+.+..}, at: [<c000000000091890>] \
+      .__blocking_notifier_call_chain+0x40/0x78
+
+stack backtrace:
+CPU: 17 PID: 2273 Comm: drmgr Tainted: G        W     3.16.0+ #428
+Call Trace:
+[c0000000137e7000] [c000000000013d9c] .show_stack+0x88/0x148 (unreliable)
+[c0000000137e70b0] [c00000000083cd34] .dump_stack+0x7c/0x9c
+[c0000000137e7130] [c0000000000b8afc] .__lock_acquire+0x128c/0x1c68
+[c0000000137e7280] [c0000000000b9a4c] .lock_acquire+0xe8/0x104
+[c0000000137e7350] [c00000000083588c] .down_read+0x4c/0x90
+[c0000000137e73e0] [c000000000091890] .__blocking_notifier_call_chain+0x40/0x78
+[c0000000137e7490] [c000000000091900] .blocking_notifier_call_chain+0x38/0x48
+[c0000000137e7520] [c000000000682a28] .of_reconfig_notify+0x34/0x5c
+[c0000000137e75b0] [c000000000682a9c] .of_property_notify+0x4c/0x54
+[c0000000137e7650] [c000000000682bf0] .of_remove_property+0x30/0xd4
+[c0000000137e76f0] [c000000000052a44] .remove_ddw+0x144/0x168
+[c0000000137e7790] [c000000000053204] .iommu_reconfig_notifier+0x30/0xe0
+[c0000000137e7820] [c00000000009137c] .notifier_call_chain+0x6c/0xb4
+[c0000000137e78c0] [c0000000000918ac] .__blocking_notifier_call_chain+0x5c/0x78
+[c0000000137e7970] [c000000000091900] .blocking_notifier_call_chain+0x38/0x48
+[c0000000137e7a00] [c000000000682a28] .of_reconfig_notify+0x34/0x5c
+[c0000000137e7a90] [c000000000682e14] .of_detach_node+0x44/0x1fc
+[c0000000137e7b40] [c0000000000518e4] .ofdt_write+0x3ac/0x688
+[c0000000137e7c20] [c000000000238430] .proc_reg_write+0xb8/0xd4
+[c0000000137e7cd0] [c0000000001cbeac] .vfs_write+0xec/0x1f8
+[c0000000137e7d70] [c0000000001cc3b0] .SyS_write+0x58/0xa0
+[c0000000137e7e30] [c00000000000a064] syscall_exit+0x0/0x98
+
+Signed-off-by: Gavin Shan <gwshan@linux.vnet.ibm.com>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/platforms/pseries/iommu.c |   20 ++++++++++++++------
+ 1 file changed, 14 insertions(+), 6 deletions(-)
+
+--- a/arch/powerpc/platforms/pseries/iommu.c
++++ b/arch/powerpc/platforms/pseries/iommu.c
+@@ -721,13 +721,13 @@ static int __init disable_ddw_setup(char
+ early_param("disable_ddw", disable_ddw_setup);
+-static void remove_ddw(struct device_node *np)
++static void remove_ddw(struct device_node *np, bool remove_prop)
+ {
+       struct dynamic_dma_window_prop *dwp;
+       struct property *win64;
+       const u32 *ddw_avail;
+       u64 liobn;
+-      int len, ret;
++      int len, ret = 0;
+       ddw_avail = of_get_property(np, "ibm,ddw-applicable", &len);
+       win64 = of_find_property(np, DIRECT64_PROPNAME, NULL);
+@@ -761,7 +761,8 @@ static void remove_ddw(struct device_nod
+                       np->full_name, ret, ddw_avail[2], liobn);
+ delprop:
+-      ret = of_remove_property(np, win64);
++      if (remove_prop)
++              ret = of_remove_property(np, win64);
+       if (ret)
+               pr_warning("%s: failed to remove direct window property: %d\n",
+                       np->full_name, ret);
+@@ -805,7 +806,7 @@ static int find_existing_ddw_windows(voi
+               window = kzalloc(sizeof(*window), GFP_KERNEL);
+               if (!window || len < sizeof(struct dynamic_dma_window_prop)) {
+                       kfree(window);
+-                      remove_ddw(pdn);
++                      remove_ddw(pdn, true);
+                       continue;
+               }
+@@ -1045,7 +1046,7 @@ out_free_window:
+       kfree(window);
+ out_clear_window:
+-      remove_ddw(pdn);
++      remove_ddw(pdn, true);
+ out_free_prop:
+       kfree(win64->name);
+@@ -1255,7 +1256,14 @@ static int iommu_reconfig_notifier(struc
+       switch (action) {
+       case OF_RECONFIG_DETACH_NODE:
+-              remove_ddw(np);
++              /*
++               * Removing the property will invoke the reconfig
++               * notifier again, which causes dead-lock on the
++               * read-write semaphore of the notifier chain. So
++               * we have to remove the property when releasing
++               * the device node.
++               */
++              remove_ddw(np, false);
+               if (pci && pci->iommu_table)
+                       iommu_free_table(pci->iommu_table, np->full_name);
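
Schematically, the deadlock is a notifier callback re-entering the notifier chain that already holds the chain's read-write semaphore as a reader. The sketch below models that shape with a pthread rwlock (all names invented); the comment in main() marks what the remove_ddw(np, false) fix does instead:

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t chain_lock = PTHREAD_RWLOCK_INITIALIZER;

static void notify(int depth);

static void remove_property(int depth)
{
	notify(depth + 1);	/* removing the property re-fires the chain */
}

static void notify(int depth)
{
	pthread_rwlock_rdlock(&chain_lock);	/* recursive read lock on re-entry;
						 * deadlocks once a writer queues
						 * between the two acquisitions */
	if (depth == 0)
		remove_property(depth);		/* buggy: remove inline */
	pthread_rwlock_unlock(&chain_lock);
}

int main(void)
{
	/* The fix skips the inline removal in the notifier and defers it
	 * to device-node release time, so notify() is never re-entered. */
	notify(0);
	printf("done\n");
	return 0;
}
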
diff --git a/queue-3.14/powerpc-pseries-failure-on-removing-device-node.patch b/queue-3.14/powerpc-pseries-failure-on-removing-device-node.patch
new file mode 100644
index 0000000..55a34e0
--- /dev/null
@@ -0,0 +1,51 @@
+From f1b3929c232784580e5d8ee324b6bc634e709575 Mon Sep 17 00:00:00 2001
+From: Gavin Shan <gwshan@linux.vnet.ibm.com>
+Date: Mon, 11 Aug 2014 19:16:19 +1000
+Subject: powerpc/pseries: Failure on removing device node
+
+From: Gavin Shan <gwshan@linux.vnet.ibm.com>
+
+commit f1b3929c232784580e5d8ee324b6bc634e709575 upstream.
+
+While running the command "drmgr -c phb -r -s 'PHB 528'", the following
+backtrace jumped out because the target device node isn't marked
+with OF_DETACHED by of_detach_node(). That is caused by an error
+returned from the memory hotplug related reconfig notifier when
+CONFIG_MEMORY_HOTREMOVE is disabled. The patch fixes it.
+
+ERROR: Bad of_node_put() on /pci@800000020000210/ethernet@0
+CPU: 14 PID: 2252 Comm: drmgr Tainted: G        W     3.16.0+ #427
+Call Trace:
+[c000000012a776a0] [c000000000013d9c] .show_stack+0x88/0x148 (unreliable)
+[c000000012a77750] [c00000000083cd34] .dump_stack+0x7c/0x9c
+[c000000012a777d0] [c0000000006807c4] .of_node_release+0x58/0xe0
+[c000000012a77860] [c00000000038a7d0] .kobject_release+0x174/0x1b8
+[c000000012a77900] [c00000000038a884] .kobject_put+0x70/0x78
+[c000000012a77980] [c000000000681680] .of_node_put+0x28/0x34
+[c000000012a77a00] [c000000000681ea8] .__of_get_next_child+0x64/0x70
+[c000000012a77a90] [c000000000682138] .of_find_node_by_path+0x1b8/0x20c
+[c000000012a77b40] [c000000000051840] .ofdt_write+0x308/0x688
+[c000000012a77c20] [c000000000238430] .proc_reg_write+0xb8/0xd4
+[c000000012a77cd0] [c0000000001cbeac] .vfs_write+0xec/0x1f8
+[c000000012a77d70] [c0000000001cc3b0] .SyS_write+0x58/0xa0
+[c000000012a77e30] [c00000000000a064] syscall_exit+0x0/0x98
+
+Signed-off-by: Gavin Shan <gwshan@linux.vnet.ibm.com>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/platforms/pseries/hotplug-memory.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
++++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
+@@ -160,7 +160,7 @@ static int pseries_remove_memory(struct
+ static inline int pseries_remove_memblock(unsigned long base,
+                                         unsigned int memblock_size)
+ {
+-      return -EOPNOTSUPP;
++      return 0;
+ }
+ static inline int pseries_remove_memory(struct device_node *np)
+ {
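
The fix restores the usual convention for compiled-out stubs: report success when there is nothing to undo, so that callers such as of_detach_node() can proceed. A minimal sketch (names and config macro used illustratively):

#include <stdio.h>

#ifdef CONFIG_MEMORY_HOTREMOVE
static int remove_memblock(unsigned long base)
{
	return do_real_removal(base);	/* real work when configured in */
}
#else
static inline int remove_memblock(unsigned long base)
{
	(void)base;
	return 0;	/* fixed: success; returning -EOPNOTSUPP here
			 * made the whole node detach fail */
}
#endif

int main(void)
{
	printf("%d\n", remove_memblock(0));
	return 0;
}
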
diff --git a/queue-3.14/powerpc-thp-add-write-barrier-after-updating-the-valid-bit.patch b/queue-3.14/powerpc-thp-add-write-barrier-after-updating-the-valid-bit.patch
new file mode 100644
index 0000000..dd8b555
--- /dev/null
@@ -0,0 +1,37 @@
+From b0aa44a3dfae3d8f45bd1264349aa87f87b7774f Mon Sep 17 00:00:00 2001
+From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
+Date: Wed, 13 Aug 2014 12:31:57 +0530
+Subject: powerpc/thp: Add write barrier after updating the valid bit
+
+From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
+
+commit b0aa44a3dfae3d8f45bd1264349aa87f87b7774f upstream.
+
+With hugepages, we store the hpte valid information in the pte page
+whose address is stored in the second half of the PMD. Use a
+write barrier to make sure clearing the pmd busy bit and updating the
+hpte valid info are ordered properly.
+
+Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/mm/hugepage-hash64.c |    5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/arch/powerpc/mm/hugepage-hash64.c
++++ b/arch/powerpc/mm/hugepage-hash64.c
+@@ -172,8 +172,11 @@ repeat:
+               mark_hpte_slot_valid(hpte_slot_array, index, slot);
+       }
+       /*
+-       * No need to use ldarx/stdcx here
++       * The hpte valid is stored in the pgtable whose address is in the
++       * second half of the PMD. Order this against clearing of the busy bit in
++       * huge pmd.
+        */
++      smp_wmb();
+       *pmdp = __pmd(new_pmd & ~_PAGE_BUSY);
+       return 0;
+ }
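
The store side pairs with the load-side sketch after the real_pte read-barrier patch above: publish the hpte valid info, fence, then clear the busy bit other CPUs test. Again a C11 analogue with invented names:

#include <stdatomic.h>

#define PAGE_BUSY 0x1u

static unsigned char hpte_valid_info;
static atomic_uint pmd;

static void finish_update(unsigned int new_pmd)
{
	hpte_valid_info = 1;				/* mark slot valid */
	atomic_thread_fence(memory_order_release);	/* smp_wmb() analogue */
	atomic_store_explicit(&pmd, new_pmd & ~PAGE_BUSY,
			      memory_order_relaxed);	/* clear busy bit */
}

int main(void)
{
	finish_update(0x3u);
	return (int)atomic_load(&pmd);
}
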
diff --git a/queue-3.14/powerpc-thp-don-t-recompute-vsid-and-ssize-in-loop-on-invalidate.patch b/queue-3.14/powerpc-thp-don-t-recompute-vsid-and-ssize-in-loop-on-invalidate.patch
new file mode 100644
index 0000000..3653921
--- /dev/null
@@ -0,0 +1,166 @@
+From fa1f8ae80f8bb996594167ff4750a0b0a5a5bb5d Mon Sep 17 00:00:00 2001
+From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
+Date: Wed, 13 Aug 2014 12:31:58 +0530
+Subject: powerpc/thp: Don't recompute vsid and ssize in loop on invalidate
+
+From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
+
+commit fa1f8ae80f8bb996594167ff4750a0b0a5a5bb5d upstream.
+
+The segment identifier and segment size remain the same throughout
+the loop, so we can compute them outside it. We also change the
+hugepage_invalidate interface so that we can use it in a later patch.
+
+Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/include/asm/machdep.h    |    6 +++---
+ arch/powerpc/mm/hash_native_64.c      |   19 +++++--------------
+ arch/powerpc/mm/pgtable_64.c          |   24 ++++++++++++------------
+ arch/powerpc/platforms/pseries/lpar.c |   20 ++++++--------------
+ 4 files changed, 26 insertions(+), 43 deletions(-)
+
+--- a/arch/powerpc/include/asm/machdep.h
++++ b/arch/powerpc/include/asm/machdep.h
+@@ -57,10 +57,10 @@ struct machdep_calls {
+       void            (*hpte_removebolted)(unsigned long ea,
+                                            int psize, int ssize);
+       void            (*flush_hash_range)(unsigned long number, int local);
+-      void            (*hugepage_invalidate)(struct mm_struct *mm,
++      void            (*hugepage_invalidate)(unsigned long vsid,
++                                             unsigned long addr,
+                                              unsigned char *hpte_slot_array,
+-                                             unsigned long addr, int psize);
+-
++                                             int psize, int ssize);
+       /* special for kexec, to be called in real mode, linear mapping is
+        * destroyed as well */
+       void            (*hpte_clear_all)(void);
+--- a/arch/powerpc/mm/hash_native_64.c
++++ b/arch/powerpc/mm/hash_native_64.c
+@@ -418,18 +418,18 @@ static void native_hpte_invalidate(unsig
+       local_irq_restore(flags);
+ }
+-static void native_hugepage_invalidate(struct mm_struct *mm,
++static void native_hugepage_invalidate(unsigned long vsid,
++                                     unsigned long addr,
+                                      unsigned char *hpte_slot_array,
+-                                     unsigned long addr, int psize)
++                                     int psize, int ssize)
+ {
+-      int ssize = 0, i;
+-      int lock_tlbie;
++      int i, lock_tlbie;
+       struct hash_pte *hptep;
+       int actual_psize = MMU_PAGE_16M;
+       unsigned int max_hpte_count, valid;
+       unsigned long flags, s_addr = addr;
+       unsigned long hpte_v, want_v, shift;
+-      unsigned long hidx, vpn = 0, vsid, hash, slot;
++      unsigned long hidx, vpn = 0, hash, slot;
+       shift = mmu_psize_defs[psize].shift;
+       max_hpte_count = 1U << (PMD_SHIFT - shift);
+@@ -443,15 +443,6 @@ static void native_hugepage_invalidate(s
+               /* get the vpn */
+               addr = s_addr + (i * (1ul << shift));
+-              if (!is_kernel_addr(addr)) {
+-                      ssize = user_segment_size(addr);
+-                      vsid = get_vsid(mm->context.id, addr, ssize);
+-                      WARN_ON(vsid == 0);
+-              } else {
+-                      vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
+-                      ssize = mmu_kernel_ssize;
+-              }
+-
+               vpn = hpt_vpn(addr, vsid, ssize);
+               hash = hpt_hash(vpn, shift, ssize);
+               if (hidx & _PTEIDX_SECONDARY)
+--- a/arch/powerpc/mm/pgtable_64.c
++++ b/arch/powerpc/mm/pgtable_64.c
+@@ -740,12 +740,21 @@ void hpte_do_hugepage_flush(struct mm_st
+       if (!hpte_slot_array)
+               return;
+-      /* get the base page size */
++      /* get the base page size,vsid and segment size */
+       psize = get_slice_psize(mm, s_addr);
++      if (!is_kernel_addr(s_addr)) {
++              ssize = user_segment_size(s_addr);
++              vsid = get_vsid(mm->context.id, s_addr, ssize);
++              WARN_ON(vsid == 0);
++      } else {
++              vsid = get_kernel_vsid(s_addr, mmu_kernel_ssize);
++              ssize = mmu_kernel_ssize;
++      }
+       if (ppc_md.hugepage_invalidate)
+-              return ppc_md.hugepage_invalidate(mm, hpte_slot_array,
+-                                                s_addr, psize);
++              return ppc_md.hugepage_invalidate(vsid, s_addr,
++                                                hpte_slot_array,
++                                                psize, ssize);
+       /*
+        * No bluk hpte removal support, invalidate each entry
+        */
+@@ -763,15 +772,6 @@ void hpte_do_hugepage_flush(struct mm_st
+               /* get the vpn */
+               addr = s_addr + (i * (1ul << shift));
+-              if (!is_kernel_addr(addr)) {
+-                      ssize = user_segment_size(addr);
+-                      vsid = get_vsid(mm->context.id, addr, ssize);
+-                      WARN_ON(vsid == 0);
+-              } else {
+-                      vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
+-                      ssize = mmu_kernel_ssize;
+-              }
+-
+               vpn = hpt_vpn(addr, vsid, ssize);
+               hash = hpt_hash(vpn, shift, ssize);
+               if (hidx & _PTEIDX_SECONDARY)
+--- a/arch/powerpc/platforms/pseries/lpar.c
++++ b/arch/powerpc/platforms/pseries/lpar.c
+@@ -430,16 +430,17 @@ static void __pSeries_lpar_hugepage_inva
+               spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
+ }
+-static void pSeries_lpar_hugepage_invalidate(struct mm_struct *mm,
+-                                     unsigned char *hpte_slot_array,
+-                                     unsigned long addr, int psize)
++static void pSeries_lpar_hugepage_invalidate(unsigned long vsid,
++                                           unsigned long addr,
++                                           unsigned char *hpte_slot_array,
++                                           int psize, int ssize)
+ {
+-      int ssize = 0, i, index = 0;
++      int i, index = 0;
+       unsigned long s_addr = addr;
+       unsigned int max_hpte_count, valid;
+       unsigned long vpn_array[PPC64_HUGE_HPTE_BATCH];
+       unsigned long slot_array[PPC64_HUGE_HPTE_BATCH];
+-      unsigned long shift, hidx, vpn = 0, vsid, hash, slot;
++      unsigned long shift, hidx, vpn = 0, hash, slot;
+       shift = mmu_psize_defs[psize].shift;
+       max_hpte_count = 1U << (PMD_SHIFT - shift);
+@@ -452,15 +453,6 @@ static void pSeries_lpar_hugepage_invali
+               /* get the vpn */
+               addr = s_addr + (i * (1ul << shift));
+-              if (!is_kernel_addr(addr)) {
+-                      ssize = user_segment_size(addr);
+-                      vsid = get_vsid(mm->context.id, addr, ssize);
+-                      WARN_ON(vsid == 0);
+-              } else {
+-                      vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
+-                      ssize = mmu_kernel_ssize;
+-              }
+-
+               vpn = hpt_vpn(addr, vsid, ssize);
+               hash = hpt_hash(vpn, shift, ssize);
+               if (hidx & _PTEIDX_SECONDARY)
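
The shape of the change is classic loop-invariant hoisting plus an interface change: the caller computes vsid and ssize once and the invalidate hook is widened to accept them. A compact sketch with illustrative names and values:

#include <stdio.h>

static void invalidate_one(unsigned long vpn, unsigned long vsid, int ssize)
{
	printf("invalidate vpn=%#lx vsid=%#lx ssize=%d\n", vpn, vsid, ssize);
}

/* after: vsid/ssize arrive as parameters instead of being rederived
 * from the address on every iteration */
static void hugepage_invalidate(unsigned long vsid, unsigned long addr,
				int count, int ssize)
{
	for (int i = 0; i < count; i++)
		invalidate_one(addr + i * 4096UL, vsid, ssize);
}

int main(void)
{
	unsigned long vsid = 0xabcUL;	/* computed once, outside the loop */

	hugepage_invalidate(vsid, 0x10000UL, 4, 1);
	return 0;
}
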
diff --git a/queue-3.14/powerpc-thp-handle-combo-pages-in-invalidate.patch b/queue-3.14/powerpc-thp-handle-combo-pages-in-invalidate.patch
new file mode 100644
index 0000000..db605c3
--- /dev/null
@@ -0,0 +1,100 @@
+From fc0479557572375100ef16c71170b29a98e0d69a Mon Sep 17 00:00:00 2001
+From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
+Date: Wed, 13 Aug 2014 12:32:00 +0530
+Subject: powerpc/thp: Handle combo pages in invalidate
+
+From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
+
+commit fc0479557572375100ef16c71170b29a98e0d69a upstream.
+
+If we change the base page size of the segment, either via sub_page_protect
+or via remap_4k_pfn, we do a demote_segment, which doesn't flush the hash
+table entries. Instead we do a lazy hash page table flush for all mapped
+pages in the demoted segment; this happens when we handle the hash page
+fault for these pages.
+
+We use the _PAGE_COMBO bit along with _PAGE_HASHPTE to indicate whether a
+pte is backed by 4K hash ptes. If we find _PAGE_COMBO not set on the pte,
+that implies that we could possibly have older 64K hash pte entries in
+the hash page table, and we need to invalidate those entries.
+
+Use _PAGE_COMBO to determine the page size with which we should
+invalidate the hash table entries on unmap.
+
+Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/include/asm/pgtable-ppc64.h |    2 +-
+ arch/powerpc/mm/pgtable_64.c             |   14 +++++++++++---
+ arch/powerpc/mm/tlb_hash64.c             |    2 +-
+ 3 files changed, 13 insertions(+), 5 deletions(-)
+
+--- a/arch/powerpc/include/asm/pgtable-ppc64.h
++++ b/arch/powerpc/include/asm/pgtable-ppc64.h
+@@ -413,7 +413,7 @@ static inline char *get_hpte_slot_array(
+ }
+ extern void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
+-                                 pmd_t *pmdp);
++                                 pmd_t *pmdp, unsigned long old_pmd);
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ extern pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot);
+ extern pmd_t mk_pmd(struct page *page, pgprot_t pgprot);
+--- a/arch/powerpc/mm/pgtable_64.c
++++ b/arch/powerpc/mm/pgtable_64.c
+@@ -538,7 +538,7 @@ unsigned long pmd_hugepage_update(struct
+       *pmdp = __pmd((old & ~clr) | set);
+ #endif
+       if (old & _PAGE_HASHPTE)
+-              hpte_do_hugepage_flush(mm, addr, pmdp);
++              hpte_do_hugepage_flush(mm, addr, pmdp, old);
+       return old;
+ }
+@@ -645,7 +645,7 @@ void pmdp_splitting_flush(struct vm_area
+       if (!(old & _PAGE_SPLITTING)) {
+               /* We need to flush the hpte */
+               if (old & _PAGE_HASHPTE)
+-                      hpte_do_hugepage_flush(vma->vm_mm, address, pmdp);
++                      hpte_do_hugepage_flush(vma->vm_mm, address, pmdp, old);
+       }
+ }
+@@ -718,7 +718,7 @@ void pmdp_invalidate(struct vm_area_stru
+  * neesd to be flushed.
+  */
+ void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
+-                          pmd_t *pmdp)
++                          pmd_t *pmdp, unsigned long old_pmd)
+ {
+       int ssize, i;
+       unsigned long s_addr;
+@@ -741,7 +741,15 @@ void hpte_do_hugepage_flush(struct mm_st
+               return;
+       /* get the base page size,vsid and segment size */
++#ifdef CONFIG_DEBUG_VM
+       psize = get_slice_psize(mm, s_addr);
++      BUG_ON(psize == MMU_PAGE_16M);
++#endif
++      if (old_pmd & _PAGE_COMBO)
++              psize = MMU_PAGE_4K;
++      else
++              psize = MMU_PAGE_64K;
++
+       if (!is_kernel_addr(s_addr)) {
+               ssize = user_segment_size(s_addr);
+               vsid = get_vsid(mm->context.id, s_addr, ssize);
+--- a/arch/powerpc/mm/tlb_hash64.c
++++ b/arch/powerpc/mm/tlb_hash64.c
+@@ -216,7 +216,7 @@ void __flush_hash_table_range(struct mm_
+               if (!(pte & _PAGE_HASHPTE))
+                       continue;
+               if (unlikely(hugepage_shift && pmd_trans_huge(*(pmd_t *)pte)))
+-                      hpte_do_hugepage_flush(mm, start, (pmd_t *)pte);
++                      hpte_do_hugepage_flush(mm, start, (pmd_t *)ptep, pte);
+               else
+                       hpte_need_flush(mm, start, ptep, pte, 0);
+       }
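
The key change is deriving the base page size the hash entries were created with from the saved pmd value itself (its combo bit) rather than from the slice map, which may already reflect the demoted segment. A sketch with illustrative flag and size values:

#include <stdio.h>

#define PAGE_COMBO	0x10UL
#define MMU_PAGE_4K	0
#define MMU_PAGE_64K	1

static int hash_psize(unsigned long old_pmd)
{
	return (old_pmd & PAGE_COMBO) ? MMU_PAGE_4K : MMU_PAGE_64K;
}

int main(void)
{
	printf("combo -> %d, no combo -> %d\n",
	       hash_psize(PAGE_COMBO), hash_psize(0));
	return 0;
}
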
diff --git a/queue-3.14/powerpc-thp-invalidate-old-64k-based-hash-page-mapping-before-insert-of-4k-pte.patch b/queue-3.14/powerpc-thp-invalidate-old-64k-based-hash-page-mapping-before-insert-of-4k-pte.patch
new file mode 100644
index 0000000..b55da1e
--- /dev/null
@@ -0,0 +1,154 @@
+From 629149fae478f0ac6bf705a535708b192e9c6b59 Mon Sep 17 00:00:00 2001
+From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
+Date: Wed, 13 Aug 2014 12:31:59 +0530
+Subject: powerpc/thp: Invalidate old 64K based hash page mapping before insert of 4k pte
+
+From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
+
+commit 629149fae478f0ac6bf705a535708b192e9c6b59 upstream.
+
+If we change the base page size of the segment, either via sub_page_protect
+or via remap_4k_pfn, we do a demote_segment, which doesn't flush the hash
+table entries. Instead we do a lazy hash page table flush for all mapped
+pages in the demoted segment; this happens when we handle the hash page
+fault for these pages.
+
+We use the _PAGE_COMBO bit along with _PAGE_HASHPTE to indicate whether a
+pte is backed by 4K hash ptes. If we find _PAGE_COMBO not set on the pte,
+that implies that we could possibly have older 64K hash pte entries in
+the hash page table, and we need to invalidate those entries.
+
+Handle this correctly for 16M pages.
+
+Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/mm/hugepage-hash64.c |   79 +++++++++++++++++++++++++++++++++-----
+ 1 file changed, 70 insertions(+), 9 deletions(-)
+
+--- a/arch/powerpc/mm/hugepage-hash64.c
++++ b/arch/powerpc/mm/hugepage-hash64.c
+@@ -18,6 +18,57 @@
+ #include <linux/mm.h>
+ #include <asm/machdep.h>
++static void invalidate_old_hpte(unsigned long vsid, unsigned long addr,
++                              pmd_t *pmdp, unsigned int psize, int ssize)
++{
++      int i, max_hpte_count, valid;
++      unsigned long s_addr;
++      unsigned char *hpte_slot_array;
++      unsigned long hidx, shift, vpn, hash, slot;
++
++      s_addr = addr & HPAGE_PMD_MASK;
++      hpte_slot_array = get_hpte_slot_array(pmdp);
++      /*
++       * IF we try to do a HUGE PTE update after a withdraw is done.
++       * we will find the below NULL. This happens when we do
++       * split_huge_page_pmd
++       */
++      if (!hpte_slot_array)
++              return;
++
++      if (ppc_md.hugepage_invalidate)
++              return ppc_md.hugepage_invalidate(vsid, s_addr, hpte_slot_array,
++                                                psize, ssize);
++      /*
++       * No bluk hpte removal support, invalidate each entry
++       */
++      shift = mmu_psize_defs[psize].shift;
++      max_hpte_count = HPAGE_PMD_SIZE >> shift;
++      for (i = 0; i < max_hpte_count; i++) {
++              /*
++               * 8 bits per each hpte entries
++               * 000| [ secondary group (one bit) | hidx (3 bits) | valid bit]
++               */
++              valid = hpte_valid(hpte_slot_array, i);
++              if (!valid)
++                      continue;
++              hidx =  hpte_hash_index(hpte_slot_array, i);
++
++              /* get the vpn */
++              addr = s_addr + (i * (1ul << shift));
++              vpn = hpt_vpn(addr, vsid, ssize);
++              hash = hpt_hash(vpn, shift, ssize);
++              if (hidx & _PTEIDX_SECONDARY)
++                      hash = ~hash;
++
++              slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
++              slot += hidx & _PTEIDX_GROUP_IX;
++              ppc_md.hpte_invalidate(slot, vpn, psize,
++                                     MMU_PAGE_16M, ssize, 0);
++      }
++}
++
++
+ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
+                   pmd_t *pmdp, unsigned long trap, int local, int ssize,
+                   unsigned int psize)
+@@ -85,6 +136,15 @@ int __hash_page_thp(unsigned long ea, un
+       vpn = hpt_vpn(ea, vsid, ssize);
+       hash = hpt_hash(vpn, shift, ssize);
+       hpte_slot_array = get_hpte_slot_array(pmdp);
++      if (psize == MMU_PAGE_4K) {
++              /*
++               * invalidate the old hpte entry if we have that mapped via 64K
++               * base page size. This is because demote_segment won't flush
++               * hash page table entries.
++               */
++              if ((old_pmd & _PAGE_HASHPTE) && !(old_pmd & _PAGE_COMBO))
++                      invalidate_old_hpte(vsid, ea, pmdp, MMU_PAGE_64K, ssize);
++      }
+       valid = hpte_valid(hpte_slot_array, index);
+       if (valid) {
+@@ -107,11 +167,8 @@ int __hash_page_thp(unsigned long ea, un
+                        * safely update this here.
+                        */
+                       valid = 0;
+-                      new_pmd &= ~_PAGE_HPTEFLAGS;
+                       hpte_slot_array[index] = 0;
+-              } else
+-                      /* clear the busy bits and set the hash pte bits */
+-                      new_pmd = (new_pmd & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;
++              }
+       }
+       if (!valid) {
+@@ -119,11 +176,7 @@ int __hash_page_thp(unsigned long ea, un
+               /* insert new entry */
+               pa = pmd_pfn(__pmd(old_pmd)) << PAGE_SHIFT;
+-repeat:
+-              hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
+-
+-              /* clear the busy bits and set the hash pte bits */
+-              new_pmd = (new_pmd & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;
++              new_pmd |= _PAGE_HASHPTE;
+               /* Add in WIMG bits */
+               rflags |= (new_pmd & (_PAGE_WRITETHRU | _PAGE_NO_CACHE |
+@@ -132,6 +185,8 @@ repeat:
+                * enable the memory coherence always
+                */
+               rflags |= HPTE_R_M;
++repeat:
++              hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
+               /* Insert into the hash table, primary slot */
+               slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0,
+@@ -172,6 +227,12 @@ repeat:
+               mark_hpte_slot_valid(hpte_slot_array, index, slot);
+       }
+       /*
++       * Mark the pte with _PAGE_COMBO, if we are trying to hash it with
++       * base page size 4k.
++       */
++      if (psize == MMU_PAGE_4K)
++              new_pmd |= _PAGE_COMBO;
++      /*
+        * The hpte valid is stored in the pgtable whose address is in the
+        * second half of the PMD. Order this against clearing of the busy bit in
+        * huge pmd.
diff --git a/queue-3.14/powerpc-thp-invalidate-with-vpn-in-loop.patch b/queue-3.14/powerpc-thp-invalidate-with-vpn-in-loop.patch
new file mode 100644
index 0000000..bbc5b98
--- /dev/null
@@ -0,0 +1,65 @@
+From 969b7b208f7408712a3526856e4ae60ad13f6928 Mon Sep 17 00:00:00 2001
+From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
+Date: Wed, 13 Aug 2014 12:32:01 +0530
+Subject: powerpc/thp: Invalidate with vpn in loop
+
+From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
+
+commit 969b7b208f7408712a3526856e4ae60ad13f6928 upstream.
+
+As per the ISA, for a 4k base page size we compare bits 14..65 of the
+specified VA with the entry_VA in the TLB. That implies we need to do a
+tlbie for every possible 4k VA used to access the 16MB hugepage. With a
+64k base page size we compare bits 14..57 of the VA, so we cannot ignore
+the lower 24 bits of the VA when doing the tlbie either. We also cannot
+invalidate a 16MB entry with just one tlbie instruction because we don't
+track which VA was used to instantiate the TLB entry.
+
+Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/mm/hash_native_64.c |   23 +++++++----------------
+ 1 file changed, 7 insertions(+), 16 deletions(-)
+
+--- a/arch/powerpc/mm/hash_native_64.c
++++ b/arch/powerpc/mm/hash_native_64.c
+@@ -423,7 +423,7 @@ static void native_hugepage_invalidate(u
+                                      unsigned char *hpte_slot_array,
+                                      int psize, int ssize)
+ {
+-      int i, lock_tlbie;
++      int i;
+       struct hash_pte *hptep;
+       int actual_psize = MMU_PAGE_16M;
+       unsigned int max_hpte_count, valid;
+@@ -462,22 +462,13 @@ static void native_hugepage_invalidate(u
+               else
+                       /* Invalidate the hpte. NOTE: this also unlocks it */
+                       hptep->v = 0;
++              /*
++               * We need to do tlb invalidate for all the address, tlbie
++               * instruction compares entry_VA in tlb with the VA specified
++               * here
++               */
++              tlbie(vpn, psize, actual_psize, ssize, 0);
+       }
+-      /*
+-       * Since this is a hugepage, we just need a single tlbie.
+-       * use the last vpn.
+-       */
+-      lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
+-      if (lock_tlbie)
+-              raw_spin_lock(&native_tlbie_lock);
+-
+-      asm volatile("ptesync":::"memory");
+-      __tlbie(vpn, psize, actual_psize, ssize);
+-      asm volatile("eieio; tlbsync; ptesync":::"memory");
+-
+-      if (lock_tlbie)
+-              raw_spin_unlock(&native_tlbie_lock);
+-
+       local_irq_restore(flags);
+ }
diff --git a/queue-3.14/powerpc-thp-use-access_once-when-loading-pmdp.patch b/queue-3.14/powerpc-thp-use-access_once-when-loading-pmdp.patch
new file mode 100644
index 0000000..e14bae0
--- /dev/null
@@ -0,0 +1,33 @@
+From 7e467245bf5226db34c4b12d3cbacfa2f7a15a8b Mon Sep 17 00:00:00 2001
+From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
+Date: Wed, 13 Aug 2014 12:32:02 +0530
+Subject: powerpc/thp: Use ACCESS_ONCE when loading pmdp
+
+From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
+
+commit 7e467245bf5226db34c4b12d3cbacfa2f7a15a8b upstream.
+
+We would get wrong results if the compiler recomputed old_pmd. Avoid
+that by using ACCESS_ONCE.
+
+Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/mm/hugepage-hash64.c |    4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/arch/powerpc/mm/hugepage-hash64.c
++++ b/arch/powerpc/mm/hugepage-hash64.c
+@@ -84,7 +84,9 @@ int __hash_page_thp(unsigned long ea, un
+        * atomically mark the linux large page PMD busy and dirty
+        */
+       do {
+-              old_pmd = pmd_val(*pmdp);
++              pmd_t pmd = ACCESS_ONCE(*pmdp);
++
++              old_pmd = pmd_val(pmd);
+               /* If PMD busy, retry the access */
+               if (unlikely(old_pmd & _PAGE_BUSY))
+                       return 0;
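
ACCESS_ONCE() is essentially a volatile cast that pins the load, so the value checked for _PAGE_BUSY is the same value used afterwards; without it, the compiler is free to reload *pmdp between check and use. A userspace sketch (the macro matches the kernel's shape; the rest is hypothetical):

#include <stdio.h>

#define ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))
#define PAGE_BUSY 0x1UL

static unsigned long shared_pmd = 0x2UL;	/* stands in for *pmdp */

int main(void)
{
	/* one forced load: check and use see the same snapshot */
	unsigned long old_pmd = ACCESS_ONCE(shared_pmd);

	if (old_pmd & PAGE_BUSY)
		return 1;			/* retry path */
	printf("old_pmd=%#lx\n", old_pmd);	/* uses the checked value */
	return 0;
}
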
diff --git a/queue-3.14/regulator-arizona-ldo1-remove-bypass-functionality.patch b/queue-3.14/regulator-arizona-ldo1-remove-bypass-functionality.patch
new file mode 100644
index 0000000..ed02fc2
--- /dev/null
@@ -0,0 +1,31 @@
+From 5b919f3ebb533cbe400664837e24f66a0836b907 Mon Sep 17 00:00:00 2001
+From: Nikesh Oswal <nikesh@opensource.wolfsonmicro.com>
+Date: Fri, 4 Jul 2014 09:55:16 +0100
+Subject: regulator: arizona-ldo1: remove bypass functionality
+
+From: Nikesh Oswal <nikesh@opensource.wolfsonmicro.com>
+
+commit 5b919f3ebb533cbe400664837e24f66a0836b907 upstream.
+
+WM5110/8280 devices do not support bypass mode for LDO1, so remove
+the bypass callbacks registered with the regulator core.
+
+Signed-off-by: Nikesh Oswal <nikesh@opensource.wolfsonmicro.com>
+Signed-off-by: Mark Brown <broonie@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/regulator/arizona-ldo1.c |    2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/drivers/regulator/arizona-ldo1.c
++++ b/drivers/regulator/arizona-ldo1.c
+@@ -141,8 +141,6 @@ static struct regulator_ops arizona_ldo1
+       .map_voltage = regulator_map_voltage_linear,
+       .get_voltage_sel = regulator_get_voltage_sel_regmap,
+       .set_voltage_sel = regulator_set_voltage_sel_regmap,
+-      .get_bypass = regulator_get_bypass_regmap,
+-      .set_bypass = regulator_set_bypass_regmap,
+ };
+ static const struct regulator_desc arizona_ldo1 = {
diff --git a/queue-3.14/series b/queue-3.14/series
index d96caa3bba7211086bf723cab09ae765dfea3bf1..d9e24a802430e110e47a54b6af056b631cca2bbf 100644
@@ -15,3 +15,16 @@ tpm-missing-tpm_chip_put-in-tpm_get_random.patch
 tpm-provide-a-generic-means-to-override-the-chip-returned-timeouts.patch
 tpm-properly-clean-sysfs-entries-in-error-path.patch
 capabilities-remove-undefined-caps-from-all-processes.patch
+kernel-smp.c-on_each_cpu_cond-fix-warning-in-fallback-path.patch
+mfd-omap-usb-host-fix-improper-mask-use.patch
+regulator-arizona-ldo1-remove-bypass-functionality.patch
+powerpc-mm-numa-fix-break-placement.patch
+powerpc-mm-use-read-barrier-when-creating-real_pte.patch
+powerpc-pseries-failure-on-removing-device-node.patch
+powerpc-pseries-avoid-deadlock-on-removing-ddw.patch
+powerpc-thp-add-write-barrier-after-updating-the-valid-bit.patch
+powerpc-thp-don-t-recompute-vsid-and-ssize-in-loop-on-invalidate.patch
+powerpc-thp-invalidate-old-64k-based-hash-page-mapping-before-insert-of-4k-pte.patch
+powerpc-thp-handle-combo-pages-in-invalidate.patch
+powerpc-thp-invalidate-with-vpn-in-loop.patch
+powerpc-thp-use-access_once-when-loading-pmdp.patch