--- /dev/null
+From 540e07c67efe42ef6b6be4f1956931e676d58a15 Mon Sep 17 00:00:00 2001
+From: Michael Neuling <mikey@neuling.org>
+Date: Mon, 24 Jun 2013 15:47:23 +1000
+Subject: powerpc/hw_brk: Fix clearing of extraneous IRQ
+
+From: Michael Neuling <mikey@neuling.org>
+
+commit 540e07c67efe42ef6b6be4f1956931e676d58a15 upstream.
+
+In 9422de3 "powerpc: Hardware breakpoints rewrite to handle non DABR breakpoint
+registers" we changed the way we mark extraneous irqs with this:
+
+- info->extraneous_interrupt = !((bp->attr.bp_addr <= dar) &&
+- (dar - bp->attr.bp_addr < bp->attr.bp_len));
++ if (!((bp->attr.bp_addr <= dar) &&
++ (dar - bp->attr.bp_addr < bp->attr.bp_len)))
++ info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
+
+Unfortunately this is bogus as it never clears extraneous IRQ if it's already
+set.
+
+This correctly clears extraneous IRQ before possibly setting it.
+
+Signed-off-by: Michael Neuling <mikey@neuling.org>
+Reported-by: Edjunior Barbosa Machado <emachado@linux.vnet.ibm.com>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/hw_breakpoint.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/powerpc/kernel/hw_breakpoint.c
++++ b/arch/powerpc/kernel/hw_breakpoint.c
+@@ -250,6 +250,7 @@ int __kprobes hw_breakpoint_handler(stru
+ * we still need to single-step the instruction, but we don't
+ * generate an event.
+ */
++ info->type &= ~HW_BRK_TYPE_EXTRANEOUS_IRQ;
+ if (!((bp->attr.bp_addr <= dar) &&
+ (dar - bp->attr.bp_addr < bp->attr.bp_len)))
+ info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
--- /dev/null
+From e2a800beaca1f580945773e57d1a0e7cd37b1056 Mon Sep 17 00:00:00 2001
+From: Michael Neuling <mikey@neuling.org>
+Date: Mon, 1 Jul 2013 14:19:50 +1000
+Subject: powerpc/hw_brk: Fix off by one error when validating DAWR region end
+
+From: Michael Neuling <mikey@neuling.org>
+
+commit e2a800beaca1f580945773e57d1a0e7cd37b1056 upstream.
+
+The Data Address Watchpoint Register (DAWR) on POWER8 can take a 512
+byte range but this range must not cross a 512 byte boundary.
+
+Unfortunately we were off by one when calculating the end of the region,
+hence we were not allowing some breakpoint regions which were actually
+valid. This fixes this error.
+
+Signed-off-by: Michael Neuling <mikey@neuling.org>
+Reported-by: Edjunior Barbosa Machado <emachado@linux.vnet.ibm.com>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/hw_breakpoint.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/powerpc/kernel/hw_breakpoint.c
++++ b/arch/powerpc/kernel/hw_breakpoint.c
+@@ -176,7 +176,7 @@ int arch_validate_hwbkpt_settings(struct
+ length_max = 512 ; /* 64 doublewords */
+ /* DAWR region can't cross 512 boundary */
+ if ((bp->attr.bp_addr >> 10) !=
+- ((bp->attr.bp_addr + bp->attr.bp_len) >> 10))
++ ((bp->attr.bp_addr + bp->attr.bp_len - 1) >> 10))
+ return -EINVAL;
+ }
+ if (info->len >
--- /dev/null
+From b0b0aa9c7faf94e92320eabd8a1786c7747e40a8 Mon Sep 17 00:00:00 2001
+From: Michael Neuling <mikey@neuling.org>
+Date: Mon, 24 Jun 2013 15:47:22 +1000
+Subject: powerpc/hw_brk: Fix setting of length for exact mode breakpoints
+
+From: Michael Neuling <mikey@neuling.org>
+
+commit b0b0aa9c7faf94e92320eabd8a1786c7747e40a8 upstream.
+
+The smallest match region for both the DABR and DAWR is 8 bytes, so the
+kernel needs to filter matches when users want to look at regions smaller than
+this.
+
+Currently we set the length of PPC_BREAKPOINT_MODE_EXACT breakpoints to 8.
+This is wrong as in exact mode we should only match on 1 address, hence the
+length should be 1.
+
+This ensures that the kernel will filter out any exact mode hardware breakpoint
+matches on any addresses other than the requested one.
+
+Signed-off-by: Michael Neuling <mikey@neuling.org>
+Reported-by: Edjunior Barbosa Machado <emachado@linux.vnet.ibm.com>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/ptrace.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/arch/powerpc/kernel/ptrace.c
++++ b/arch/powerpc/kernel/ptrace.c
+@@ -1449,7 +1449,9 @@ static long ppc_set_hwdebug(struct task_
+ */
+ if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE) {
+ len = bp_info->addr2 - bp_info->addr;
+- } else if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) {
++ } else if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
++ len = 1;
++ else {
+ ptrace_put_breakpoints(child);
+ return -EINVAL;
+ }
--- /dev/null
+From dd023217e17e72b46fb4d49c7734c426938c3dba Mon Sep 17 00:00:00 2001
+From: Nathan Fontenot <nfont@linux.vnet.ibm.com>
+Date: Mon, 24 Jun 2013 22:08:05 -0500
+Subject: powerpc/numa: Do not update sysfs cpu registration from invalid context
+
+From: Nathan Fontenot <nfont@linux.vnet.ibm.com>
+
+commit dd023217e17e72b46fb4d49c7734c426938c3dba upstream.
+
+The topology update code that updates the cpu node registration in sysfs
+should not be called while in stop_machine(). The register/unregister
+calls take a lock and may sleep.
+
+This patch moves these calls outside of the call to stop_machine().
+
+Signed-off-by: Nathan Fontenot <nfont@linux.vnet.ibm.com>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/mm/numa.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/arch/powerpc/mm/numa.c
++++ b/arch/powerpc/mm/numa.c
+@@ -1433,11 +1433,9 @@ static int update_cpu_topology(void *dat
+ if (cpu != update->cpu)
+ continue;
+
+- unregister_cpu_under_node(update->cpu, update->old_nid);
+ unmap_cpu_from_node(update->cpu);
+ map_cpu_to_node(update->cpu, update->new_nid);
+ vdso_getcpu_init();
+- register_cpu_under_node(update->cpu, update->new_nid);
+ }
+
+ return 0;
+@@ -1485,6 +1483,9 @@ int arch_update_cpu_topology(void)
+ stop_machine(update_cpu_topology, &updates[0], &updated_cpus);
+
+ for (ud = &updates[0]; ud; ud = ud->next) {
++ unregister_cpu_under_node(ud->cpu, ud->old_nid);
++ register_cpu_under_node(ud->cpu, ud->new_nid);
++
+ dev = get_cpu_device(ud->cpu);
+ if (dev)
+ kobject_uevent(&dev->kobj, KOBJ_CHANGE);
--- /dev/null
+From d8bec4c9cd58f6d3679e09b7293851fb92ad7557 Mon Sep 17 00:00:00 2001
+From: Michael Ellerman <michael@ellerman.id.au>
+Date: Fri, 28 Jun 2013 18:15:10 +1000
+Subject: powerpc/perf: Check that events only include valid bits on Power8
+
+From: Michael Ellerman <michael@ellerman.id.au>
+
+commit d8bec4c9cd58f6d3679e09b7293851fb92ad7557 upstream.
+
+A mistake we have made in the past is that we pull out the fields we
+need from the event code, but don't check that there are no unknown bits
+set. This means that we can't ever assign meaning to those unknown bits
+in future.
+
+Although we have once again failed to do this at release, it is still
+early days for Power8 so I think we can still slip this in and get away
+with it.
+
+Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/perf/power8-pmu.c | 13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+--- a/arch/powerpc/perf/power8-pmu.c
++++ b/arch/powerpc/perf/power8-pmu.c
+@@ -109,6 +109,16 @@
+ #define EVENT_IS_MARKED (EVENT_MARKED_MASK << EVENT_MARKED_SHIFT)
+ #define EVENT_PSEL_MASK 0xff /* PMCxSEL value */
+
++#define EVENT_VALID_MASK \
++ ((EVENT_THRESH_MASK << EVENT_THRESH_SHIFT) | \
++ (EVENT_SAMPLE_MASK << EVENT_SAMPLE_SHIFT) | \
++ (EVENT_CACHE_SEL_MASK << EVENT_CACHE_SEL_SHIFT) | \
++ (EVENT_PMC_MASK << EVENT_PMC_SHIFT) | \
++ (EVENT_UNIT_MASK << EVENT_UNIT_SHIFT) | \
++ (EVENT_COMBINE_MASK << EVENT_COMBINE_SHIFT) | \
++ (EVENT_MARKED_MASK << EVENT_MARKED_SHIFT) | \
++ EVENT_PSEL_MASK)
++
+ /* MMCRA IFM bits - POWER8 */
+ #define POWER8_MMCRA_IFM1 0x0000000040000000UL
+ #define POWER8_MMCRA_IFM2 0x0000000080000000UL
+@@ -212,6 +222,9 @@ static int power8_get_constraint(u64 eve
+
+ mask = value = 0;
+
++ if (event & ~EVENT_VALID_MASK)
++ return -1;
++
+ pmc = (event >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
+ unit = (event >> EVENT_UNIT_SHIFT) & EVENT_UNIT_MASK;
+ cache = (event >> EVENT_CACHE_SEL_SHIFT) & EVENT_CACHE_SEL_MASK;
--- /dev/null
+From 4ea355b5368bde0574c12430df53334c4be3bdcf Mon Sep 17 00:00:00 2001
+From: Michael Ellerman <michael@ellerman.id.au>
+Date: Fri, 28 Jun 2013 18:15:14 +1000
+Subject: powerpc/perf: Don't enable if we have zero events
+
+From: Michael Ellerman <michael@ellerman.id.au>
+
+commit 4ea355b5368bde0574c12430df53334c4be3bdcf upstream.
+
+In power_pmu_enable() we still enable the PMU even if we have zero
+events. This should have no effect but doesn't make much sense. Instead
+just return after telling the hypervisor that we are not using the PMCs.
+
+Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/perf/core-book3s.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/arch/powerpc/perf/core-book3s.c
++++ b/arch/powerpc/perf/core-book3s.c
+@@ -926,6 +926,11 @@ static void power_pmu_enable(struct pmu
+ if (!cpuhw->disabled)
+ goto out;
+
++ if (cpuhw->n_events == 0) {
++ ppc_set_pmu_inuse(0);
++ goto out;
++ }
++
+ cpuhw->disabled = 0;
+
+ /*
+@@ -937,8 +942,6 @@ static void power_pmu_enable(struct pmu
+ if (!cpuhw->n_added) {
+ mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
+ mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
+- if (cpuhw->n_events == 0)
+- ppc_set_pmu_inuse(0);
+ goto out_enable;
+ }
+
--- /dev/null
+From 7a7a41f9d5b28ac3a916b057a7d3cd3f435ee9a6 Mon Sep 17 00:00:00 2001
+From: Michael Ellerman <michael@ellerman.id.au>
+Date: Fri, 28 Jun 2013 18:15:12 +1000
+Subject: powerpc/perf: Freeze PMC5/6 if we're not using them
+
+From: Michael Ellerman <michael@ellerman.id.au>
+
+commit 7a7a41f9d5b28ac3a916b057a7d3cd3f435ee9a6 upstream.
+
+On Power8 we can freeze PMC5 and 6 if we're not using them. Normally they
+run all the time.
+
+As noticed by Anshuman, we should unfreeze them when we disable the PMU
+as there are legacy tools which expect them to run all the time.
+
+Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/include/asm/reg.h | 1 +
+ arch/powerpc/perf/core-book3s.c | 5 +++--
+ arch/powerpc/perf/power8-pmu.c | 4 ++++
+ 3 files changed, 8 insertions(+), 2 deletions(-)
+
+--- a/arch/powerpc/include/asm/reg.h
++++ b/arch/powerpc/include/asm/reg.h
+@@ -626,6 +626,7 @@
+ #define MMCR0_TRIGGER 0x00002000UL /* TRIGGER enable */
+ #define MMCR0_PMAO 0x00000080UL /* performance monitor alert has occurred, set to 0 after handling exception */
+ #define MMCR0_SHRFC 0x00000040UL /* SHRre freeze conditions between threads */
++#define MMCR0_FC56 0x00000010UL /* freeze counters 5 and 6 */
+ #define MMCR0_FCTI 0x00000008UL /* freeze counters in tags inactive mode */
+ #define MMCR0_FCTA 0x00000004UL /* freeze counters in tags active mode */
+ #define MMCR0_FCWAIT 0x00000002UL /* freeze counter in WAIT state */
+--- a/arch/powerpc/perf/core-book3s.c
++++ b/arch/powerpc/perf/core-book3s.c
+@@ -75,6 +75,7 @@ static unsigned int freeze_events_kernel
+
+ #define MMCR0_FCHV 0
+ #define MMCR0_PMCjCE MMCR0_PMCnCE
++#define MMCR0_FC56 0
+ #define MMCR0_PMAO 0
+
+ #define SPRN_MMCRA SPRN_MMCR2
+@@ -870,11 +871,11 @@ static void power_pmu_disable(struct pmu
+ }
+
+ /*
+- * Set the 'freeze counters' bit, clear PMAO.
++ * Set the 'freeze counters' bit, clear PMAO/FC56.
+ */
+ val = mfspr(SPRN_MMCR0);
+ val |= MMCR0_FC;
+- val &= ~MMCR0_PMAO;
++ val &= ~(MMCR0_PMAO | MMCR0_FC56);
+
+ /*
+ * The barrier is to make sure the mtspr has been
+--- a/arch/powerpc/perf/power8-pmu.c
++++ b/arch/powerpc/perf/power8-pmu.c
+@@ -391,6 +391,10 @@ static int power8_compute_mmcr(u64 event
+ if (pmc_inuse & 0x7c)
+ mmcr[0] |= MMCR0_PMCjCE;
+
++ /* If we're not using PMC 5 or 6, freeze them */
++ if (!(pmc_inuse & 0x60))
++ mmcr[0] |= MMCR0_FC56;
++
+ mmcr[1] = mmcr1;
+ mmcr[2] = mmcra;
+
--- /dev/null
+From 378a6ee99e4a431ec84e4e61893445c041c93007 Mon Sep 17 00:00:00 2001
+From: Michael Ellerman <michael@ellerman.id.au>
+Date: Fri, 28 Jun 2013 18:15:11 +1000
+Subject: powerpc/perf: Rework disable logic in pmu_disable()
+
+From: Michael Ellerman <michael@ellerman.id.au>
+
+commit 378a6ee99e4a431ec84e4e61893445c041c93007 upstream.
+
+In pmu_disable() we disable the PMU by setting the FC (Freeze Counters)
+bit in MMCR0. In order to do this we have to read/modify/write MMCR0.
+
+It's possible that we read a value from MMCR0 which has PMAO (PMU Alert
+Occurred) set. When we write that value back it will cause an interrupt
+to occur. We will then end up in the PMU interrupt handler even though
+we are supposed to have just disabled the PMU.
+
+We can avoid this by making sure we never write PMAO back. We should not
+lose interrupts because when the PMU is re-enabled the overflowed values
+will cause another interrupt.
+
+We also reorder the clearing of SAMPLE_ENABLE so that is done after the
+PMU is frozen. Otherwise there is a small window between the clearing of
+SAMPLE_ENABLE and the setting of FC where we could take an interrupt and
+incorrectly see SAMPLE_ENABLE not set. This would for example change the
+logic in perf_read_regs().
+
+Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/perf/core-book3s.c | 31 +++++++++++++++++++------------
+ 1 file changed, 19 insertions(+), 12 deletions(-)
+
+--- a/arch/powerpc/perf/core-book3s.c
++++ b/arch/powerpc/perf/core-book3s.c
+@@ -75,6 +75,7 @@ static unsigned int freeze_events_kernel
+
+ #define MMCR0_FCHV 0
+ #define MMCR0_PMCjCE MMCR0_PMCnCE
++#define MMCR0_PMAO 0
+
+ #define SPRN_MMCRA SPRN_MMCR2
+ #define MMCRA_SAMPLE_ENABLE 0
+@@ -852,7 +853,7 @@ static void write_mmcr0(struct cpu_hw_ev
+ static void power_pmu_disable(struct pmu *pmu)
+ {
+ struct cpu_hw_events *cpuhw;
+- unsigned long flags;
++ unsigned long flags, val;
+
+ if (!ppmu)
+ return;
+@@ -860,9 +861,6 @@ static void power_pmu_disable(struct pmu
+ cpuhw = &__get_cpu_var(cpu_hw_events);
+
+ if (!cpuhw->disabled) {
+- cpuhw->disabled = 1;
+- cpuhw->n_added = 0;
+-
+ /*
+ * Check if we ever enabled the PMU on this cpu.
+ */
+@@ -872,6 +870,21 @@ static void power_pmu_disable(struct pmu
+ }
+
+ /*
++ * Set the 'freeze counters' bit, clear PMAO.
++ */
++ val = mfspr(SPRN_MMCR0);
++ val |= MMCR0_FC;
++ val &= ~MMCR0_PMAO;
++
++ /*
++ * The barrier is to make sure the mtspr has been
++ * executed and the PMU has frozen the events etc.
++ * before we return.
++ */
++ write_mmcr0(cpuhw, val);
++ mb();
++
++ /*
+ * Disable instruction sampling if it was enabled
+ */
+ if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
+@@ -880,14 +893,8 @@ static void power_pmu_disable(struct pmu
+ mb();
+ }
+
+- /*
+- * Set the 'freeze counters' bit.
+- * The barrier is to make sure the mtspr has been
+- * executed and the PMU has frozen the events
+- * before we return.
+- */
+- write_mmcr0(cpuhw, mfspr(SPRN_MMCR0) | MMCR0_FC);
+- mb();
++ cpuhw->disabled = 1;
++ cpuhw->n_added = 0;
+ }
+ local_irq_restore(flags);
+ }
--- /dev/null
+From 0a48843d6c5114cfa4a9540ee4d6af87628cec01 Mon Sep 17 00:00:00 2001
+From: Michael Ellerman <michael@ellerman.id.au>
+Date: Fri, 28 Jun 2013 18:15:13 +1000
+Subject: powerpc/perf: Use existing out label in power_pmu_enable()
+
+From: Michael Ellerman <michael@ellerman.id.au>
+
+commit 0a48843d6c5114cfa4a9540ee4d6af87628cec01 upstream.
+
+In power_pmu_enable() we can use the existing out label to reduce the
+number of return paths.
+
+Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/perf/core-book3s.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+--- a/arch/powerpc/perf/core-book3s.c
++++ b/arch/powerpc/perf/core-book3s.c
+@@ -919,12 +919,13 @@ static void power_pmu_enable(struct pmu
+
+ if (!ppmu)
+ return;
++
+ local_irq_save(flags);
++
+ cpuhw = &__get_cpu_var(cpu_hw_events);
+- if (!cpuhw->disabled) {
+- local_irq_restore(flags);
+- return;
+- }
++ if (!cpuhw->disabled)
++ goto out;
++
+ cpuhw->disabled = 0;
+
+ /*
--- /dev/null
+From 74251fe21bfa9310ddba9e0436d1fcf389e602ee Mon Sep 17 00:00:00 2001
+From: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Date: Mon, 1 Jul 2013 17:54:09 +1000
+Subject: powerpc/powernv: Fix iommu initialization again
+
+From: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+
+commit 74251fe21bfa9310ddba9e0436d1fcf389e602ee upstream.
+
+So because those things always end up in trainwrecks... In 7846de406
+we moved back the iommu initialization earlier, essentially undoing
+37f02195b which was causing us endless trouble... except that in the
+meantime we had merged 959c9bdd58 (to workaround the original breakage)
+which is now ... broken :-)
+
+This fixes it by doing a partial revert of the latter (we keep the
+ppc_md. path which will be needed in the hotplug case, which happens
+also during some EEH error recovery situations).
+
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/platforms/powernv/pci-ioda.c | 21 +++++++++++++++++++++
+ 1 file changed, 21 insertions(+)
+
+--- a/arch/powerpc/platforms/powernv/pci-ioda.c
++++ b/arch/powerpc/platforms/powernv/pci-ioda.c
+@@ -441,6 +441,17 @@ static void pnv_pci_ioda_dma_dev_setup(s
+ set_iommu_table_base(&pdev->dev, &pe->tce32_table);
+ }
+
++static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus)
++{
++ struct pci_dev *dev;
++
++ list_for_each_entry(dev, &bus->devices, bus_list) {
++ set_iommu_table_base(&dev->dev, &pe->tce32_table);
++ if (dev->subordinate)
++ pnv_ioda_setup_bus_dma(pe, dev->subordinate);
++ }
++}
++
+ static void pnv_pci_ioda1_tce_invalidate(struct iommu_table *tbl,
+ u64 *startp, u64 *endp)
+ {
+@@ -596,6 +607,11 @@ static void pnv_pci_ioda_setup_dma_pe(st
+ }
+ iommu_init_table(tbl, phb->hose->node);
+
++ if (pe->pdev)
++ set_iommu_table_base(&pe->pdev->dev, tbl);
++ else
++ pnv_ioda_setup_bus_dma(pe, pe->pbus);
++
+ return;
+ fail:
+ /* XXX Failure: Try to fallback to 64-bit only ? */
+@@ -667,6 +683,11 @@ static void pnv_pci_ioda2_setup_dma_pe(s
+ }
+ iommu_init_table(tbl, phb->hose->node);
+
++ if (pe->pdev)
++ set_iommu_table_base(&pe->pdev->dev, tbl);
++ else
++ pnv_ioda_setup_bus_dma(pe, pe->pbus);
++
+ return;
+ fail:
+ if (pe->tce32_seg >= 0)
--- /dev/null
+From c9f69518e5f08170bc857984a077f693d63171df Mon Sep 17 00:00:00 2001
+From: Michael Ellerman <michael@ellerman.id.au>
+Date: Tue, 25 Jun 2013 17:47:55 +1000
+Subject: powerpc: Remove KVMTEST from RELON exception handlers
+
+From: Michael Ellerman <michael@ellerman.id.au>
+
+commit c9f69518e5f08170bc857984a077f693d63171df upstream.
+
+KVMTEST is a macro which checks whether we are taking an exception from
+guest context, if so we branch out of line and eventually call into the
+KVM code to handle the switch.
+
+When running real guests on bare metal (HV KVM) the hardware ensures
+that we never take a relocation on exception when transitioning from
+guest to host. For PR KVM we disable relocation on exceptions ourselves in
+kvmppc_core_init_vm(), as of commit a413f47 "Disable relocation on
+exceptions whenever PR KVM is active".
+
+So convert all the RELON macros to use NOTEST, and drop the remaining
+KVM_HANDLER() definitions we have for 0xe40 and 0xe80.
+
+Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/include/asm/exception-64s.h | 8 ++++----
+ arch/powerpc/kernel/exceptions-64s.S | 2 --
+ 2 files changed, 4 insertions(+), 6 deletions(-)
+
+--- a/arch/powerpc/include/asm/exception-64s.h
++++ b/arch/powerpc/include/asm/exception-64s.h
+@@ -358,12 +358,12 @@ label##_relon_pSeries: \
+ /* No guest interrupts come through here */ \
+ SET_SCRATCH0(r13); /* save r13 */ \
+ EXCEPTION_RELON_PROLOG_PSERIES(PACA_EXGEN, label##_common, \
+- EXC_STD, KVMTEST_PR, vec)
++ EXC_STD, NOTEST, vec)
+
+ #define STD_RELON_EXCEPTION_PSERIES_OOL(vec, label) \
+ .globl label##_relon_pSeries; \
+ label##_relon_pSeries: \
+- EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_PR, vec); \
++ EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, vec); \
+ EXCEPTION_RELON_PROLOG_PSERIES_1(label##_common, EXC_STD)
+
+ #define STD_RELON_EXCEPTION_HV(loc, vec, label) \
+@@ -374,12 +374,12 @@ label##_relon_hv: \
+ /* No guest interrupts come through here */ \
+ SET_SCRATCH0(r13); /* save r13 */ \
+ EXCEPTION_RELON_PROLOG_PSERIES(PACA_EXGEN, label##_common, \
+- EXC_HV, KVMTEST, vec)
++ EXC_HV, NOTEST, vec)
+
+ #define STD_RELON_EXCEPTION_HV_OOL(vec, label) \
+ .globl label##_relon_hv; \
+ label##_relon_hv: \
+- EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST, vec); \
++ EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, vec); \
+ EXCEPTION_RELON_PROLOG_PSERIES_1(label##_common, EXC_HV)
+
+ /* This associate vector numbers with bits in paca->irq_happened */
+--- a/arch/powerpc/kernel/exceptions-64s.S
++++ b/arch/powerpc/kernel/exceptions-64s.S
+@@ -1175,9 +1175,7 @@ __end_handlers:
+
+ /* Equivalents to the above handlers for relocation-on interrupt vectors */
+ STD_RELON_EXCEPTION_HV_OOL(0xe40, emulation_assist)
+- KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe40)
+ MASKABLE_RELON_EXCEPTION_HV_OOL(0xe80, h_doorbell)
+- KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe80)
+
+ STD_RELON_EXCEPTION_PSERIES_OOL(0xf00, performance_monitor)
+ STD_RELON_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
--- /dev/null
+From 1d567cb4bd42d560a7621cac6f6aebe87343689e Mon Sep 17 00:00:00 2001
+From: Michael Ellerman <michael@ellerman.id.au>
+Date: Tue, 25 Jun 2013 17:47:54 +1000
+Subject: powerpc: Remove unreachable relocation on exception handlers
+
+From: Michael Ellerman <michael@ellerman.id.au>
+
+commit 1d567cb4bd42d560a7621cac6f6aebe87343689e upstream.
+
+We have relocation on exception handlers defined for h_data_storage and
+h_instr_storage. However we will never take relocation on exceptions for
+these because they can only come from a guest, and we never take
+relocation on exceptions when we transition from guest to host.
+
+We also have a handler for hmi_exception (Hypervisor Maintenance) which
+is defined in the architecture to never be delivered with relocation on,
+see v2.07 Book III-S section 6.5.
+
+So remove the handlers, leaving a branch to self just to be double extra
+paranoid.
+
+Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/exceptions-64s.S | 18 +++---------------
+ 1 file changed, 3 insertions(+), 15 deletions(-)
+
+--- a/arch/powerpc/kernel/exceptions-64s.S
++++ b/arch/powerpc/kernel/exceptions-64s.S
+@@ -793,14 +793,10 @@ system_call_relon_pSeries:
+ STD_RELON_EXCEPTION_PSERIES(0x4d00, 0xd00, single_step)
+
+ . = 0x4e00
+- SET_SCRATCH0(r13)
+- EXCEPTION_PROLOG_0(PACA_EXGEN)
+- b h_data_storage_relon_hv
++ b . /* Can't happen, see v2.07 Book III-S section 6.5 */
+
+ . = 0x4e20
+- SET_SCRATCH0(r13)
+- EXCEPTION_PROLOG_0(PACA_EXGEN)
+- b h_instr_storage_relon_hv
++ b . /* Can't happen, see v2.07 Book III-S section 6.5 */
+
+ . = 0x4e40
+ SET_SCRATCH0(r13)
+@@ -808,9 +804,7 @@ system_call_relon_pSeries:
+ b emulation_assist_relon_hv
+
+ . = 0x4e60
+- SET_SCRATCH0(r13)
+- EXCEPTION_PROLOG_0(PACA_EXGEN)
+- b hmi_exception_relon_hv
++ b . /* Can't happen, see v2.07 Book III-S section 6.5 */
+
+ . = 0x4e80
+ SET_SCRATCH0(r13)
+@@ -1180,14 +1174,8 @@ tm_unavailable_common:
+ __end_handlers:
+
+ /* Equivalents to the above handlers for relocation-on interrupt vectors */
+- STD_RELON_EXCEPTION_HV_OOL(0xe00, h_data_storage)
+- KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe00)
+- STD_RELON_EXCEPTION_HV_OOL(0xe20, h_instr_storage)
+- KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe20)
+ STD_RELON_EXCEPTION_HV_OOL(0xe40, emulation_assist)
+ KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe40)
+- STD_RELON_EXCEPTION_HV_OOL(0xe60, hmi_exception)
+- KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe60)
+ MASKABLE_RELON_EXCEPTION_HV_OOL(0xe80, h_doorbell)
+ KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe80)
+
--- /dev/null
+From 021424a1fce335e05807fd770eb8e1da30a63eea Mon Sep 17 00:00:00 2001
+From: Michael Ellerman <michaele@au1.ibm.com>
+Date: Tue, 25 Jun 2013 17:47:56 +1000
+Subject: powerpc: Rename and flesh out the facility unavailable exception handler
+
+From: Michael Ellerman <michaele@au1.ibm.com>
+
+commit 021424a1fce335e05807fd770eb8e1da30a63eea upstream.
+
+The exception at 0xf60 is not the TM (Transactional Memory) unavailable
+exception, it is the "Facility Unavailable Exception", rename it as
+such.
+
+Flesh out the handler to acknowledge the fact that it can be called for
+many reasons, one of which is TM being unavailable.
+
+Use STD_EXCEPTION_COMMON() for the exception body, for some reason we
+had it open-coded, I've checked the generated code is identical.
+
+Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/exceptions-64s.S | 21 +++++++--------------
+ arch/powerpc/kernel/traps.c | 33 +++++++++++++++++++++++++--------
+ 2 files changed, 32 insertions(+), 22 deletions(-)
+
+--- a/arch/powerpc/kernel/exceptions-64s.S
++++ b/arch/powerpc/kernel/exceptions-64s.S
+@@ -341,10 +341,11 @@ vsx_unavailable_pSeries_1:
+ EXCEPTION_PROLOG_0(PACA_EXGEN)
+ b vsx_unavailable_pSeries
+
++facility_unavailable_trampoline:
+ . = 0xf60
+ SET_SCRATCH0(r13)
+ EXCEPTION_PROLOG_0(PACA_EXGEN)
+- b tm_unavailable_pSeries
++ b facility_unavailable_pSeries
+
+ #ifdef CONFIG_CBE_RAS
+ STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error)
+@@ -522,7 +523,7 @@ denorm_done:
+ KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf20)
+ STD_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
+ KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf40)
+- STD_EXCEPTION_PSERIES_OOL(0xf60, tm_unavailable)
++ STD_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
+ KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf60)
+
+ /*
+@@ -829,11 +830,11 @@ vsx_unavailable_relon_pSeries_1:
+ EXCEPTION_PROLOG_0(PACA_EXGEN)
+ b vsx_unavailable_relon_pSeries
+
+-tm_unavailable_relon_pSeries_1:
++facility_unavailable_relon_trampoline:
+ . = 0x4f60
+ SET_SCRATCH0(r13)
+ EXCEPTION_PROLOG_0(PACA_EXGEN)
+- b tm_unavailable_relon_pSeries
++ b facility_unavailable_relon_pSeries
+
+ STD_RELON_EXCEPTION_PSERIES(0x5300, 0x1300, instruction_breakpoint)
+ #ifdef CONFIG_PPC_DENORMALISATION
+@@ -1159,15 +1160,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
+ bl .vsx_unavailable_exception
+ b .ret_from_except
+
+- .align 7
+- .globl tm_unavailable_common
+-tm_unavailable_common:
+- EXCEPTION_PROLOG_COMMON(0xf60, PACA_EXGEN)
+- bl .save_nvgprs
+- DISABLE_INTS
+- addi r3,r1,STACK_FRAME_OVERHEAD
+- bl .tm_unavailable_exception
+- b .ret_from_except
++ STD_EXCEPTION_COMMON(0xf60, facility_unavailable, .facility_unavailable_exception)
+
+ .align 7
+ .globl __end_handlers
+@@ -1180,7 +1173,7 @@ __end_handlers:
+ STD_RELON_EXCEPTION_PSERIES_OOL(0xf00, performance_monitor)
+ STD_RELON_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
+ STD_RELON_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
+- STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, tm_unavailable)
++ STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
+
+ #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
+ /*
+--- a/arch/powerpc/kernel/traps.c
++++ b/arch/powerpc/kernel/traps.c
+@@ -1282,25 +1282,42 @@ void vsx_unavailable_exception(struct pt
+ die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
+ }
+
+-void tm_unavailable_exception(struct pt_regs *regs)
++void facility_unavailable_exception(struct pt_regs *regs)
+ {
++ static char *facility_strings[] = {
++ "FPU",
++ "VMX/VSX",
++ "DSCR",
++ "PMU SPRs",
++ "BHRB",
++ "TM",
++ "AT",
++ "EBB",
++ "TAR",
++ };
++ char *facility;
++ u64 value;
++
++ value = mfspr(SPRN_FSCR) >> 56;
++
+ /* We restore the interrupt state now */
+ if (!arch_irq_disabled_regs(regs))
+ local_irq_enable();
+
+- /* Currently we never expect a TMU exception. Catch
+- * this and kill the process!
+- */
+- printk(KERN_EMERG "Unexpected TM unavailable exception at %lx "
+- "(msr %lx)\n",
+- regs->nip, regs->msr);
++ if (value < ARRAY_SIZE(facility_strings))
++ facility = facility_strings[value];
++ else
++ facility = "unknown";
++
++ pr_err("Facility '%s' unavailable, exception at 0x%lx, MSR=%lx\n",
++ facility, regs->nip, regs->msr);
+
+ if (user_mode(regs)) {
+ _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
+ return;
+ }
+
+- die("Unexpected TM unavailable exception", regs, SIGABRT);
++ die("Unexpected facility unavailable exception", regs, SIGABRT);
+ }
+
+ #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
--- /dev/null
+From 8246aca7058f3f2c2ae503081777965cd8df7b90 Mon Sep 17 00:00:00 2001
+From: Chen Gang <gang.chen@asianux.com>
+Date: Wed, 20 Mar 2013 14:30:12 +0800
+Subject: powerpc/smp: Section mismatch from smp_release_cpus to __initdata spinning_secondaries
+
+From: Chen Gang <gang.chen@asianux.com>
+
+commit 8246aca7058f3f2c2ae503081777965cd8df7b90 upstream.
+
+the smp_release_cpus is a normal function and called in normal environments,
+ but it calls the __initdata spinning_secondaries.
+ we need to modify spinning_secondaries to match smp_release_cpus.
+
+the related warning:
+ (the linker report boot_paca.33377, but it should be spinning_secondaries)
+
+-----------------------------------------------------------------------------
+
+WARNING: arch/powerpc/kernel/built-in.o(.text+0x23176): Section mismatch in reference from the function .smp_release_cpus() to the variable .init.data:boot_paca.33377
+The function .smp_release_cpus() references
+the variable __initdata boot_paca.33377.
+This is often because .smp_release_cpus lacks a __initdata
+annotation or the annotation of boot_paca.33377 is wrong.
+
+WARNING: arch/powerpc/kernel/built-in.o(.text+0x231fe): Section mismatch in reference from the function .smp_release_cpus() to the variable .init.data:boot_paca.33377
+The function .smp_release_cpus() references
+the variable __initdata boot_paca.33377.
+This is often because .smp_release_cpus lacks a __initdata
+annotation or the annotation of boot_paca.33377 is wrong.
+
+-----------------------------------------------------------------------------
+
+Signed-off-by: Chen Gang <gang.chen@asianux.com>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/setup_64.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/powerpc/kernel/setup_64.c
++++ b/arch/powerpc/kernel/setup_64.c
+@@ -76,7 +76,7 @@
+ #endif
+
+ int boot_cpuid = 0;
+-int __initdata spinning_secondaries;
++int spinning_secondaries;
+ u64 ppc64_pft_size;
+
+ /* Pick defaults since we might want to patch instructions
--- /dev/null
+From fee55450710dff32a13ae30b4129ec7b5a4b44d0 Mon Sep 17 00:00:00 2001
+From: Michael Neuling <mikey@neuling.org>
+Date: Sun, 9 Jun 2013 21:23:16 +1000
+Subject: powerpc/tm: Fix 32 bit non-rt signals
+
+From: Michael Neuling <mikey@neuling.org>
+
+commit fee55450710dff32a13ae30b4129ec7b5a4b44d0 upstream.
+
+Currently sys_sigreturn() is TM unaware. Therefore, if we take a 32 bit signal
+without SIGINFO (non RT) inside a transaction, on signal return we don't
+restore the signal frame correctly.
+
+This checks if the signal frame being restored is an active transaction, and
+if so, it copies the additional state to ptregs so it can be restored.
+
+Signed-off-by: Michael Neuling <mikey@neuling.org>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/signal_32.c | 30 +++++++++++++++++++++++++-----
+ 1 file changed, 25 insertions(+), 5 deletions(-)
+
+--- a/arch/powerpc/kernel/signal_32.c
++++ b/arch/powerpc/kernel/signal_32.c
+@@ -1494,16 +1494,22 @@ badframe:
+ long sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
+ struct pt_regs *regs)
+ {
++ struct sigframe __user *sf;
+ struct sigcontext __user *sc;
+ struct sigcontext sigctx;
+ struct mcontext __user *sr;
+ void __user *addr;
+ sigset_t set;
++#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
++ struct mcontext __user *mcp, *tm_mcp;
++ unsigned long msr_hi;
++#endif
+
+ /* Always make any pending restarted system calls return -EINTR */
+ current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
+- sc = (struct sigcontext __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
++ sf = (struct sigframe __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
++ sc = &sf->sctx;
+ addr = sc;
+ if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
+ goto badframe;
+@@ -1520,11 +1526,25 @@ long sys_sigreturn(int r3, int r4, int r
+ #endif
+ set_current_blocked(&set);
+
+- sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
+- addr = sr;
+- if (!access_ok(VERIFY_READ, sr, sizeof(*sr))
+- || restore_user_regs(regs, sr, 1))
++#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
++ mcp = (struct mcontext __user *)&sf->mctx;
++ tm_mcp = (struct mcontext __user *)&sf->mctx_transact;
++ if (__get_user(msr_hi, &tm_mcp->mc_gregs[PT_MSR]))
+ goto badframe;
++ if (MSR_TM_ACTIVE(msr_hi<<32)) {
++ if (!cpu_has_feature(CPU_FTR_TM))
++ goto badframe;
++ if (restore_tm_user_regs(regs, mcp, tm_mcp))
++ goto badframe;
++ } else
++#endif
++ {
++ sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
++ addr = sr;
++ if (!access_ok(VERIFY_READ, sr, sizeof(*sr))
++ || restore_user_regs(regs, sr, 1))
++ goto badframe;
++ }
+
+ set_thread_flag(TIF_RESTOREALL);
+ return 0;
--- /dev/null
+From 2c27a18f8736da047bef2b997bdd48efc667e3c9 Mon Sep 17 00:00:00 2001
+From: Michael Neuling <mikey@neuling.org>
+Date: Sun, 9 Jun 2013 21:23:17 +1000
+Subject: powerpc/tm: Fix restoration of MSR on 32bit signal return
+
+From: Michael Neuling <mikey@neuling.org>
+
+commit 2c27a18f8736da047bef2b997bdd48efc667e3c9 upstream.
+
+Currently we clear out the MSR TM bits on signal return assuming that the
+signal should never return to an active transaction.
+
+This is bogus as the user may do this. It's most likely the transaction will
+be doomed due to a treclaim but that's a problem for the HW not the kernel.
+
+The current code is a legacy of earlier kernel implementations which did
+software rollback of active transactions in the kernel. That code has now gone
+but we didn't correctly fix up this part of the signals code which still makes
+the assumption that it must be returning to a suspended transaction.
+
+This pulls out both MSR TM bits from the user supplied context rather than just
+setting TM suspend. We pull out only the bits needed to ensure the user can't
+do anything dangerous to the MSR.
+
+Signed-off-by: Michael Neuling <mikey@neuling.org>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/signal_32.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/arch/powerpc/kernel/signal_32.c
++++ b/arch/powerpc/kernel/signal_32.c
+@@ -754,7 +754,7 @@ static long restore_tm_user_regs(struct
+ struct mcontext __user *tm_sr)
+ {
+ long err;
+- unsigned long msr;
++ unsigned long msr, msr_hi;
+ #ifdef CONFIG_VSX
+ int i;
+ #endif
+@@ -859,8 +859,11 @@ static long restore_tm_user_regs(struct
+ tm_enable();
+ /* This loads the checkpointed FP/VEC state, if used */
+ tm_recheckpoint(¤t->thread, msr);
+- /* The task has moved into TM state S, so ensure MSR reflects this */
+- regs->msr = (regs->msr & ~MSR_TS_MASK) | MSR_TS_S;
++ /* Get the top half of the MSR */
++ if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR]))
++ return 1;
++ /* Pull in MSR TM from user context */
++ regs->msr = (regs->msr & ~MSR_TS_MASK) | ((msr_hi<<32) & MSR_TS_MASK);
+
+ /* This loads the speculative FP/VEC state, if used */
+ if (msr & MSR_FP) {
--- /dev/null
+From 55e4341850ac56e63a3eefe9583a9000042164fa Mon Sep 17 00:00:00 2001
+From: Michael Neuling <mikey@neuling.org>
+Date: Sun, 9 Jun 2013 21:23:18 +1000
+Subject: powerpc/tm: Fix return of 32bit rt signals to active transactions
+
+From: Michael Neuling <mikey@neuling.org>
+
+commit 55e4341850ac56e63a3eefe9583a9000042164fa upstream.
+
+Currently we only restore signals which are transactionally suspended but it's
+possible that the transaction can be restored even when it's active. Most
+likely this will result in a transactional rollback by the hardware as the
+transaction will have been doomed by an earlier treclaim.
+
+The current code is a legacy of earlier kernel implementations which did
+software rollback of active transactions in the kernel. That code has now gone
+but we didn't correctly fix up this part of the signals code which still makes
+assumptions based on having software rollback.
+
+This changes the signal return code to always restore both contexts on 32 bit
+rt signal return.
+
+Signed-off-by: Michael Neuling <mikey@neuling.org>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/signal_32.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/powerpc/kernel/signal_32.c
++++ b/arch/powerpc/kernel/signal_32.c
+@@ -1245,7 +1245,7 @@ long sys_rt_sigreturn(int r3, int r4, in
+ if (__get_user(msr_hi, &mcp->mc_gregs[PT_MSR]))
+ goto bad;
+
+- if (MSR_TM_SUSPENDED(msr_hi<<32)) {
++ if (MSR_TM_ACTIVE(msr_hi<<32)) {
+ /* We only recheckpoint on return if we're
+ * transaction.
+ */
--- /dev/null
+From 87b4e5393af77f5cba124638f19f6c426e210aec Mon Sep 17 00:00:00 2001
+From: Michael Neuling <mikey@neuling.org>
+Date: Sun, 9 Jun 2013 21:23:19 +1000
+Subject: powerpc/tm: Fix return of active 64bit signals
+
+From: Michael Neuling <mikey@neuling.org>
+
+commit 87b4e5393af77f5cba124638f19f6c426e210aec upstream.
+
+Currently we only restore signals which are transactionally suspended but it's
+possible that the transaction can be restored even when it's active. Most
+likely this will result in a transactional rollback by the hardware as the
+transaction will have been doomed by an earlier treclaim.
+
+The current code is a legacy of earlier kernel implementations which did
+software rollback of active transactions in the kernel. That code has now gone
+but we didn't correctly fix up this part of the signals code which still makes
+assumptions based on having software rollback.
+
+This changes the signal return code to always restore both contexts on 64 bit
+signal return. It also ensures that the MSR TM bits are properly restored from
+the signal context which they are not currently.
+
+Signed-off-by: Michael Neuling <mikey@neuling.org>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/signal_64.c | 8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+--- a/arch/powerpc/kernel/signal_64.c
++++ b/arch/powerpc/kernel/signal_64.c
+@@ -410,6 +410,10 @@ static long restore_tm_sigcontexts(struc
+
+ /* get MSR separately, transfer the LE bit if doing signal return */
+ err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
++ /* pull in MSR TM from user context */
++ regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK);
++
++ /* pull in MSR LE from user context */
+ regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
+
+ /* The following non-GPR non-FPR non-VR state is also checkpointed: */
+@@ -505,8 +509,6 @@ static long restore_tm_sigcontexts(struc
+ tm_enable();
+ /* This loads the checkpointed FP/VEC state, if used */
+ tm_recheckpoint(¤t->thread, msr);
+- /* The task has moved into TM state S, so ensure MSR reflects this: */
+- regs->msr = (regs->msr & ~MSR_TS_MASK) | __MASK(33);
+
+ /* This loads the speculative FP/VEC state, if used */
+ if (msr & MSR_FP) {
+@@ -654,7 +656,7 @@ int sys_rt_sigreturn(unsigned long r3, u
+ #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ if (__get_user(msr, &uc->uc_mcontext.gp_regs[PT_MSR]))
+ goto badframe;
+- if (MSR_TM_SUSPENDED(msr)) {
++ if (MSR_TM_ACTIVE(msr)) {
+ /* We recheckpoint on return. */
+ struct ucontext __user *uc_transact;
+ if (__get_user(uc_transact, &uc->uc_link))
--- /dev/null
+From 1d25f11fdbcc5390d68efd98c28900bfd29b264c Mon Sep 17 00:00:00 2001
+From: Michael Neuling <mikey@neuling.org>
+Date: Sun, 9 Jun 2013 21:23:15 +1000
+Subject: powerpc/tm: Fix writing top half of MSR on 32 bit signals
+
+From: Michael Neuling <mikey@neuling.org>
+
+commit 1d25f11fdbcc5390d68efd98c28900bfd29b264c upstream.
+
+The MSR TM controls are in the top 32 bits of the MSR hence on 32 bit signals,
+we stick the top half of the MSR in the checkpointed signal context so that the
+user can access it.
+
+Unfortunately, we don't currently write anything to the checkpointed signal
+context when coming in a from a non transactional process and hence the top MSR
+bits can contain junk.
+
+This updates the 32 bit signal handling code to always write something to the
+top MSR bits so that users know if the process is transactional or not and the
+kernel can use it on signal return.
+
+Signed-off-by: Michael Neuling <mikey@neuling.org>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/signal_32.c | 29 +++++++++++++++++++++--------
+ 1 file changed, 21 insertions(+), 8 deletions(-)
+
+--- a/arch/powerpc/kernel/signal_32.c
++++ b/arch/powerpc/kernel/signal_32.c
+@@ -407,7 +407,8 @@ inline unsigned long copy_transact_fpr_f
+ * altivec/spe instructions at some point.
+ */
+ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
+- int sigret, int ctx_has_vsx_region)
++ struct mcontext __user *tm_frame, int sigret,
++ int ctx_has_vsx_region)
+ {
+ unsigned long msr = regs->msr;
+
+@@ -475,6 +476,12 @@ static int save_user_regs(struct pt_regs
+
+ if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
+ return 1;
++ /* We need to write 0 the MSR top 32 bits in the tm frame so that we
++ * can check it on the restore to see if TM is active
++ */
++ if (tm_frame && __put_user(0, &tm_frame->mc_gregs[PT_MSR]))
++ return 1;
++
+ if (sigret) {
+ /* Set up the sigreturn trampoline: li r0,sigret; sc */
+ if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
+@@ -952,6 +959,7 @@ int handle_rt_signal32(unsigned long sig
+ {
+ struct rt_sigframe __user *rt_sf;
+ struct mcontext __user *frame;
++ struct mcontext __user *tm_frame = NULL;
+ void __user *addr;
+ unsigned long newsp = 0;
+ int sigret;
+@@ -985,23 +993,24 @@ int handle_rt_signal32(unsigned long sig
+ }
+
+ #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
++ tm_frame = &rt_sf->uc_transact.uc_mcontext;
+ if (MSR_TM_ACTIVE(regs->msr)) {
+- if (save_tm_user_regs(regs, &rt_sf->uc.uc_mcontext,
+- &rt_sf->uc_transact.uc_mcontext, sigret))
++ if (save_tm_user_regs(regs, frame, tm_frame, sigret))
+ goto badframe;
+ }
+ else
+ #endif
+- if (save_user_regs(regs, frame, sigret, 1))
++ {
++ if (save_user_regs(regs, frame, tm_frame, sigret, 1))
+ goto badframe;
++ }
+ regs->link = tramp;
+
+ #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ if (MSR_TM_ACTIVE(regs->msr)) {
+ if (__put_user((unsigned long)&rt_sf->uc_transact,
+ &rt_sf->uc.uc_link)
+- || __put_user(to_user_ptr(&rt_sf->uc_transact.uc_mcontext),
+- &rt_sf->uc_transact.uc_regs))
++ || __put_user((unsigned long)tm_frame, &rt_sf->uc_transact.uc_regs))
+ goto badframe;
+ }
+ else
+@@ -1170,7 +1179,7 @@ long sys_swapcontext(struct ucontext __u
+ mctx = (struct mcontext __user *)
+ ((unsigned long) &old_ctx->uc_mcontext & ~0xfUL);
+ if (!access_ok(VERIFY_WRITE, old_ctx, ctx_size)
+- || save_user_regs(regs, mctx, 0, ctx_has_vsx_region)
++ || save_user_regs(regs, mctx, NULL, 0, ctx_has_vsx_region)
+ || put_sigset_t(&old_ctx->uc_sigmask, ¤t->blocked)
+ || __put_user(to_user_ptr(mctx), &old_ctx->uc_regs))
+ return -EFAULT;
+@@ -1392,6 +1401,7 @@ int handle_signal32(unsigned long sig, s
+ {
+ struct sigcontext __user *sc;
+ struct sigframe __user *frame;
++ struct mcontext __user *tm_mctx = NULL;
+ unsigned long newsp = 0;
+ int sigret;
+ unsigned long tramp;
+@@ -1425,6 +1435,7 @@ int handle_signal32(unsigned long sig, s
+ }
+
+ #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
++ tm_mctx = &frame->mctx_transact;
+ if (MSR_TM_ACTIVE(regs->msr)) {
+ if (save_tm_user_regs(regs, &frame->mctx, &frame->mctx_transact,
+ sigret))
+@@ -1432,8 +1443,10 @@ int handle_signal32(unsigned long sig, s
+ }
+ else
+ #endif
+- if (save_user_regs(regs, &frame->mctx, sigret, 1))
++ {
++ if (save_user_regs(regs, &frame->mctx, tm_mctx, sigret, 1))
+ goto badframe;
++ }
+
+ regs->link = tramp;
+
--- /dev/null
+From b14b6260efeee6eb8942c6e6420e31281892acb6 Mon Sep 17 00:00:00 2001
+From: Michael Ellerman <michael@ellerman.id.au>
+Date: Tue, 25 Jun 2013 17:47:57 +1000
+Subject: powerpc: Wire up the HV facility unavailable exception
+
+From: Michael Ellerman <michael@ellerman.id.au>
+
+commit b14b6260efeee6eb8942c6e6420e31281892acb6 upstream.
+
+Similar to the facility unavailable exception, except the facilities are
+controlled by HFSCR.
+
+Adapt the facility_unavailable_exception() so it can be called for
+either the regular or Hypervisor facility unavailable exceptions.
+
+Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/exceptions-64s.S | 15 +++++++++++++++
+ arch/powerpc/kernel/traps.c | 16 ++++++++++++----
+ 2 files changed, 27 insertions(+), 4 deletions(-)
+
+--- a/arch/powerpc/kernel/exceptions-64s.S
++++ b/arch/powerpc/kernel/exceptions-64s.S
+@@ -347,6 +347,12 @@ facility_unavailable_trampoline:
+ EXCEPTION_PROLOG_0(PACA_EXGEN)
+ b facility_unavailable_pSeries
+
++hv_facility_unavailable_trampoline:
++ . = 0xf80
++ SET_SCRATCH0(r13)
++ EXCEPTION_PROLOG_0(PACA_EXGEN)
++ b facility_unavailable_hv
++
+ #ifdef CONFIG_CBE_RAS
+ STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error)
+ KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1202)
+@@ -525,6 +531,8 @@ denorm_done:
+ KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf40)
+ STD_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
+ KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf60)
++ STD_EXCEPTION_HV_OOL(0xf82, facility_unavailable)
++ KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xf82)
+
+ /*
+ * An interrupt came in while soft-disabled. We set paca->irq_happened, then:
+@@ -836,6 +844,12 @@ facility_unavailable_relon_trampoline:
+ EXCEPTION_PROLOG_0(PACA_EXGEN)
+ b facility_unavailable_relon_pSeries
+
++hv_facility_unavailable_relon_trampoline:
++ . = 0x4f80
++ SET_SCRATCH0(r13)
++ EXCEPTION_PROLOG_0(PACA_EXGEN)
++ b facility_unavailable_relon_hv
++
+ STD_RELON_EXCEPTION_PSERIES(0x5300, 0x1300, instruction_breakpoint)
+ #ifdef CONFIG_PPC_DENORMALISATION
+ . = 0x5500
+@@ -1174,6 +1188,7 @@ __end_handlers:
+ STD_RELON_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
+ STD_RELON_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
+ STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
++ STD_RELON_EXCEPTION_HV_OOL(0xf80, facility_unavailable)
+
+ #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
+ /*
+--- a/arch/powerpc/kernel/traps.c
++++ b/arch/powerpc/kernel/traps.c
+@@ -1295,10 +1295,18 @@ void facility_unavailable_exception(stru
+ "EBB",
+ "TAR",
+ };
+- char *facility;
++ char *facility, *prefix;
+ u64 value;
+
+- value = mfspr(SPRN_FSCR) >> 56;
++ if (regs->trap == 0xf60) {
++ value = mfspr(SPRN_FSCR);
++ prefix = "";
++ } else {
++ value = mfspr(SPRN_HFSCR);
++ prefix = "Hypervisor ";
++ }
++
++ value = value >> 56;
+
+ /* We restore the interrupt state now */
+ if (!arch_irq_disabled_regs(regs))
+@@ -1309,8 +1317,8 @@ void facility_unavailable_exception(stru
+ else
+ facility = "unknown";
+
+- pr_err("Facility '%s' unavailable, exception at 0x%lx, MSR=%lx\n",
+- facility, regs->nip, regs->msr);
++ pr_err("%sFacility '%s' unavailable, exception at 0x%lx, MSR=%lx\n",
++ prefix, facility, regs->nip, regs->msr);
+
+ if (user_mode(regs)) {
+ _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
--- /dev/null
+powerpc-hw_brk-fix-setting-of-length-for-exact-mode-breakpoints.patch
+powerpc-hw_brk-fix-clearing-of-extraneous-irq.patch
+powerpc-hw_brk-fix-off-by-one-error-when-validating-dawr-region-end.patch
+powerpc-powernv-fix-iommu-initialization-again.patch
+powerpc-tm-fix-writing-top-half-of-msr-on-32-bit-signals.patch
+powerpc-tm-fix-32-bit-non-rt-signals.patch
+powerpc-tm-fix-restoration-of-msr-on-32bit-signal-return.patch
+powerpc-tm-fix-return-of-32bit-rt-signals-to-active-transactions.patch
+powerpc-tm-fix-return-of-active-64bit-signals.patch
+powerpc-remove-unreachable-relocation-on-exception-handlers.patch
+powerpc-remove-kvmtest-from-relon-exception-handlers.patch
+powerpc-rename-and-flesh-out-the-facility-unavailable-exception-handler.patch
+powerpc-wire-up-the-hv-facility-unavailable-exception.patch
+powerpc-smp-section-mismatch-from-smp_release_cpus-to-__initdata-spinning_secondaries.patch
+powerpc-numa-do-not-update-sysfs-cpu-registration-from-invalid-context.patch
+powerpc-perf-check-that-events-only-include-valid-bits-on-power8.patch
+powerpc-perf-rework-disable-logic-in-pmu_disable.patch
+powerpc-perf-freeze-pmc5-6-if-we-re-not-using-them.patch
+powerpc-perf-use-existing-out-label-in-power_pmu_enable.patch
+powerpc-perf-don-t-enable-if-we-have-zero-events.patch