--- /dev/null
+From efbc74ace95338484f8d732037b99c7c77098fce Mon Sep 17 00:00:00 2001
+From: Will Deacon <will.deacon@arm.com>
+Date: Fri, 24 Feb 2012 12:12:38 +0100
+Subject: ARM: 7345/1: errata: update workaround for A9 erratum #743622
+
+From: Will Deacon <will.deacon@arm.com>
+
+commit efbc74ace95338484f8d732037b99c7c77098fce upstream.
+
+Erratum #743622 affects all r2 variants of the Cortex-A9 processor, so
+ensure that the workaround is applied regardless of the revision.
+
+Reported-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/Kconfig | 2 +-
+ arch/arm/mm/proc-v7.S | 4 +---
+ 2 files changed, 2 insertions(+), 4 deletions(-)
+
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -1272,7 +1272,7 @@ config ARM_ERRATA_743622
+ depends on CPU_V7
+ help
+ This option enables the workaround for the 743622 Cortex-A9
+- (r2p0..r2p2) erratum. Under very rare conditions, a faulty
++ (r2p*) erratum. Under very rare conditions, a faulty
+ optimisation in the Cortex-A9 Store Buffer may lead to data
+ corruption. This workaround sets a specific bit in the diagnostic
+ register of the Cortex-A9 which disables the Store Buffer
+--- a/arch/arm/mm/proc-v7.S
++++ b/arch/arm/mm/proc-v7.S
+@@ -352,9 +352,7 @@ __v7_setup:
+ mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register
+ #endif
+ #ifdef CONFIG_ARM_ERRATA_743622
+- teq r6, #0x20 @ present in r2p0
+- teqne r6, #0x21 @ present in r2p1
+- teqne r6, #0x22 @ present in r2p2
++ teq r5, #0x00200000 @ only present in r2p*
+ mrceq p15, 0, r10, c15, c0, 1 @ read diagnostic register
+ orreq r10, r10, #1 << 6 @ set bit #6
+ mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register
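The single-instruction check works because the Cortex-A9 MIDR encodes the
variant (the "rN" in rNpM) in bits [23:20] and the revision (the "pM") in
bits [3:0]; at this point in __v7_setup, r5 holds the already-masked variant
field, so one comparison against 0x00200000 covers every r2pN part. A rough
user-space model of the same test (hypothetical helper, not part of the
patch; the sample MIDR values are the published ones for A9 r2p2 and r3p0):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MIDR_VARIANT_MASK	0x00f00000u	/* bits [23:20]: "rN" */
#define MIDR_REVISION_MASK	0x0000000fu	/* bits [3:0]:  "pM" */

/* True for any r2pN Cortex-A9, mirroring "teq r5, #0x00200000". */
static bool midr_is_r2_variant(uint32_t midr)
{
	return (midr & MIDR_VARIANT_MASK) == 0x00200000u;
}

int main(void)
{
	/* 0x412fc092: Cortex-A9 r2p2 - matched by the workaround. */
	printf("r2p2 affected: %d\n", midr_is_r2_variant(0x412fc092u));
	/* 0x413fc090: Cortex-A9 r3p0 - not matched. */
	printf("r3p0 affected: %d\n", midr_is_r2_variant(0x413fc090u));
	return 0;
}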
--- /dev/null
+From 5727347180ebc6b4a866fcbe00dcb39cc03acb37 Mon Sep 17 00:00:00 2001
+From: Will Deacon <will.deacon@arm.com>
+Date: Tue, 6 Mar 2012 17:33:17 +0100
+Subject: ARM: 7354/1: perf: limit sample_period to half max_period in non-sampling mode
+
+From: Will Deacon <will.deacon@arm.com>
+
+commit 5727347180ebc6b4a866fcbe00dcb39cc03acb37 upstream.
+
+On ARM, the PMU does not stop counting after an overflow and therefore
+IRQ latency affects the new counter value read by the kernel. This is
+significant for non-sampling runs where it is possible for the new value
+to overtake the previous one, causing the delta to be out by up to
+max_period events.
+
+Commit a737823d ("ARM: 6835/1: perf: ensure overflows aren't missed due
+to IRQ latency") attempted to fix this problem by allowing interrupt
+handlers to pass an overflow flag to the event update function, causing
+the overflow calculation to assume that the counter passed through zero
+when going from prev to new. Unfortunately, this doesn't work when
+overflow occurs on the perf_task_tick path because we have the flag
+cleared and end up computing a large negative delta.
+
+This patch removes the overflow flag from armpmu_event_update and
+instead limits the sample_period to half of the max_period for
+non-sampling profiling runs.
+
+Signed-off-by: Ming Lei <ming.lei@canonical.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/include/asm/pmu.h | 2 +-
+ arch/arm/kernel/perf_event.c | 22 +++++++++++-----------
+ arch/arm/kernel/perf_event_v6.c | 2 +-
+ arch/arm/kernel/perf_event_v7.c | 2 +-
+ arch/arm/kernel/perf_event_xscale.c | 4 ++--
+ 5 files changed, 16 insertions(+), 16 deletions(-)
+
+--- a/arch/arm/include/asm/pmu.h
++++ b/arch/arm/include/asm/pmu.h
+@@ -125,7 +125,7 @@ int __init armpmu_register(struct arm_pm
+
+ u64 armpmu_event_update(struct perf_event *event,
+ struct hw_perf_event *hwc,
+- int idx, int overflow);
++ int idx);
+
+ int armpmu_event_set_period(struct perf_event *event,
+ struct hw_perf_event *hwc,
+--- a/arch/arm/kernel/perf_event.c
++++ b/arch/arm/kernel/perf_event.c
+@@ -187,7 +187,7 @@ armpmu_event_set_period(struct perf_even
+ u64
+ armpmu_event_update(struct perf_event *event,
+ struct hw_perf_event *hwc,
+- int idx, int overflow)
++ int idx)
+ {
+ struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+ u64 delta, prev_raw_count, new_raw_count;
+@@ -200,13 +200,7 @@ again:
+ new_raw_count) != prev_raw_count)
+ goto again;
+
+- new_raw_count &= armpmu->max_period;
+- prev_raw_count &= armpmu->max_period;
+-
+- if (overflow)
+- delta = armpmu->max_period - prev_raw_count + new_raw_count + 1;
+- else
+- delta = new_raw_count - prev_raw_count;
++ delta = (new_raw_count - prev_raw_count) & armpmu->max_period;
+
+ local64_add(delta, &event->count);
+ local64_sub(delta, &hwc->period_left);
+@@ -223,7 +217,7 @@ armpmu_read(struct perf_event *event)
+ if (hwc->idx < 0)
+ return;
+
+- armpmu_event_update(event, hwc, hwc->idx, 0);
++ armpmu_event_update(event, hwc, hwc->idx);
+ }
+
+ static void
+@@ -239,7 +233,7 @@ armpmu_stop(struct perf_event *event, in
+ if (!(hwc->state & PERF_HES_STOPPED)) {
+ armpmu->disable(hwc, hwc->idx);
+ barrier(); /* why? */
+- armpmu_event_update(event, hwc, hwc->idx, 0);
++ armpmu_event_update(event, hwc, hwc->idx);
+ hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+ }
+ }
+@@ -519,7 +513,13 @@ __hw_perf_event_init(struct perf_event *
+ hwc->config_base |= (unsigned long)mapping;
+
+ if (!hwc->sample_period) {
+- hwc->sample_period = armpmu->max_period;
++ /*
++ * For non-sampling runs, limit the sample_period to half
++ * of the counter width. That way, the new counter value
++ * is far less likely to overtake the previous one unless
++ * you have some serious IRQ latency issues.
++ */
++ hwc->sample_period = armpmu->max_period >> 1;
+ hwc->last_period = hwc->sample_period;
+ local64_set(&hwc->period_left, hwc->sample_period);
+ }
+--- a/arch/arm/kernel/perf_event_v6.c
++++ b/arch/arm/kernel/perf_event_v6.c
+@@ -520,7 +520,7 @@ armv6pmu_handle_irq(int irq_num,
+ continue;
+
+ hwc = &event->hw;
+- armpmu_event_update(event, hwc, idx, 1);
++ armpmu_event_update(event, hwc, idx);
+ data.period = event->hw.last_period;
+ if (!armpmu_event_set_period(event, hwc, idx))
+ continue;
+--- a/arch/arm/kernel/perf_event_v7.c
++++ b/arch/arm/kernel/perf_event_v7.c
+@@ -1032,7 +1032,7 @@ static irqreturn_t armv7pmu_handle_irq(i
+ continue;
+
+ hwc = &event->hw;
+- armpmu_event_update(event, hwc, idx, 1);
++ armpmu_event_update(event, hwc, idx);
+ data.period = event->hw.last_period;
+ if (!armpmu_event_set_period(event, hwc, idx))
+ continue;
+--- a/arch/arm/kernel/perf_event_xscale.c
++++ b/arch/arm/kernel/perf_event_xscale.c
+@@ -257,7 +257,7 @@ xscale1pmu_handle_irq(int irq_num, void
+ continue;
+
+ hwc = &event->hw;
+- armpmu_event_update(event, hwc, idx, 1);
++ armpmu_event_update(event, hwc, idx);
+ data.period = event->hw.last_period;
+ if (!armpmu_event_set_period(event, hwc, idx))
+ continue;
+@@ -594,7 +594,7 @@ xscale2pmu_handle_irq(int irq_num, void
+ continue;
+
+ hwc = &event->hw;
+- armpmu_event_update(event, hwc, idx, 1);
++ armpmu_event_update(event, hwc, idx);
+ data.period = event->hw.last_period;
+ if (!armpmu_event_set_period(event, hwc, idx))
+ continue;
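The rework hinges on a modular-arithmetic argument: with raw counts masked
to max_period and sample_period capped at max_period/2, the counter is
re-armed roughly half its range away from the next overflow, so an
IRQ-delayed read cannot ambiguously overtake the previous one, and the
unsigned subtraction is correct whether or not the counter wrapped. A
minimal host-side model (hypothetical; assumes a 32-bit counter, as on
these PMUs):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_PERIOD	0xffffffffu	/* 32-bit counter assumed */

/* Wrap-safe delta, as in the reworked armpmu_event_update():
 * unsigned subtraction modulo the counter width is correct
 * whether or not the counter passed through zero. */
static uint64_t counter_delta(uint32_t prev, uint32_t new)
{
	return (new - prev) & MAX_PERIOD;
}

int main(void)
{
	/* Wrapped: 0x10 events up to the overflow plus 0x10 after. */
	printf("%" PRIu64 "\n", counter_delta(0xfffffff0u, 0x10u)); /* 32 */
	/* Not wrapped: plain difference. */
	printf("%" PRIu64 "\n", counter_delta(0x100u, 0x150u));     /* 80 */
	return 0;
}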
--- /dev/null
+From 99c1745b9c76910e195889044f914b4898b7c9a5 Mon Sep 17 00:00:00 2001
+From: Will Deacon <will.deacon@arm.com>
+Date: Tue, 6 Mar 2012 17:34:22 +0100
+Subject: ARM: 7355/1: perf: clear overflow flag when disabling counter on ARMv7 PMU
+
+From: Will Deacon <will.deacon@arm.com>
+
+commit 99c1745b9c76910e195889044f914b4898b7c9a5 upstream.
+
+When disabling a counter on an ARMv7 PMU, we should also clear the
+overflow flag in case an overflow occurred whilst stopping the counter.
+This prevents a spurious overflow being picked up later and leading to
+either false accounting or a NULL dereference.
+
+Reported-by: Ming Lei <tom.leiming@gmail.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/kernel/perf_event_v7.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/arch/arm/kernel/perf_event_v7.c
++++ b/arch/arm/kernel/perf_event_v7.c
+@@ -878,6 +878,11 @@ static inline int armv7_pmnc_disable_int
+
+ counter = ARMV7_IDX_TO_COUNTER(idx);
+ asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
++ isb();
++ /* Clear the overflow flag in case an interrupt is pending. */
++ asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (BIT(counter)));
++ isb();
++
+ return idx;
+ }
+
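In isolation, the sequence added above is: write the counter's bit to
PMINTENCLR (c9, c14, 2) to mask its overflow interrupt, synchronise, then
write the same bit to PMOVSR (c9, c12, 3), the write-one-to-clear overflow
status register, so an overflow that raced with the disable cannot be
picked up later. A standalone sketch under those assumptions (kernel-style
inline asm; compiles only for ARMv7):

#include <stdint.h>

#define BIT(n)	(1u << (n))

static inline void isb(void)
{
	asm volatile("isb" : : : "memory");
}

/* Disable the overflow interrupt for one counter and discard any
 * overflow that raced with the disable. Both registers are
 * write-one-to-clear, so other counters are untouched. */
static inline void pmu_disable_counter_irq(unsigned int counter)
{
	uint32_t mask = BIT(counter);

	/* PMINTENCLR: mask this counter's overflow interrupt. */
	asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (mask));
	isb();
	/* PMOVSR: clear any overflow flag already pending. */
	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (mask));
	isb();
}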
--- /dev/null
+From f6f5a30c834135c9f2fa10400c59ebbdd9188567 Mon Sep 17 00:00:00 2001
+From: Will Deacon <will.deacon@arm.com>
+Date: Tue, 6 Mar 2012 17:34:50 +0100
+Subject: ARM: 7356/1: perf: check that we have an event in the PMU IRQ handlers
+
+From: Will Deacon <will.deacon@arm.com>
+
+commit f6f5a30c834135c9f2fa10400c59ebbdd9188567 upstream.
+
+The PMU IRQ handlers in perf assume that if a counter has overflowed
+then perf must be responsible. In the paranoid world of crazy hardware,
+this could be false, so check that we do have a valid event before
+attempting to dereference NULL in the interrupt path.
+
+Signed-off-by: Ming Lei <tom.leiming@gmail.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/kernel/perf_event_v6.c | 20 ++------------------
+ arch/arm/kernel/perf_event_v7.c | 4 ++++
+ arch/arm/kernel/perf_event_xscale.c | 6 ++++++
+ 3 files changed, 12 insertions(+), 18 deletions(-)
+
+--- a/arch/arm/kernel/perf_event_v6.c
++++ b/arch/arm/kernel/perf_event_v6.c
+@@ -463,23 +463,6 @@ armv6pmu_enable_event(struct hw_perf_eve
+ raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+ }
+
+-static int counter_is_active(unsigned long pmcr, int idx)
+-{
+- unsigned long mask = 0;
+- if (idx == ARMV6_CYCLE_COUNTER)
+- mask = ARMV6_PMCR_CCOUNT_IEN;
+- else if (idx == ARMV6_COUNTER0)
+- mask = ARMV6_PMCR_COUNT0_IEN;
+- else if (idx == ARMV6_COUNTER1)
+- mask = ARMV6_PMCR_COUNT1_IEN;
+-
+- if (mask)
+- return pmcr & mask;
+-
+- WARN_ONCE(1, "invalid counter number (%d)\n", idx);
+- return 0;
+-}
+-
+ static irqreturn_t
+ armv6pmu_handle_irq(int irq_num,
+ void *dev)
+@@ -509,7 +492,8 @@ armv6pmu_handle_irq(int irq_num,
+ struct perf_event *event = cpuc->events[idx];
+ struct hw_perf_event *hwc;
+
+- if (!counter_is_active(pmcr, idx))
++ /* Ignore if we don't have an event. */
++ if (!event)
+ continue;
+
+ /*
+--- a/arch/arm/kernel/perf_event_v7.c
++++ b/arch/arm/kernel/perf_event_v7.c
+@@ -1029,6 +1029,10 @@ static irqreturn_t armv7pmu_handle_irq(i
+ struct perf_event *event = cpuc->events[idx];
+ struct hw_perf_event *hwc;
+
++ /* Ignore if we don't have an event. */
++ if (!event)
++ continue;
++
+ /*
+ * We have a single interrupt for all counters. Check that
+ * each counter has overflowed before we process it.
+--- a/arch/arm/kernel/perf_event_xscale.c
++++ b/arch/arm/kernel/perf_event_xscale.c
+@@ -253,6 +253,9 @@ xscale1pmu_handle_irq(int irq_num, void
+ struct perf_event *event = cpuc->events[idx];
+ struct hw_perf_event *hwc;
+
++ if (!event)
++ continue;
++
+ if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx))
+ continue;
+
+@@ -590,6 +593,9 @@ xscale2pmu_handle_irq(int irq_num, void
+ struct perf_event *event = cpuc->events[idx];
+ struct hw_perf_event *hwc;
+
++ if (!event)
++ continue;
++
+ if (!xscale2_pmnc_counter_has_overflowed(pmnc, idx))
+ continue;
+
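The fix has the same shape in all four handlers: skip any counter slot with
no perf_event bound before anything dereferences event->hw, since the
overflow may not be perf's at all. A host-side model of the hardened loop
(hypothetical types; overflow state reduced to a bitmask):

#include <stdio.h>

#define NUM_COUNTERS	4

struct perf_event {
	const char *name;
};

/* Model of the hardened IRQ loop: a flagged counter that perf
 * never claimed is skipped instead of dereferenced. */
static void handle_irq(struct perf_event *events[NUM_COUNTERS],
		       unsigned int overflowed)
{
	for (unsigned int idx = 0; idx < NUM_COUNTERS; ++idx) {
		struct perf_event *event = events[idx];

		/* Ignore if we don't have an event. */
		if (!event)
			continue;
		if (!(overflowed & (1u << idx)))
			continue;
		printf("overflow on %s\n", event->name);
	}
}

int main(void)
{
	struct perf_event cycles = { "cycles" };
	struct perf_event *events[NUM_COUNTERS] = { [2] = &cycles };

	/* Counters 1 and 2 flagged; only counter 2 has an owner. */
	handle_irq(events, (1u << 1) | (1u << 2));
	return 0;
}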
--- /dev/null
+From 3f31ae121348afd9ed39700ea2a63c17cd7eeed1 Mon Sep 17 00:00:00 2001
+From: Will Deacon <will.deacon@arm.com>
+Date: Tue, 6 Mar 2012 17:35:55 +0100
+Subject: ARM: 7357/1: perf: fix overflow handling for xscale2 PMUs
+
+From: Will Deacon <will.deacon@arm.com>
+
+commit 3f31ae121348afd9ed39700ea2a63c17cd7eeed1 upstream.
+
+xscale2 PMUs indicate overflow not via the PMU control register, but
+via a separate overflow FLAG register.
+
+This patch fixes the xscale2 PMU code to use this register to detect
+overflow and ensures that we clear any pending overflow when
+disabling a counter.
+
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/kernel/perf_event_xscale.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+--- a/arch/arm/kernel/perf_event_xscale.c
++++ b/arch/arm/kernel/perf_event_xscale.c
+@@ -596,7 +596,7 @@ xscale2pmu_handle_irq(int irq_num, void
+ if (!event)
+ continue;
+
+- if (!xscale2_pmnc_counter_has_overflowed(pmnc, idx))
++ if (!xscale2_pmnc_counter_has_overflowed(of_flags, idx))
+ continue;
+
+ hwc = &event->hw;
+@@ -667,7 +667,7 @@ xscale2pmu_enable_event(struct hw_perf_e
+ static void
+ xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
+ {
+- unsigned long flags, ien, evtsel;
++ unsigned long flags, ien, evtsel, of_flags;
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+
+ ien = xscale2pmu_read_int_enable();
+@@ -676,26 +676,31 @@ xscale2pmu_disable_event(struct hw_perf_
+ switch (idx) {
+ case XSCALE_CYCLE_COUNTER:
+ ien &= ~XSCALE2_CCOUNT_INT_EN;
++ of_flags = XSCALE2_CCOUNT_OVERFLOW;
+ break;
+ case XSCALE_COUNTER0:
+ ien &= ~XSCALE2_COUNT0_INT_EN;
+ evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
+ evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT0_EVT_SHFT;
++ of_flags = XSCALE2_COUNT0_OVERFLOW;
+ break;
+ case XSCALE_COUNTER1:
+ ien &= ~XSCALE2_COUNT1_INT_EN;
+ evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
+ evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT1_EVT_SHFT;
++ of_flags = XSCALE2_COUNT1_OVERFLOW;
+ break;
+ case XSCALE_COUNTER2:
+ ien &= ~XSCALE2_COUNT2_INT_EN;
+ evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
+ evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT2_EVT_SHFT;
++ of_flags = XSCALE2_COUNT2_OVERFLOW;
+ break;
+ case XSCALE_COUNTER3:
+ ien &= ~XSCALE2_COUNT3_INT_EN;
+ evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
+ evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT3_EVT_SHFT;
++ of_flags = XSCALE2_COUNT3_OVERFLOW;
+ break;
+ default:
+ WARN_ONCE(1, "invalid counter number (%d)\n", idx);
+@@ -705,6 +710,7 @@ xscale2pmu_disable_event(struct hw_perf_
+ raw_spin_lock_irqsave(&events->pmu_lock, flags);
+ xscale2pmu_write_event_select(evtsel);
+ xscale2pmu_write_int_enable(ien);
++ xscale2pmu_write_overflow_flags(of_flags);
+ raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+ }
+
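On the disable side, the patch records which overflow bit belongs to the
counter being torn down and hands it to xscale2pmu_write_overflow_flags()
alongside the event-select and interrupt-enable updates; because the FLAG
register is write-one-to-clear, only that counter's stale overflow is
discarded. A toy host-side model of that behaviour (hypothetical bit
layout, one bit per counter):

#include <stdint.h>
#include <stdio.h>

/* Toy model: overflow state lives in a separate write-one-to-clear
 * FLAG register, not in the control register. */
static uint32_t int_enable;	/* interrupt-enable register */
static uint32_t flag_reg;	/* overflow FLAG register    */

static void write_overflow_flags(uint32_t val)
{
	flag_reg &= ~val;	/* W1C: writing 1 clears the bit */
}

static void disable_counter(unsigned int idx)
{
	uint32_t bit = 1u << idx;

	int_enable &= ~bit;		/* mask the interrupt...      */
	write_overflow_flags(bit);	/* ...and drop a racing flag */
}

int main(void)
{
	int_enable = 0x3;
	flag_reg = 0x1;		/* counter 0 overflowed during disable */
	disable_counter(0);
	printf("pending flags: 0x%x\n", flag_reg);   /* 0x0 */
	printf("still enabled: 0x%x\n", int_enable); /* 0x2 */
	return 0;
}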
omap-4430sdp-panda-add-hdmi-hpd-gpio.patch
omapdss-hdmi-phy-burnout-fix.patch
omapdss-hdmi-hot-plug-detect-fix.patch
+arm-7345-1-errata-update-workaround-for-a9-erratum-743622.patch
+arm-7354-1-perf-limit-sample_period-to-half-max_period-in-non-sampling-mode.patch
+arm-7355-1-perf-clear-overflow-flag-when-disabling-counter-on-armv7-pmu.patch
+arm-7356-1-perf-check-that-we-have-an-event-in-the-pmu-irq-handlers.patch
+arm-7357-1-perf-fix-overflow-handling-for-xscale2-pmus.patch