git.ipfire.org Git - people/ms/linux.git/commitdiff
metag: perf: fixes for interrupting perf counters
author: James Hogan <james.hogan@imgtec.com>
Wed, 27 Feb 2013 16:16:38 +0000 (16:16 +0000)
committer: James Hogan <james.hogan@imgtec.com>
Fri, 15 Mar 2013 13:20:00 +0000 (13:20 +0000)
The overflow handler needs to read modify write when re-enabling the
counter so as not to change the counter value as it may have been
changed to ready the next interrupt on overflow. Similarly for
interrupting counters metag_pmu_enable_counter needs to leave the
counter value unchanged rather than resetting it to zero.

Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
arch/metag/kernel/perf/perf_event.c

index 8096db2a550b812ed3aa6daf37fd347cd9aa2861..a00f527eade5b553205614b5c0f5860252b2c2af 100644 (file)
@@ -643,13 +643,15 @@ static void metag_pmu_enable_counter(struct hw_perf_event *event, int idx)
                config = tmp >> 4;
        }
 
-       /*
-        * Enabled counters start from 0. Early cores clear the count on
-        * write but newer cores don't, so we make sure that the count is
-        * set to 0.
-        */
        tmp = ((config & 0xf) << 28) |
                        ((1 << 24) << cpu_2_hwthread_id[get_cpu()]);
+       if (metag_pmu->max_period)
+               /*
+                * Cores supporting overflow interrupts may have had the counter
+                * set to a specific value that needs preserving.
+                */
+               tmp |= metag_in32(PERF_COUNT(idx)) & 0x00ffffff;
+
        metag_out32(tmp, PERF_COUNT(idx));
 unlock:
        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
@@ -764,10 +766,16 @@ static irqreturn_t metag_pmu_counter_overflow(int irq, void *dev)
 
        /*
         * Enable the counter again once core overflow processing has
-        * completed.
+        * completed. Note the counter value may have been modified while it was
+        * inactive to set it up ready for the next interrupt.
         */
-       if (!perf_event_overflow(event, &sampledata, regs))
+       if (!perf_event_overflow(event, &sampledata, regs)) {
+               __global_lock2(flags);
+               counter = (counter & 0xff000000) |
+                         (metag_in32(PERF_COUNT(idx)) & 0x00ffffff);
                metag_out32(counter, PERF_COUNT(idx));
+               __global_unlock2(flags);
+       }
 
        return IRQ_HANDLED;
 }