/*
 * Count register synchronisation.
 *
 * All CPUs will have their count registers synchronised to CPU0's next time
 * value. This can cause a small timewarp for CPU0. All other CPUs should
 * not have done anything significant (but they may have had interrupts
 * enabled briefly - prom_smp_finish() should not be responsible for enabling
 * interrupts...)
 *
 * FIXME: broken for SMTC
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irqflags.h>
#include <linux/cpumask.h>
#include <linux/atomic.h>

#include <asm/r4k-timer.h>
#include <asm/barrier.h>
#include <asm/mipsregs.h>

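/*
 * count_start_flag releases the slaves from their initial wait and
 * count_reference carries the master's Count snapshot to them;
 * count_count_start and count_count_stop implement the per-iteration
 * rendezvous between the master and the slaves in the loops below.
 */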
static atomic_t __cpuinitdata count_start_flag = ATOMIC_INIT(0);
static atomic_t __cpuinitdata count_count_start = ATOMIC_INIT(0);
static atomic_t __cpuinitdata count_count_stop = ATOMIC_INIT(0);
static atomic_t __cpuinitdata count_reference = ATOMIC_INIT(0);

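/*
 * COUNTON is how far ahead of the current Count the first post-sync
 * timer interrupt is scheduled; NR_LOOPS is the number of handshake
 * passes, of which only the last one actually writes Count (the
 * earlier ones just prime the caches, see below).
 */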
#define COUNTON 100
#define NR_LOOPS 5

void __cpuinit synchronise_count_master(void)
{
        int i;
        unsigned long flags;
        unsigned int initcount;
        int nslaves;

#ifdef CONFIG_MIPS_MT_SMTC
        /*
         * SMTC needs to synchronise per VPE, not per CPU;
         * ignore for now.
         */
        return;
#endif

        printk(KERN_INFO "Synchronize counters across %u CPUs: ",
               num_online_cpus());

        local_irq_save(flags);

        /*
         * Notify the slaves that it's time to start
         */
        atomic_set(&count_reference, read_c0_count());
        atomic_set(&count_start_flag, 1);
        smp_wmb();
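
        /*
         * Slaves spinning on count_start_flag are released at this
         * point and will pick up count_reference as their target
         * Count value.
         */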

        /* Count will be initialised to current timer for all CPUs */
        initcount = read_c0_count();

        /*
         * We loop a few times to get a primed instruction cache,
         * then the last pass is more or less synchronised and
         * the master and slaves each set their cycle counters to a known
         * value all at once. This reduces the chance of having random offsets
         * between the processors, and guarantees that the maximum
         * delay between the cycle counters is never bigger than
         * the latency of information-passing (cachelines) between
         * two CPUs.
         */
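
        /*
         * Each pass is a two-phase rendezvous: the slaves bump
         * count_count_start and spin until the master's extra increment
         * takes it to ncpus, everyone (on the last pass only) writes
         * Count, and the same dance is repeated on count_count_stop so
         * that the counters can be reset for the next pass.
         */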

        nslaves = num_online_cpus()-1;
        for (i = 0; i < NR_LOOPS; i++) {
                /* slaves loop on '!= ncpus' */
                while (atomic_read(&count_count_start) != nslaves)
                        mb();
                atomic_set(&count_count_stop, 0);
                smp_wmb();

                /* this lets the slaves write their count register */
                atomic_inc(&count_count_start);

                /*
                 * Everyone initialises count in the last loop:
                 */
                if (i == NR_LOOPS-1)
                        write_c0_count(initcount);

                /*
                 * Wait for all slaves to leave the synchronization point:
                 */
                while (atomic_read(&count_count_stop) != nslaves)
                        mb();
                atomic_set(&count_count_start, 0);
                smp_wmb();
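                /*
                 * This final increment takes count_count_stop to ncpus
                 * and lets the slaves out of the second rendezvous;
                 * count_count_start is already clear for the next pass.
                 */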
                atomic_inc(&count_count_stop);
        }
        /* Arrange for an interrupt in a short while */
        write_c0_compare(read_c0_count() + COUNTON);

        local_irq_restore(flags);

        /*
         * i386 code reported the skew here, but the
         * count registers were almost certainly out of sync,
         * so there is no point in alarming people.
         */
        printk("done.\n");
}

void __cpuinit synchronise_count_slave(void)
{
        int i;
        unsigned long flags;
        unsigned int initcount;
        int ncpus;

#ifdef CONFIG_MIPS_MT_SMTC
        /*
         * SMTC needs to synchronise per VPE, not per CPU;
         * ignore for now.
         */
        return;
#endif

        local_irq_save(flags);

        /*
         * Not every CPU is online at the time this gets called,
         * so we first wait for the master to say everyone is ready.
         */

        while (!atomic_read(&count_start_flag))
                mb();

        /* Count will be initialised to next expire for all CPUs */
        initcount = atomic_read(&count_reference);
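
        /*
         * Note that this is the master's Count snapshot taken just
         * before count_start_flag was raised, not a fresh read of the
         * local Count register.
         */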

        ncpus = num_online_cpus();
        for (i = 0; i < NR_LOOPS; i++) {
                atomic_inc(&count_count_start);
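                /*
                 * All the slaves' increments plus the master's release
                 * bump are needed to reach ncpus.
                 */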
                while (atomic_read(&count_count_start) != ncpus)
                        mb();

                /*
                 * Everyone initialises count in the last loop:
                 */
                if (i == NR_LOOPS-1)
                        write_c0_count(initcount);

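                /*
                 * Second rendezvous: the master resets count_count_start
                 * for the next pass before bumping count_count_stop to
                 * ncpus and releasing us.
                 */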
                atomic_inc(&count_count_stop);
                while (atomic_read(&count_count_stop) != ncpus)
                        mb();
        }
        /* Arrange for an interrupt in a short while */
        write_c0_compare(read_c0_count() + COUNTON);

        local_irq_restore(flags);
}
#undef NR_LOOPS