/*
 * linux/include/asm-arm/proc-armv/system.h
 *
 * Copyright (C) 1996 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_PROC_SYSTEM_H
#define __ASM_PROC_SYSTEM_H

/*
 * Save the current interrupt enable state & disable IRQs
 */
#ifdef CONFIG_ARM64

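/*
 * On ARMv8 the DAIF system register holds the Debug, SError, IRQ and
 * FIQ mask bits.  Writing #3 via "msr daifset"/"msr daifclr" sets or
 * clears the I and F bits, i.e. masks or unmasks IRQs and FIQs.
 */
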
/*
 * Save the current interrupt enable state
 * and disable IRQs/FIQs
 */
#define local_irq_save(flags) \
	({ \
	asm volatile( \
	"mrs %0, daif\n" \
	"msr daifset, #3" \
	: "=r" (flags) \
	: \
	: "memory"); \
	})

/*
 * restore saved IRQ & FIQ state
 */
#define local_irq_restore(flags) \
	({ \
	asm volatile( \
	"msr daif, %0" \
	: \
	: "r" (flags) \
	: "memory"); \
	})

/*
 * Enable IRQs/FIQs
 */
#define local_irq_enable() \
	({ \
	asm volatile( \
	"msr daifclr, #3" \
	: \
	: \
	: "memory"); \
	})

/*
 * Disable IRQs/FIQs
 */
#define local_irq_disable() \
	({ \
	asm volatile( \
	"msr daifset, #3" \
	: \
	: \
	: "memory"); \
	})

#else	/* CONFIG_ARM64 */

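/*
 * In the 32-bit ARM CPSR, bit 7 (0x80, i.e. #128) is the IRQ mask bit
 * and bit 6 (0x40, i.e. #64) is the FIQ mask bit; the macros below set
 * or clear those bits to disable or enable the corresponding interrupts.
 */
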
#define local_irq_save(x) \
	({ \
	unsigned long temp; \
	__asm__ __volatile__( \
	"mrs %0, cpsr @ local_irq_save\n" \
	" orr %1, %0, #128\n" \
	" msr cpsr_c, %1" \
	: "=r" (x), "=r" (temp) \
	: \
	: "memory"); \
	})

/*
 * Enable IRQs
 */
#define local_irq_enable() \
	({ \
	unsigned long temp; \
	__asm__ __volatile__( \
	"mrs %0, cpsr @ local_irq_enable\n" \
	" bic %0, %0, #128\n" \
	" msr cpsr_c, %0" \
	: "=r" (temp) \
	: \
	: "memory"); \
	})

/*
 * Disable IRQs
 */
#define local_irq_disable() \
	({ \
	unsigned long temp; \
	__asm__ __volatile__( \
	"mrs %0, cpsr @ local_irq_disable\n" \
	" orr %0, %0, #128\n" \
	" msr cpsr_c, %0" \
	: "=r" (temp) \
	: \
	: "memory"); \
	})

/*
 * Enable FIQs
 */
#define __stf() \
	({ \
	unsigned long temp; \
	__asm__ __volatile__( \
	"mrs %0, cpsr @ stf\n" \
	" bic %0, %0, #64\n" \
	" msr cpsr_c, %0" \
	: "=r" (temp) \
	: \
	: "memory"); \
	})

/*
 * Disable FIQs
 */
#define __clf() \
	({ \
	unsigned long temp; \
	__asm__ __volatile__( \
	"mrs %0, cpsr @ clf\n" \
	" orr %0, %0, #64\n" \
	" msr cpsr_c, %0" \
	: "=r" (temp) \
	: \
	: "memory"); \
	})

/*
 * Save the current interrupt enable state.
 */
#define local_save_flags(x) \
	({ \
	__asm__ __volatile__( \
	"mrs %0, cpsr @ local_save_flags\n" \
	: "=r" (x) \
	: \
	: "memory"); \
	})

/*
 * restore saved IRQ & FIQ state
 */
#define local_irq_restore(x) \
	__asm__ __volatile__( \
	"msr cpsr_c, %0 @ local_irq_restore\n" \
	: \
	: "r" (x) \
	: "memory")

#endif /* CONFIG_ARM64 */

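/*
 * Illustrative usage sketch (not part of the original header): on either
 * architecture a critical section typically saves the current mask state,
 * runs with interrupts disabled, and then restores whatever was saved:
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	... code that must not be interrupted ...
 *	local_irq_restore(flags);
 */
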
#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110) || \
	defined(CONFIG_ARM64)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally.  This means that the cache becomes inconsistent, and,
 * since we use normal loads/stores as well, this is really bad.
 * Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects.  There are two work-arounds:
 *	1. Disable interrupts and emulate the atomic swap
 *	2. Clean the cache, perform atomic swap, flush the cache
 *
 * We choose (1) since it's the "easiest" to achieve here and is not
 * dependent on the processor type.
 */
#define swp_is_buggy
#endif

static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	extern void __bad_xchg(volatile void *, int);
	unsigned long ret;
#ifdef swp_is_buggy
	unsigned long flags;
#endif

	switch (size) {
#ifdef swp_is_buggy
	/* swp is unusable here: mask interrupts and emulate the swap */
	case 1:
		local_irq_save(flags);
		ret = *(volatile unsigned char *)ptr;
		*(volatile unsigned char *)ptr = x;
		local_irq_restore(flags);
		break;

	case 4:
		local_irq_save(flags);
		ret = *(volatile unsigned long *)ptr;
		*(volatile unsigned long *)ptr = x;
		local_irq_restore(flags);
		break;
#else
	/* the atomic swp/swpb instructions are safe to use directly */
	case 1:	__asm__ __volatile__ ("swpb %0, %1, [%2]"
					: "=&r" (ret)
					: "r" (x), "r" (ptr)
					: "memory");
		break;
	case 4:	__asm__ __volatile__ ("swp %0, %1, [%2]"
					: "=&r" (ret)
					: "r" (x), "r" (ptr)
					: "memory");
		break;
#endif
	default: __bad_xchg(ptr, size), ret = 0;
	}

	return ret;
}

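/*
 * Illustrative usage sketch (not part of this header): callers normally
 * reach __xchg() through a type-preserving xchg() wrapper macro along
 * the lines of
 *
 *	#define xchg(ptr, x) \
 *		((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), \
 *					    sizeof(*(ptr))))
 *
 * so that, e.g., "old = xchg(&word, 1);" atomically stores 1 and returns
 * the previous value.
 */
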
#endif