/*
 * linux/include/asm-arm/proc-armv/system.h
 *
 * Copyright (C) 1996 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_PROC_SYSTEM_H
#define __ASM_PROC_SYSTEM_H

#include <linux/config.h>

#define set_cr(x)					\
	__asm__ __volatile__(				\
	"mcr	p15, 0, %0, c1, c0	@ set CR"	\
	: : "r" (x))

#define CR_M	(1 << 0)	/* MMU enable				*/
#define CR_A	(1 << 1)	/* Alignment abort enable		*/
#define CR_C	(1 << 2)	/* Dcache enable			*/
#define CR_W	(1 << 3)	/* Write buffer enable			*/
#define CR_P	(1 << 4)	/* 32-bit exception handler		*/
#define CR_D	(1 << 5)	/* 32-bit data address range		*/
#define CR_L	(1 << 6)	/* Implementation defined		*/
#define CR_B	(1 << 7)	/* Big endian				*/
#define CR_S	(1 << 8)	/* System MMU protection		*/
#define CR_R	(1 << 9)	/* ROM MMU protection			*/
#define CR_F	(1 << 10)	/* Implementation defined		*/
#define CR_Z	(1 << 11)	/* Implementation defined		*/
#define CR_I	(1 << 12)	/* Icache enable			*/
#define CR_V	(1 << 13)	/* Vectors relocated to 0xffff0000	*/
#define CR_RR	(1 << 14)	/* Round Robin cache replacement	*/

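/*
 * Example (illustrative sketch, not part of the original header): the
 * usual pattern is to read CP15 register c1, set or clear bits, and
 * write the result back with set_cr().  A get_cr() counterpart is
 * assumed below; it is not defined in this file.
 */
#if 0	/* sketch only */
#define get_cr(x)					\
	__asm__ __volatile__(				\
	"mrc	p15, 0, %0, c1, c0	@ get CR"	\
	: "=r" (x))

static inline void __example_icache_on(void)
{
	unsigned long reg;

	get_cr(reg);		/* read current control register */
	set_cr(reg | CR_I);	/* write back with Icache enabled */
}
#endif
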
extern unsigned long cr_no_alignment;	/* defined in entry-armv.S */
extern unsigned long cr_alignment;	/* defined in entry-armv.S */

#if __LINUX_ARM_ARCH__ >= 4
#define vectors_base()	((cr_alignment & CR_V) ? 0xffff0000 : 0)
#else
#define vectors_base()	(0)
#endif

/*
 * Save the current interrupt enable state & disable IRQs
 */
#define local_irq_save(x)				\
	({						\
		unsigned long temp;			\
		__asm__ __volatile__(			\
		"mrs	%0, cpsr	@ local_irq_save\n" \
		"	orr	%1, %0, #128\n"		\
		"	msr	cpsr_c, %1"		\
		: "=r" (x), "=r" (temp)		\
		:					\
		: "memory");				\
	})

/*
 * Enable IRQs
 */
#define local_irq_enable()				\
	({						\
		unsigned long temp;			\
		__asm__ __volatile__(			\
		"mrs	%0, cpsr	@ local_irq_enable\n" \
		"	bic	%0, %0, #128\n"		\
		"	msr	cpsr_c, %0"		\
		: "=r" (temp)				\
		:					\
		: "memory");				\
	})

/*
 * Disable IRQs
 */
#define local_irq_disable()				\
	({						\
		unsigned long temp;			\
		__asm__ __volatile__(			\
		"mrs	%0, cpsr	@ local_irq_disable\n" \
		"	orr	%0, %0, #128\n"		\
		"	msr	cpsr_c, %0"		\
		: "=r" (temp)				\
		:					\
		: "memory");				\
	})

/*
 * Enable FIQs
 */
#define __stf()						\
	({						\
		unsigned long temp;			\
		__asm__ __volatile__(			\
		"mrs	%0, cpsr	@ stf\n"	\
		"	bic	%0, %0, #64\n"		\
		"	msr	cpsr_c, %0"		\
		: "=r" (temp)				\
		:					\
		: "memory");				\
	})

/*
 * Disable FIQs
 */
#define __clf()						\
	({						\
		unsigned long temp;			\
		__asm__ __volatile__(			\
		"mrs	%0, cpsr	@ clf\n"	\
		"	orr	%0, %0, #64\n"		\
		"	msr	cpsr_c, %0"		\
		: "=r" (temp)				\
		:					\
		: "memory");				\
	})

/*
 * Save the current interrupt enable state.
 */
#define local_save_flags(x)				\
	({						\
		__asm__ __volatile__(			\
		"mrs	%0, cpsr	@ local_save_flags\n" \
		: "=r" (x)				\
		:					\
		: "memory");				\
	})

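/*
 * Example (illustrative sketch, not part of the original header): use
 * local_save_flags() to test whether IRQs are currently masked.  Bit 7
 * of the CPSR is the I (IRQ disable) bit -- the same #128 used in the
 * macros above.
 */
static inline int __example_irqs_disabled(void)
{
	unsigned long flags;

	local_save_flags(flags);
	return (flags & 128) != 0;	/* non-zero: IRQs are masked */
}
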
/*
 * Restore saved IRQ & FIQ state
 */
#define local_irq_restore(x)				\
	__asm__ __volatile__(				\
	"msr	cpsr_c, %0	@ local_irq_restore\n"	\
	:						\
	: "r" (x)					\
	: "memory")

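/*
 * Example (illustrative sketch, not part of the original header): the
 * save/restore pair brackets a critical section and works whether or
 * not IRQs were already disabled on entry, which a plain
 * local_irq_disable()/local_irq_enable() pair would not.
 */
static inline unsigned long __example_atomic_read(volatile unsigned long *p)
{
	unsigned long flags, val;

	local_irq_save(flags);		/* mask IRQs, remember old state */
	val = *p;			/* read without interruption */
	local_irq_restore(flags);	/* put the saved state back */

	return val;
}
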
#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally.  This means that the cache becomes inconsistent, and,
 * since we use normal loads/stores as well, this is really bad.
 * Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects.  There are two work-arounds:
 *	1. Disable interrupts and emulate the atomic swap
 *	2. Clean the cache, perform atomic swap, flush the cache
 *
 * We choose (1) since it's the "easiest" to achieve here and is not
 * dependent on the processor type.
 */
#define swp_is_buggy
#endif

static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	extern void __bad_xchg(volatile void *, int);
	unsigned long ret;
#ifdef swp_is_buggy
	unsigned long flags;
#endif

	switch (size) {
#ifdef swp_is_buggy
	/* work-around (1): emulate the swap with IRQs disabled */
	case 1:
		local_irq_save(flags);
		ret = *(volatile unsigned char *)ptr;
		*(volatile unsigned char *)ptr = x;
		local_irq_restore(flags);
		break;

	case 4:
		local_irq_save(flags);
		ret = *(volatile unsigned long *)ptr;
		*(volatile unsigned long *)ptr = x;
		local_irq_restore(flags);
		break;
#else
	/* native atomic swap instructions */
	case 1:	__asm__ __volatile__ ("swpb	%0, %1, [%2]"
				: "=&r" (ret)
				: "r" (x), "r" (ptr)
				: "memory");
		break;
	case 4:	__asm__ __volatile__ ("swp	%0, %1, [%2]"
				: "=&r" (ret)
				: "r" (x), "r" (ptr)
				: "memory");
		break;
#endif
	default: __bad_xchg(ptr, size), ret = 0;
	}

	return ret;
}
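
/*
 * Example (illustrative sketch, not part of the original header):
 * atomically exchange a word.  The kernel's generic xchg() wrapper,
 * which passes sizeof(*ptr) as the size argument, is defined elsewhere
 * and only assumed here.
 */
static inline unsigned long __example_xchg_word(volatile unsigned long *p,
						unsigned long val)
{
	return __xchg(val, p, sizeof(*p));
}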

#endif	/* __ASM_PROC_SYSTEM_H */