/*
 * arch/mips/include/asm/system.h
 * (scraped copy; source: git.ipfire.org, people/ms/u-boot.git)
 */
/*
 * Copyright (C) 1994 - 1999 by Ralf Baechle
 * Copyright (C) 1996 by Paul M. Antoine
 * Copyright (C) 1994 - 1999 by Ralf Baechle
 *
 * Changed set_except_vector declaration to allow return of previous
 * vector address value - necessary for "borrowing" vectors.
 *
 * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.
 *
 * SPDX-License-Identifier: GPL-2.0
 */
18 #include <asm/sgidefs.h>
19 #include <asm/ptrace.h>
20 #include <linux/stringify.h>
22 #include <linux/kernel.h>
25 static __inline__
void
/*
 * For cli() we have to insert nops to make sure that the new value
 * has actually arrived in the status register before the end of this
 * macro.
 * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
 * two nops.
 */
49 static __inline__
void
70 #define __save_flags(x) \
71 __asm__ __volatile__( \
78 #define __save_and_cli(x) \
79 __asm__ __volatile__( \
86 ".set\tnoreorder\n\t" \
96 #define __restore_flags(flags) \
98 unsigned long __tmp1; \
100 __asm__ __volatile__( \
101 ".set\tnoreorder\t\t\t# __restore_flags\n\t" \
103 "mfc0\t$1, $12\n\t" \
108 "mtc0\t%0, $12\n\t" \
121 extern void __global_sti(void);
122 extern void __global_cli(void);
123 extern unsigned long __global_save_flags(void);
124 extern void __global_restore_flags(unsigned long);
125 # define sti() __global_sti()
126 # define cli() __global_cli()
127 # define save_flags(x) do { x = __global_save_flags(); } while (0)
128 # define restore_flags(x) __global_restore_flags(x)
129 # define save_and_cli(x) do { save_flags(x); cli(); } while(0)
131 #else /* Single processor */
133 # define sti() __sti()
134 # define cli() __cli()
135 # define save_flags(x) __save_flags(x)
136 # define save_and_cli(x) __save_and_cli(x)
137 # define restore_flags(x) __restore_flags(x)
141 /* For spinlocks etc */
142 #define local_irq_save(x) __save_and_cli(x);
143 #define local_irq_restore(x) __restore_flags(x);
144 #define local_irq_disable() __cli();
145 #define local_irq_enable() __sti();
/* These are probably defined overly paranoid ... */
150 #ifdef CONFIG_CPU_HAS_WB
152 #include <asm/wbflush.h>
153 #define rmb() do { } while(0)
154 #define wmb() wbflush()
155 #define mb() wbflush()
157 #else /* CONFIG_CPU_HAS_WB */
160 __asm__ __volatile__( \
161 "# prevent instructions being moved around\n\t" \
162 ".set\tnoreorder\n\t" \
163 "# 8 nops to fool the R4400 pipeline\n\t" \
164 "nop;nop;nop;nop;nop;nop;nop;nop\n\t" \
172 #endif /* CONFIG_CPU_HAS_WB */
175 #define smp_mb() mb()
176 #define smp_rmb() rmb()
177 #define smp_wmb() wmb()
179 #define smp_mb() barrier()
180 #define smp_rmb() barrier()
181 #define smp_wmb() barrier()
184 #define set_mb(var, value) \
185 do { var = value; mb(); } while (0)
187 #define set_wmb(var, value) \
188 do { var = value; wmb(); } while (0)
190 #if !defined (_LANGUAGE_ASSEMBLY)
192 * switch_to(n) should switch tasks to task nr n, first
193 * checking that n isn't the current task, in which case it does nothing.
196 extern asmlinkage
void *resume(void *last
, void *next
);
198 #endif /* !defined (_LANGUAGE_ASSEMBLY) */
200 #define prepare_to_switch() do { } while(0)
201 #define switch_to(prev,next,last) \
203 (last) = resume(prev, next); \
207 * For 32 and 64 bit operands we can take advantage of ll and sc.
208 * FIXME: This doesn't work for R3000 machines.
210 static __inline__
unsigned long xchg_u32(volatile int * m
, unsigned long val
)
212 #ifdef CONFIG_CPU_HAS_LLSC
215 __asm__
__volatile__(
216 ".set\tnoreorder\t\t\t# xchg_u32\n\t"
219 "1:\tmove\t$1, %2\n\t"
225 : "=r" (val
), "=o" (*m
), "=r" (dummy
)
226 : "o" (*m
), "2" (val
)
231 unsigned long flags
, retval
;
237 restore_flags(flags
);
239 #endif /* Processor-dependent optimization */
242 #define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
243 #define tas(ptr) (xchg((ptr),1))
245 static __inline__
unsigned long
246 __xchg(unsigned long x
, volatile void * ptr
, int size
)
250 return xchg_u32(ptr
, x
);
255 extern void *set_except_vector(int n
, void *addr
);
257 extern void __die(const char *, struct pt_regs
*, const char *where
,
258 unsigned long line
) __attribute__((noreturn
));
259 extern void __die_if_kernel(const char *, struct pt_regs
*, const char *where
,
262 #define die(msg, regs) \
263 __die(msg, regs, __FILE__ ":"__FUNCTION__, __LINE__)
264 #define die_if_kernel(msg, regs) \
265 __die_if_kernel(msg, regs, __FILE__ ":"__FUNCTION__, __LINE__)
267 static inline void execution_hazard_barrier(void)
269 __asm__
__volatile__(
275 static inline void instruction_hazard_barrier(void)
280 __stringify(PTR_LA
) "\t%0, 1f\n"
286 #endif /* _ASM_SYSTEM_H */