/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Low level functions for atomic operations
 *
 * Copyright IBM Corp. 1999, 2016
 */

#ifndef __ARCH_S390_ATOMIC_OPS__
#define __ARCH_S390_ATOMIC_OPS__

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

#define __ATOMIC_OP(op_name, op_type, op_string, op_barrier) \
static inline op_type op_name(op_type val, op_type *ptr) \
{ \
        op_type old; \
        \
        asm volatile( \
                op_string " %[old],%[val],%[ptr]\n" \
                op_barrier \
                : [old] "=d" (old), [ptr] "+Q" (*ptr) \
                : [val] "d" (val) : "cc", "memory"); \
        return old; \
} \

#define __ATOMIC_OPS(op_name, op_type, op_string) \
        __ATOMIC_OP(op_name, op_type, op_string, "\n") \
        __ATOMIC_OP(op_name##_barrier, op_type, op_string, "bcr 14,0\n")
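/*
 * Each __ATOMIC_OPS() expansion below generates two flavours of a helper:
 * a plain one and a *_barrier one.  The barrier flavour appends "bcr 14,0",
 * which serializes the CPU (a full memory barrier) on machines with the
 * fast-BCR-serialization facility, i.e. z196 and later.
 */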

__ATOMIC_OPS(__atomic_add, int, "laa")
__ATOMIC_OPS(__atomic_and, int, "lan")
__ATOMIC_OPS(__atomic_or, int, "lao")
__ATOMIC_OPS(__atomic_xor, int, "lax")

__ATOMIC_OPS(__atomic64_add, long, "laag")
__ATOMIC_OPS(__atomic64_and, long, "lang")
__ATOMIC_OPS(__atomic64_or, long, "laog")
__ATOMIC_OPS(__atomic64_xor, long, "laxg")
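/*
 * The mnemonics above are the z196 interlocked-access facility
 * instructions: load and add/and/or/exclusive or, in 32-bit and 64-bit
 * ("g") forms.  Each one atomically applies the operation to *ptr and
 * returns the previous contents, so for example
 * __ATOMIC_OPS(__atomic_add, int, "laa") generates
 *
 *        int __atomic_add(int val, int *ptr);
 *        int __atomic_add_barrier(int val, int *ptr);
 *
 * both of which add val to *ptr and return the old value of *ptr.
 */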

#undef __ATOMIC_OPS
#undef __ATOMIC_OP

static inline void __atomic_add_const(int val, int *ptr)
{
        asm volatile(
                " asi %[ptr],%[val]\n"
                : [ptr] "+Q" (*ptr) : [val] "i" (val) : "cc");
}

static inline void __atomic64_add_const(long val, long *ptr)
{
        asm volatile(
                " agsi %[ptr],%[val]\n"
                : [ptr] "+Q" (*ptr) : [val] "i" (val) : "cc");
}
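/*
 * asi/agsi add a small signed immediate directly to an int/long in
 * storage; with the interlocked-access facility (implied by the z196
 * config option above) the update is performed atomically.  Note the
 * "i" constraint: val must be a compile-time constant, and unlike the
 * helpers above no old value is returned.
 */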

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

#define __ATOMIC_OP(op_name, op_string) \
static inline int op_name(int val, int *ptr) \
{ \
        int old, new; \
        \
        asm volatile( \
                "0: lr %[new],%[old]\n" \
                op_string " %[new],%[val]\n" \
                " cs %[old],%[new],%[ptr]\n" \
                " jl 0b" \
                : [old] "=d" (old), [new] "=&d" (new), [ptr] "+Q" (*ptr) \
                : [val] "d" (val), "0" (*ptr) : "cc", "memory"); \
        return old; \
}

#define __ATOMIC_OPS(op_name, op_string) \
        __ATOMIC_OP(op_name, op_string) \
        __ATOMIC_OP(op_name##_barrier, op_string)

__ATOMIC_OPS(__atomic_add, "ar")
__ATOMIC_OPS(__atomic_and, "nr")
__ATOMIC_OPS(__atomic_or, "or")
__ATOMIC_OPS(__atomic_xor, "xr")
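/*
 * Pre-z196 fallback: build the new value in registers and retry a
 * compare-and-swap (cs) until it succeeds; "jl 0b" branches back while
 * the swap fails (condition code 1).  Conceptually:
 *
 *        do {
 *                old = *ptr;
 *                new = old <op> val;
 *        } while (compare-and-swap of *ptr from old to new fails);
 *        return old;
 *
 * The *_barrier flavours are identical to the plain ones here, since
 * compare-and-swap already serializes the CPU.
 */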

#undef __ATOMIC_OPS

#define __ATOMIC64_OP(op_name, op_string) \
static inline long op_name(long val, long *ptr) \
{ \
        long old, new; \
        \
        asm volatile( \
                "0: lgr %[new],%[old]\n" \
                op_string " %[new],%[val]\n" \
                " csg %[old],%[new],%[ptr]\n" \
                " jl 0b" \
                : [old] "=d" (old), [new] "=&d" (new), [ptr] "+Q" (*ptr) \
                : [val] "d" (val), "0" (*ptr) : "cc", "memory"); \
        return old; \
}

#define __ATOMIC64_OPS(op_name, op_string) \
        __ATOMIC64_OP(op_name, op_string) \
        __ATOMIC64_OP(op_name##_barrier, op_string)

__ATOMIC64_OPS(__atomic64_add, "agr")
__ATOMIC64_OPS(__atomic64_and, "ngr")
__ATOMIC64_OPS(__atomic64_or, "ogr")
__ATOMIC64_OPS(__atomic64_xor, "xgr")
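/* The 64-bit helpers above mirror the 32-bit loop, using lgr/csg on longs. */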

#undef __ATOMIC64_OPS

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

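/*
 * Compare-and-swap helpers.  These map to the compiler's
 * __sync_val_compare_and_swap()/__sync_bool_compare_and_swap() builtins,
 * which gcc implements with cs/csg on s390.  The *_cmpxchg variants
 * return the previous value of *ptr, the *_cmpxchg_bool variants return
 * whether the swap was actually performed.
 */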
static inline int __atomic_cmpxchg(int *ptr, int old, int new)
{
        return __sync_val_compare_and_swap(ptr, old, new);
}

static inline int __atomic_cmpxchg_bool(int *ptr, int old, int new)
{
        return __sync_bool_compare_and_swap(ptr, old, new);
}

static inline long __atomic64_cmpxchg(long *ptr, long old, long new)
{
        return __sync_val_compare_and_swap(ptr, old, new);
}

static inline long __atomic64_cmpxchg_bool(long *ptr, long old, long new)
{
        return __sync_bool_compare_and_swap(ptr, old, new);
}

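/*
 * Usage sketch (illustrative, not part of this header): the atomic_t
 * operations in asm/atomic.h are built on these helpers, roughly along
 * the lines of
 *
 *        static inline int atomic_add_return(int i, atomic_t *v)
 *        {
 *                return __atomic_add_barrier(i, &v->counter) + i;
 *        }
 *
 * i.e. the fetch-style primitives return the old value and the caller
 * derives the new one.
 */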
#endif /* __ARCH_S390_ATOMIC_OPS__ */