/*
 * Low level functions for atomic operations
 *
 * Copyright IBM Corp. 1999, 2016
 */

#ifndef __ARCH_S390_ATOMIC_OPS__
#define __ARCH_S390_ATOMIC_OPS__

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

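/*
 * The z196 interlocked-access facility provides instructions (LAA, LAN,
 * LAO, LAX and their 64-bit ..G forms) that atomically apply an operation
 * to a memory location and return the old value in a single instruction,
 * so no compare-and-swap retry loop is needed on these machines.
 */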
#define __ATOMIC_OP(op_name, op_type, op_string, op_barrier)		\
static inline op_type op_name(op_type val, op_type *ptr)		\
{									\
	op_type old;							\
									\
	asm volatile(							\
		op_string "	%[old],%[val],%[ptr]\n"			\
		op_barrier						\
		: [old] "=d" (old), [ptr] "+Q" (*ptr)			\
		: [val] "d" (val) : "cc", "memory");			\
	return old;							\
}

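/*
 * Each operation comes in a plain and a _barrier flavor; the latter
 * appends "bcr 14,0", which performs serialization (acts as a full
 * memory barrier) on machines with the fast-BCR-serialization facility
 * without actually branching.
 */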
#define __ATOMIC_OPS(op_name, op_type, op_string)			\
	__ATOMIC_OP(op_name, op_type, op_string, "\n")			\
	__ATOMIC_OP(op_name##_barrier, op_type, op_string, "bcr 14,0\n")

__ATOMIC_OPS(__atomic_add, int, "laa")
__ATOMIC_OPS(__atomic_and, int, "lan")
__ATOMIC_OPS(__atomic_or, int, "lao")
__ATOMIC_OPS(__atomic_xor, int, "lax")

__ATOMIC_OPS(__atomic64_add, long, "laag")
__ATOMIC_OPS(__atomic64_and, long, "lang")
__ATOMIC_OPS(__atomic64_or, long, "laog")
__ATOMIC_OPS(__atomic64_xor, long, "laxg")
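
/*
 * Example (illustrative only, not part of this header): atomically
 * increment a counter and fetch its previous value:
 *
 *	static int counter;
 *	int old = __atomic_add(1, &counter);	(old == value before the add)
 */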

#undef __ATOMIC_OPS
#undef __ATOMIC_OP

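/*
 * ASI and AGSI add a signed 8-bit immediate directly to a location in
 * storage, so adding a small compile-time constant needs neither a
 * register for the addend nor a retry loop.
 */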
static inline void __atomic_add_const(int val, int *ptr)
{
	asm volatile(
		"	asi	%[ptr],%[val]\n"
		: [ptr] "+Q" (*ptr) : [val] "i" (val) : "cc");
}

static inline void __atomic64_add_const(long val, long *ptr)
{
	asm volatile(
		"	agsi	%[ptr],%[val]\n"
		: [ptr] "+Q" (*ptr) : [val] "i" (val) : "cc");
}
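
/*
 * Example (illustrative only): __atomic_add_const(1, &counter) emits a
 * single "asi" instruction. The "i" constraint requires a compile-time
 * constant, and the immediate must fit in a signed byte (-128..127).
 */
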
#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

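/*
 * Pre-z196 fallback: emulate the atomic operations with a classic
 * compare-and-swap loop. Load the old value, compute the new value in a
 * scratch register, then CS it back into memory; if another CPU changed
 * the location in the meantime, CS sets condition code 1 and "jl 0b"
 * retries with the freshly loaded value.
 */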
#define __ATOMIC_OP(op_name, op_string)					\
static inline int op_name(int val, int *ptr)				\
{									\
	int old, new;							\
									\
	asm volatile(							\
		"0:	lr	%[new],%[old]\n"			\
		op_string "	%[new],%[val]\n"			\
		"	cs	%[old],%[new],%[ptr]\n"			\
		"	jl	0b"					\
		: [old] "=d" (old), [new] "=&d" (new), [ptr] "+Q" (*ptr)\
		: [val] "d" (val), "0" (*ptr) : "cc", "memory");	\
	return old;							\
}

#define __ATOMIC_OPS(op_name, op_string)				\
	__ATOMIC_OP(op_name, op_string)					\
	__ATOMIC_OP(op_name##_barrier, op_string)

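/*
 * The _barrier variants are identical to the plain ones on this path:
 * CS is itself a serializing instruction, so the compare-and-swap loop
 * already provides the required ordering. Both names exist to keep the
 * API uniform with the z196 code path above.
 */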
__ATOMIC_OPS(__atomic_add, "ar")
__ATOMIC_OPS(__atomic_and, "nr")
__ATOMIC_OPS(__atomic_or, "or")
__ATOMIC_OPS(__atomic_xor, "xr")

#undef __ATOMIC_OPS

#define __ATOMIC64_OP(op_name, op_string)				\
static inline long op_name(long val, long *ptr)				\
{									\
	long old, new;							\
									\
	asm volatile(							\
		"0:	lgr	%[new],%[old]\n"			\
		op_string "	%[new],%[val]\n"			\
		"	csg	%[old],%[new],%[ptr]\n"			\
		"	jl	0b"					\
		: [old] "=d" (old), [new] "=&d" (new), [ptr] "+Q" (*ptr)\
		: [val] "d" (val), "0" (*ptr) : "cc", "memory");	\
	return old;							\
}

#define __ATOMIC64_OPS(op_name, op_string)				\
	__ATOMIC64_OP(op_name, op_string)				\
	__ATOMIC64_OP(op_name##_barrier, op_string)

__ATOMIC64_OPS(__atomic64_add, "agr")
__ATOMIC64_OPS(__atomic64_and, "ngr")
__ATOMIC64_OPS(__atomic64_or, "ogr")
__ATOMIC64_OPS(__atomic64_xor, "xgr")

#undef __ATOMIC64_OPS

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

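/*
 * The cmpxchg wrappers map to the compiler's __sync builtins, which are
 * implemented with CS (32-bit) and CSG (64-bit) on s390. The _val_ form
 * returns the previous contents of *ptr; the _bool_ form returns whether
 * the swap actually took place.
 */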
static inline int __atomic_cmpxchg(int *ptr, int old, int new)
{
	return __sync_val_compare_and_swap(ptr, old, new);
}

static inline int __atomic_cmpxchg_bool(int *ptr, int old, int new)
{
	return __sync_bool_compare_and_swap(ptr, old, new);
}

static inline long __atomic64_cmpxchg(long *ptr, long old, long new)
{
	return __sync_val_compare_and_swap(ptr, old, new);
}

static inline long __atomic64_cmpxchg_bool(long *ptr, long old, long new)
{
	return __sync_bool_compare_and_swap(ptr, old, new);
}
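
/*
 * Illustrative sketch (not part of this header): a typical cmpxchg retry
 * loop built on these primitives, atomically storing max(*ptr, val):
 *
 *	static inline void __example_atomic_max(int val, int *ptr)
 *	{
 *		int old = *ptr;
 *
 *		while (old < val) {
 *			int prev = __atomic_cmpxchg(ptr, old, val);
 *			if (prev == old)
 *				break;
 *			old = prev;
 *		}
 *	}
 */
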
#endif /* __ARCH_S390_ATOMIC_OPS__ */