/* SPDX-License-Identifier: GPL-2.0 */

#ifndef __ASM_CSKY_ATOMIC_H
#define __ASM_CSKY_ATOMIC_H

#ifdef CONFIG_SMP
#include <asm-generic/atomic64.h>

#include <asm/cmpxchg.h>
#include <asm/barrier.h>
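
/*
 * Reading the bar.* mnemonics as b=before, a=after, r=read, w=write:
 * __bar_brarw() should order earlier reads before later reads and
 * writes (acquire-style), and __bar_brwaw() earlier reads and writes
 * before later writes (release-style).
 */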
#define __atomic_acquire_fence()	__bar_brarw()

#define __atomic_release_fence()	__bar_brwaw()

static __always_inline int arch_atomic_read(const atomic_t *v)
{
	return READ_ONCE(v->counter);
}

static __always_inline void arch_atomic_set(atomic_t *v, int i)
{
	WRITE_ONCE(v->counter, i);
}
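
/*
 * Non-value-returning ops: a ldex.w/stex.w (load-/store-exclusive)
 * retry loop. stex.w leaves 0 in %0 when it loses the reservation,
 * so bez branches back to retry until the store succeeds.
 */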
#define ATOMIC_OP(op)							\
static __always_inline							\
void arch_atomic_##op(int i, atomic_t *v)				\
{									\
	unsigned long tmp;						\
	__asm__ __volatile__ (						\
	"1:	ldex.w		%0, (%2)	\n"			\
	"	" #op "		%0, %1		\n"			\
	"	stex.w		%0, (%2)	\n"			\
	"	bez		%0, 1b		\n"			\
	: "=&r" (tmp)							\
	: "r" (i), "r" (&v->counter)					\
	: "memory");							\
}

ATOMIC_OP(add)
ATOMIC_OP(sub)
ATOMIC_OP(and)
ATOMIC_OP( or)
ATOMIC_OP(xor)

#undef ATOMIC_OP
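
/*
 * Value-returning ops: only the _relaxed forms are implemented here;
 * the generic atomic fallback layer is expected to build the _acquire,
 * _release and fully-ordered variants from them using the fences
 * defined above.
 */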
#define ATOMIC_FETCH_OP(op)						\
static __always_inline							\
int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v)		\
{									\
	register int ret, tmp;						\
	__asm__ __volatile__ (						\
	"1:	ldex.w		%0, (%3)	\n"			\
	"	mov		%1, %0		\n"			\
	"	" #op "		%0, %2		\n"			\
	"	stex.w		%0, (%3)	\n"			\
	"	bez		%0, 1b		\n"			\
	: "=&r" (tmp), "=&r" (ret)					\
	: "r" (i), "r" (&v->counter)					\
	: "memory");							\
	return ret;							\
}
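
/*
 * fetch_##op##_relaxed returns the old value, so the op##_return
 * variant is obtained by applying the operator once more in C.
 */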
#define ATOMIC_OP_RETURN(op, c_op)					\
static __always_inline							\
int arch_atomic_##op##_return_relaxed(int i, atomic_t *v)		\
{									\
	return arch_atomic_fetch_##op##_relaxed(i, v) c_op i;		\
}

#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_FETCH_OP(op)						\
	ATOMIC_OP_RETURN(op, c_op)

ATOMIC_OPS(add, +)
ATOMIC_OPS(sub, -)

#define arch_atomic_fetch_add_relaxed	arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed	arch_atomic_fetch_sub_relaxed

#define arch_atomic_add_return_relaxed	arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed	arch_atomic_sub_return_relaxed

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN

#define ATOMIC_OPS(op)							\
	ATOMIC_FETCH_OP(op)

ATOMIC_OPS(and)
ATOMIC_OPS( or)
ATOMIC_OPS(xor)

#define arch_atomic_fetch_and_relaxed	arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_or_relaxed	arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed	arch_atomic_fetch_xor_relaxed

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
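
/*
 * xchg/cmpxchg are thin wrappers around the asm/cmpxchg.h primitives;
 * the final argument (4) selects the 32-bit word variants. Acquire and
 * fully-ordered cmpxchg are provided directly via __cmpxchg_acquire()
 * and __cmpxchg() rather than being synthesized from the relaxed form.
 */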
#define ATOMIC_OP()							\
static __always_inline							\
int arch_atomic_xchg_relaxed(atomic_t *v, int n)			\
{									\
	return __xchg_relaxed(n, &(v->counter), 4);			\
}									\
static __always_inline							\
int arch_atomic_cmpxchg_relaxed(atomic_t *v, int o, int n)		\
{									\
	return __cmpxchg_relaxed(&(v->counter), o, n, 4);		\
}									\
static __always_inline							\
int arch_atomic_cmpxchg_acquire(atomic_t *v, int o, int n)		\
{									\
	return __cmpxchg_acquire(&(v->counter), o, n, 4);		\
}									\
static __always_inline							\
int arch_atomic_cmpxchg(atomic_t *v, int o, int n)			\
{									\
	return __cmpxchg(&(v->counter), o, n, 4);			\
}

#define ATOMIC_OPS()							\
	ATOMIC_OP()

ATOMIC_OPS()

#define arch_atomic_xchg_relaxed	arch_atomic_xchg_relaxed
#define arch_atomic_cmpxchg_relaxed	arch_atomic_cmpxchg_relaxed
#define arch_atomic_cmpxchg_acquire	arch_atomic_cmpxchg_acquire
#define arch_atomic_cmpxchg		arch_atomic_cmpxchg

#undef ATOMIC_OPS
#undef ATOMIC_OP
#else
#include <asm-generic/atomic.h>
#endif

#endif /* __ASM_CSKY_ATOMIC_H */