arm64: atomics: implement native {relaxed, acquire, release} atomics
Commit 654672d ("locking/atomics: Add _{acquire|release|relaxed}()
variants of some atomic operations") introduced a relaxed atomic API to
Linux that maps nicely onto the arm64 memory model, including the new
ARMv8.1 atomic instructions.

This patch hooks up the API to our relaxed atomic instructions, rather
than have them all expand to the full-barrier variants as they do
currently.

Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
wildea01 authored and ctmarinas committed Oct 12, 2015
1 parent e8f3010 commit 305d454
Showing 4 changed files with 371 additions and 262 deletions.
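Before the per-file diffs, a quick illustration of what the three new orderings buy. This is a minimal editorial sketch, not code from the patch: the flag/payload names are invented, while the atomic_* entry points are the generic API being wired up here.

#include <linux/atomic.h>

static int payload;			/* plain data, invented for the example */
static atomic_t flag = ATOMIC_INIT(0);

void publisher(void)
{
	payload = 42;
	/* _release: the store to payload above cannot be reordered
	 * after this atomic store-release. */
	(void)atomic_xchg_release(&flag, 1);
}

int subscriber(void)
{
	/* _acquire: on a successful exchange, the read of payload
	 * below cannot be reordered before this atomic. */
	while (atomic_cmpxchg_acquire(&flag, 1, 0) != 1)
		cpu_relax();		/* from <asm/processor.h> */
	return payload;			/* guaranteed to observe 42 */
}

The unsuffixed variants keep their full-barrier semantics and remain the default; _relaxed guarantees atomicity only.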
63 changes: 59 additions & 4 deletions arch/arm64/include/asm/atomic.h
@@ -55,13 +55,42 @@
 
 #define atomic_read(v) READ_ONCE((v)->counter)
 #define atomic_set(v, i) (((v)->counter) = (i))
+
+#define atomic_add_return_relaxed atomic_add_return_relaxed
+#define atomic_add_return_acquire atomic_add_return_acquire
+#define atomic_add_return_release atomic_add_return_release
+#define atomic_add_return atomic_add_return
+
+#define atomic_inc_return_relaxed(v) atomic_add_return_relaxed(1, (v))
+#define atomic_inc_return_acquire(v) atomic_add_return_acquire(1, (v))
+#define atomic_inc_return_release(v) atomic_add_return_release(1, (v))
+#define atomic_inc_return(v) atomic_add_return(1, (v))
+
+#define atomic_sub_return_relaxed atomic_sub_return_relaxed
+#define atomic_sub_return_acquire atomic_sub_return_acquire
+#define atomic_sub_return_release atomic_sub_return_release
+#define atomic_sub_return atomic_sub_return
+
+#define atomic_dec_return_relaxed(v) atomic_sub_return_relaxed(1, (v))
+#define atomic_dec_return_acquire(v) atomic_sub_return_acquire(1, (v))
+#define atomic_dec_return_release(v) atomic_sub_return_release(1, (v))
+#define atomic_dec_return(v) atomic_sub_return(1, (v))
+
+#define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
+#define atomic_xchg_acquire(v, new) xchg_acquire(&((v)->counter), (new))
+#define atomic_xchg_release(v, new) xchg_release(&((v)->counter), (new))
 #define atomic_xchg(v, new) xchg(&((v)->counter), (new))
+
+#define atomic_cmpxchg_relaxed(v, old, new) \
+	cmpxchg_relaxed(&((v)->counter), (old), (new))
+#define atomic_cmpxchg_acquire(v, old, new) \
+	cmpxchg_acquire(&((v)->counter), (old), (new))
+#define atomic_cmpxchg_release(v, old, new) \
+	cmpxchg_release(&((v)->counter), (old), (new))
 #define atomic_cmpxchg(v, old, new) cmpxchg(&((v)->counter), (old), (new))
 
 #define atomic_inc(v) atomic_add(1, (v))
 #define atomic_dec(v) atomic_sub(1, (v))
-#define atomic_inc_return(v) atomic_add_return(1, (v))
-#define atomic_dec_return(v) atomic_sub_return(1, (v))
 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
 #define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
 #define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0)
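Most of these wrappers exist so callers can opt out of barriers they do not need. A minimal sketch (function and variable names are hypothetical): an ID allocator only needs atomicity, so the relaxed increment suffices, and before this patch it would still have expanded to the full-barrier atomic_inc_return().

/* Hypothetical caller: monotonic IDs need atomicity, not ordering. */
static atomic_t next_id = ATOMIC_INIT(0);

int alloc_id(void)
{
	/* Expands via the alias above to atomic_add_return_relaxed(1, v):
	 * on arm64, after this series, an ldxr/stxr loop with no dmb. */
	return atomic_inc_return_relaxed(&next_id);
}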
@@ -75,13 +104,39 @@
 #define ATOMIC64_INIT ATOMIC_INIT
 #define atomic64_read atomic_read
 #define atomic64_set atomic_set
+
+#define atomic64_add_return_relaxed atomic64_add_return_relaxed
+#define atomic64_add_return_acquire atomic64_add_return_acquire
+#define atomic64_add_return_release atomic64_add_return_release
+#define atomic64_add_return atomic64_add_return
+
+#define atomic64_inc_return_relaxed(v) atomic64_add_return_relaxed(1, (v))
+#define atomic64_inc_return_acquire(v) atomic64_add_return_acquire(1, (v))
+#define atomic64_inc_return_release(v) atomic64_add_return_release(1, (v))
+#define atomic64_inc_return(v) atomic64_add_return(1, (v))
+
+#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
+#define atomic64_sub_return_acquire atomic64_sub_return_acquire
+#define atomic64_sub_return_release atomic64_sub_return_release
+#define atomic64_sub_return atomic64_sub_return
+
+#define atomic64_dec_return_relaxed(v) atomic64_sub_return_relaxed(1, (v))
+#define atomic64_dec_return_acquire(v) atomic64_sub_return_acquire(1, (v))
+#define atomic64_dec_return_release(v) atomic64_sub_return_release(1, (v))
+#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
+
+#define atomic64_xchg_relaxed atomic_xchg_relaxed
+#define atomic64_xchg_acquire atomic_xchg_acquire
+#define atomic64_xchg_release atomic_xchg_release
 #define atomic64_xchg atomic_xchg
+
+#define atomic64_cmpxchg_relaxed atomic_cmpxchg_relaxed
+#define atomic64_cmpxchg_acquire atomic_cmpxchg_acquire
+#define atomic64_cmpxchg_release atomic_cmpxchg_release
 #define atomic64_cmpxchg atomic_cmpxchg
 
 #define atomic64_inc(v) atomic64_add(1, (v))
 #define atomic64_dec(v) atomic64_sub(1, (v))
-#define atomic64_inc_return(v) atomic64_add_return(1, (v))
-#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
 #define atomic64_dec_and_test(v) (atomic64_dec_return(v) == 0)
 #define atomic64_sub_and_test(i, v) (atomic64_sub_return((i), (v)) == 0)
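Note that the atomic64_xchg/cmpxchg families can simply alias their 32-bit spellings: the underlying xchg()/cmpxchg() macros dispatch on the size of the counter, so the alias merely forwards a different pointer type. A sketch under that assumption (variable names are illustrative):

/* Both calls funnel into the same size-polymorphic xchg_acquire()
 * macro; atomic_t's counter is an int (4 bytes), atomic64_t's is a
 * long (8 bytes), so the matching operand width is selected. */
static atomic_t   a32 = ATOMIC_INIT(0);
static atomic64_t a64 = ATOMIC64_INIT(0);

void demo(void)
{
	int  old32 = atomic_xchg_acquire(&a32, 1);	/* 4-byte case */
	long old64 = atomic64_xchg_acquire(&a64, 1);	/* 8-byte case */

	(void)old32;
	(void)old64;
}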
98 changes: 60 additions & 38 deletions arch/arm64/include/asm/atomic_ll_sc.h
@@ -55,40 +55,47 @@ __LL_SC_PREFIX(atomic_##op(int i, atomic_t *v)) \
 } \
 __LL_SC_EXPORT(atomic_##op);
 
-#define ATOMIC_OP_RETURN(op, asm_op) \
+#define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op) \
 __LL_SC_INLINE int \
-__LL_SC_PREFIX(atomic_##op##_return(int i, atomic_t *v)) \
+__LL_SC_PREFIX(atomic_##op##_return##name(int i, atomic_t *v)) \
 { \
 	unsigned long tmp; \
 	int result; \
 \
-	asm volatile("// atomic_" #op "_return\n" \
+	asm volatile("// atomic_" #op "_return" #name "\n" \
 "	prfm	pstl1strm, %2\n" \
-"1:	ldxr	%w0, %2\n" \
+"1:	ld" #acq "xr	%w0, %2\n" \
 "	" #asm_op "	%w0, %w0, %w3\n" \
-"	stlxr	%w1, %w0, %2\n" \
-"	cbnz	%w1, 1b" \
+"	st" #rel "xr	%w1, %w0, %2\n" \
+"	cbnz	%w1, 1b\n" \
+"	" #mb \
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
 	: "Ir" (i) \
-	: "memory"); \
+	: cl); \
 \
-	smp_mb(); \
 	return result; \
 } \
-__LL_SC_EXPORT(atomic_##op##_return);
+__LL_SC_EXPORT(atomic_##op##_return##name);
 
-#define ATOMIC_OPS(op, asm_op) \
-	ATOMIC_OP(op, asm_op) \
-	ATOMIC_OP_RETURN(op, asm_op)
+#define ATOMIC_OPS(...) \
+	ATOMIC_OP(__VA_ARGS__) \
+	ATOMIC_OP_RETURN( , dmb ish, , l, "memory", __VA_ARGS__)
+
+#define ATOMIC_OPS_RLX(...) \
+	ATOMIC_OPS(__VA_ARGS__) \
+	ATOMIC_OP_RETURN(_relaxed, , , , , __VA_ARGS__)\
+	ATOMIC_OP_RETURN(_acquire, , a, , "memory", __VA_ARGS__)\
+	ATOMIC_OP_RETURN(_release, , , l, "memory", __VA_ARGS__)
 
-ATOMIC_OPS(add, add)
-ATOMIC_OPS(sub, sub)
+ATOMIC_OPS_RLX(add, add)
+ATOMIC_OPS_RLX(sub, sub)
 
 ATOMIC_OP(and, and)
 ATOMIC_OP(andnot, bic)
 ATOMIC_OP(or, orr)
 ATOMIC_OP(xor, eor)
 
+#undef ATOMIC_OPS_RLX
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
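To make the token-pasting concrete, this is roughly what ATOMIC_OP_RETURN(_acquire, , a, , "memory", add, add) expands to once # stringification and ## pasting are applied: acq=a turns ldxr into ldaxr, the empty rel slot leaves the store-exclusive plain, and the empty mb slot emits no trailing barrier.

__LL_SC_INLINE int
__LL_SC_PREFIX(atomic_add_return_acquire(int i, atomic_t *v))
{
	unsigned long tmp;
	int result;

	asm volatile("// atomic_add_return_acquire\n"
"	prfm	pstl1strm, %2\n"
"1:	ldaxr	%w0, %2\n"		/* load-acquire exclusive */
"	add	%w0, %w0, %w3\n"
"	stxr	%w1, %w0, %2\n"		/* no release on the store */
"	cbnz	%w1, 1b\n"
"	"				/* #mb is empty: no dmb ish */
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i)
	: "memory");

	return result;
}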
@@ -111,40 +118,47 @@ __LL_SC_PREFIX(atomic64_##op(long i, atomic64_t *v)) \
 } \
 __LL_SC_EXPORT(atomic64_##op);
 
-#define ATOMIC64_OP_RETURN(op, asm_op) \
+#define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op) \
 __LL_SC_INLINE long \
-__LL_SC_PREFIX(atomic64_##op##_return(long i, atomic64_t *v)) \
+__LL_SC_PREFIX(atomic64_##op##_return##name(long i, atomic64_t *v)) \
 { \
 	long result; \
 	unsigned long tmp; \
 \
-	asm volatile("// atomic64_" #op "_return\n" \
+	asm volatile("// atomic64_" #op "_return" #name "\n" \
 "	prfm	pstl1strm, %2\n" \
-"1:	ldxr	%0, %2\n" \
+"1:	ld" #acq "xr	%0, %2\n" \
 "	" #asm_op "	%0, %0, %3\n" \
-"	stlxr	%w1, %0, %2\n" \
-"	cbnz	%w1, 1b" \
+"	st" #rel "xr	%w1, %0, %2\n" \
+"	cbnz	%w1, 1b\n" \
+"	" #mb \
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
 	: "Ir" (i) \
-	: "memory"); \
+	: cl); \
 \
-	smp_mb(); \
 	return result; \
 } \
-__LL_SC_EXPORT(atomic64_##op##_return);
+__LL_SC_EXPORT(atomic64_##op##_return##name);
 
-#define ATOMIC64_OPS(op, asm_op) \
-	ATOMIC64_OP(op, asm_op) \
-	ATOMIC64_OP_RETURN(op, asm_op)
+#define ATOMIC64_OPS(...) \
+	ATOMIC64_OP(__VA_ARGS__) \
+	ATOMIC64_OP_RETURN(, dmb ish, , l, "memory", __VA_ARGS__)
+
+#define ATOMIC64_OPS_RLX(...) \
+	ATOMIC64_OPS(__VA_ARGS__) \
+	ATOMIC64_OP_RETURN(_relaxed,, , , , __VA_ARGS__) \
+	ATOMIC64_OP_RETURN(_acquire,, a, , "memory", __VA_ARGS__) \
+	ATOMIC64_OP_RETURN(_release,, , l, "memory", __VA_ARGS__)
 
-ATOMIC64_OPS(add, add)
-ATOMIC64_OPS(sub, sub)
+ATOMIC64_OPS_RLX(add, add)
+ATOMIC64_OPS_RLX(sub, sub)
 
 ATOMIC64_OP(and, and)
 ATOMIC64_OP(andnot, bic)
 ATOMIC64_OP(or, orr)
 ATOMIC64_OP(xor, eor)
 
+#undef ATOMIC64_OPS_RLX
 #undef ATOMIC64_OPS
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
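The _relaxed row is the most aggressive: mb, acq, rel and cl are all empty, so ATOMIC64_OP_RETURN(_relaxed,, , , , sub, sub), for example, comes out as a bare exclusive-load/store loop with no barrier and, notably, no "memory" clobber, leaving the compiler free to keep unrelated values cached across the operation. A hand-expanded sketch:

__LL_SC_INLINE long
__LL_SC_PREFIX(atomic64_sub_return_relaxed(long i, atomic64_t *v))
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_sub_return_relaxed\n"
"	prfm	pstl1strm, %2\n"
"1:	ldxr	%0, %2\n"		/* plain exclusive load */
"	sub	%0, %0, %3\n"
"	stxr	%w1, %0, %2\n"		/* plain exclusive store */
"	cbnz	%w1, 1b\n"
"	"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i)
	: );			/* empty clobber list: no compiler barrier */

	return result;
}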
@@ -172,7 +186,7 @@ __LL_SC_PREFIX(atomic64_dec_if_positive(atomic64_t *v))
 }
 __LL_SC_EXPORT(atomic64_dec_if_positive);
 
-#define __CMPXCHG_CASE(w, sz, name, mb, rel, cl) \
+#define __CMPXCHG_CASE(w, sz, name, mb, acq, rel, cl) \
 __LL_SC_INLINE unsigned long \
 __LL_SC_PREFIX(__cmpxchg_case_##name(volatile void *ptr, \
 				     unsigned long old, \
@@ -182,7 +196,7 @@ __LL_SC_PREFIX(__cmpxchg_case_##name(volatile void *ptr, \
 \
 	asm volatile( \
 	"	prfm	pstl1strm, %[v]\n" \
-	"1:	ldxr" #sz "\t%" #w "[oldval], %[v]\n" \
+	"1:	ld" #acq "xr" #sz "\t%" #w "[oldval], %[v]\n" \
 	"	eor	%" #w "[tmp], %" #w "[oldval], %" #w "[old]\n" \
 	"	cbnz	%" #w "[tmp], 2f\n" \
 	"	st" #rel "xr" #sz "\t%w[tmp], %" #w "[new], %[v]\n" \
@@ -199,14 +213,22 @@ __LL_SC_PREFIX(__cmpxchg_case_##name(volatile void *ptr, \
 } \
 __LL_SC_EXPORT(__cmpxchg_case_##name);
 
-__CMPXCHG_CASE(w, b, 1, , , )
-__CMPXCHG_CASE(w, h, 2, , , )
-__CMPXCHG_CASE(w, , 4, , , )
-__CMPXCHG_CASE( , , 8, , , )
-__CMPXCHG_CASE(w, b, mb_1, dmb ish, l, "memory")
-__CMPXCHG_CASE(w, h, mb_2, dmb ish, l, "memory")
-__CMPXCHG_CASE(w, , mb_4, dmb ish, l, "memory")
-__CMPXCHG_CASE( , , mb_8, dmb ish, l, "memory")
+__CMPXCHG_CASE(w, b, 1, , , , )
+__CMPXCHG_CASE(w, h, 2, , , , )
+__CMPXCHG_CASE(w, , 4, , , , )
+__CMPXCHG_CASE( , , 8, , , , )
+__CMPXCHG_CASE(w, b, acq_1, , a, , "memory")
+__CMPXCHG_CASE(w, h, acq_2, , a, , "memory")
+__CMPXCHG_CASE(w, , acq_4, , a, , "memory")
+__CMPXCHG_CASE( , , acq_8, , a, , "memory")
+__CMPXCHG_CASE(w, b, rel_1, , , l, "memory")
+__CMPXCHG_CASE(w, h, rel_2, , , l, "memory")
+__CMPXCHG_CASE(w, , rel_4, , , l, "memory")
+__CMPXCHG_CASE( , , rel_8, , , l, "memory")
+__CMPXCHG_CASE(w, b, mb_1, dmb ish, , l, "memory")
+__CMPXCHG_CASE(w, h, mb_2, dmb ish, , l, "memory")
+__CMPXCHG_CASE(w, , mb_4, dmb ish, , l, "memory")
+__CMPXCHG_CASE( , , mb_8, dmb ish, , l, "memory")
 
 #undef __CMPXCHG_CASE
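The sixteen instantiations yield one function per ordering per operand size; __cmpxchg_case_acq_4, for instance, is the 4-byte acquire flavour (ldaxr, plain stxr, no trailing dmb). Size dispatch happens in cmpxchg.h, whose diff is not rendered on this page, so the direct call below is purely illustrative (and glosses over __LL_SC_PREFIX name mangling):

static int x;	/* illustrative variable */

void demo(void)
{
	/* Succeeds iff x == 0; on success, later accesses cannot be
	 * reordered before the exchange. Returns the old value. */
	unsigned long old = __cmpxchg_case_acq_4(&x, 0, 1);

	if (old == 0) {
		/* we now own whatever x == 1 guards */
	}
}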

(Diffs for the remaining two of the four changed files are not shown here.)
