cleanup of illumos compatibility atomics
atomic_cas_32 is implemented using atomic_fcmpset_32 on all platforms.
Ditto for atomic_cas_64, via atomic_fcmpset_64, on platforms that have it.
The only exception is sparc64, which provides MD atomic_cas_32 and
atomic_cas_64.
This is slightly inefficient as fcmpset reports whether the operation
updated the target and that information is not needed for cas.
Nevertheless, there is less code to maintain and to add for new platforms.
Also, the operations are now inlined, where previously they were function calls.
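As an illustration, the new inline compat shim (shown in full in the
sys/atomic.h diff below) leans on fcmpset's exact semantics:
atomic_fcmpset_32 writes the value it observed into *cmp and returns whether
the swap happened, so a Solaris-style cas can discard the boolean and simply
return cmp:

static inline uint32_t
atomic_cas_32(volatile uint32_t *target, uint32_t cmp, uint32_t newval)
{
	/*
	 * On failure fcmpset stores the observed value in cmp; on
	 * success cmp already holds the old value.  Either way cmp is
	 * what cas must return, so the success flag goes unused.
	 */
	(void)atomic_fcmpset_32(target, &cmp, newval);
	return (cmp);
}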

atomic_add_64_nv is implemented using atomic_fetchadd_64 on platforms
that provide it.
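That mapping is a one-liner, mirroring the inline added in the header diff
below: fetchadd returns the old value, while the _nv variant must return the
new one.

static inline uint64_t
atomic_add_64_nv(volatile uint64_t *target, int64_t delta)
{
	/* fetchadd yields the previous value; add delta back for *_nv. */
	return (atomic_fetchadd_64(target, delta) + delta);
}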

casptr, cas32, atomic_or_8, atomic_or_8_nv are completely removed as they
have no users.

atomic_mtx, which is used to emulate 64-bit atomics on platforms that lack
them, is now defined only on those platforms.
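A condensed sketch of that arrangement, based on the opensolaris_atomic.c
hunks below (the real file also sets up the mutex in an atomic_init routine,
omitted here):

#if !defined(__LP64__) && !defined(__mips_n32) && \
    !defined(ARM_HAVE_ATOMIC64) && !defined(I386_HAVE_ATOMIC64)
static struct mtx atomic_mtx;	/* initialized by atomic_init(), not shown */

uint64_t
atomic_cas_64(volatile uint64_t *target, uint64_t cmp, uint64_t newval)
{
	uint64_t oldval;

	/* Serialize every emulated 64-bit op through one global lock. */
	mtx_lock(&atomic_mtx);
	oldval = *target;
	if (oldval == cmp)
		*target = newval;
	mtx_unlock(&atomic_mtx);
	return (oldval);
}
#endif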

As a result, platform specific opensolaris_atomic.S files have lost most of
their code.  The only exception is i386 where the compat+contrib code
provides 64-bit atomics for userland use.  That code assumes availability of
cmpxchg8b instruction.  FreeBSD does not have that assumption for i386
userland and does not provide 64-bit atomics.  Hopefully, this can and will
be fixed.
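For context, a hypothetical C rendering of the cmpxchg8b idiom that the
retained i386 assembly depends on (GCC-style inline asm; the function name is
illustrative, and the "A" constraint pins the 64-bit operand to the
%edx:%eax pair):

static inline uint64_t
atomic_cas_64_cmpxchg8b(volatile uint64_t *target, uint64_t cmp,
    uint64_t newval)
{
	/*
	 * lock cmpxchg8b compares %edx:%eax with the 8-byte operand;
	 * if equal it stores %ecx:%ebx there, otherwise it loads the
	 * current value into %edx:%eax.  cmp therefore ends up holding
	 * the old value, which is what cas returns.  The instruction
	 * exists only on i586 and newer, hence the concern above.
	 */
	__asm __volatile("lock; cmpxchg8b %1"
	    : "+A" (cmp), "+m" (*target)
	    : "b" ((uint32_t)newval), "c" ((uint32_t)(newval >> 32))
	    : "cc");
	return (cmp);
}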

MFC after:	3 weeks
avg-I committed Oct 9, 2019
1 parent 6bf933c commit e9642c2
Showing 7 changed files with 38 additions and 290 deletions.
41 changes: 4 additions & 37 deletions sys/cddl/compat/opensolaris/kern/opensolaris_atomic.c
@@ -32,6 +32,9 @@ __FBSDID("$FreeBSD$");
 #include <sys/mutex.h>
 #include <sys/atomic.h>
 
+#if !defined(__LP64__) && !defined(__mips_n32) && \
+    !defined(ARM_HAVE_ATOMIC64) && !defined(I386_HAVE_ATOMIC64)
+
 #ifdef _KERNEL
 #include <sys/kernel.h>
 
@@ -52,8 +55,6 @@ atomic_init(void)
 }
 #endif
 
-#if !defined(__LP64__) && !defined(__mips_n32) && \
-    !defined(ARM_HAVE_ATOMIC64) && !defined(I386_HAVE_ATOMIC64)
 void
 atomic_add_64(volatile uint64_t *target, int64_t delta)
 {
@@ -94,7 +95,6 @@ atomic_load_64(volatile uint64_t *a)
 	mtx_unlock(&atomic_mtx);
 	return (ret);
 }
-#endif
 
 uint64_t
 atomic_add_64_nv(volatile uint64_t *target, int64_t delta)
@@ -107,27 +107,6 @@ atomic_add_64_nv(volatile uint64_t *target, int64_t delta)
 	return (newval);
 }
 
-#if defined(__powerpc__) || defined(__arm__) || defined(__mips__)
-void
-atomic_or_8(volatile uint8_t *target, uint8_t value)
-{
-	mtx_lock(&atomic_mtx);
-	*target |= value;
-	mtx_unlock(&atomic_mtx);
-}
-#endif
-
-uint8_t
-atomic_or_8_nv(volatile uint8_t *target, uint8_t value)
-{
-	uint8_t newval;
-
-	mtx_lock(&atomic_mtx);
-	newval = (*target |= value);
-	mtx_unlock(&atomic_mtx);
-	return (newval);
-}
-
 uint64_t
 atomic_cas_64(volatile uint64_t *target, uint64_t cmp, uint64_t newval)
 {
@@ -140,19 +119,7 @@ atomic_cas_64(volatile uint64_t *target, uint64_t cmp, uint64_t newval)
 	mtx_unlock(&atomic_mtx);
 	return (oldval);
 }
-
-uint32_t
-atomic_cas_32(volatile uint32_t *target, uint32_t cmp, uint32_t newval)
-{
-	uint32_t oldval;
-
-	mtx_lock(&atomic_mtx);
-	oldval = *target;
-	if (oldval == cmp)
-		*target = newval;
-	mtx_unlock(&atomic_mtx);
-	return (oldval);
-}
+#endif
 
 void
 membar_producer(void)
63 changes: 34 additions & 29 deletions sys/cddl/compat/opensolaris/sys/atomic.h
@@ -32,10 +32,6 @@
 #include <sys/types.h>
 #include <machine/atomic.h>
 
-#define	casptr(_a, _b, _c)	\
-	atomic_cmpset_ptr((volatile uintptr_t *)(_a), (uintptr_t)(_b), (uintptr_t) (_c))
-#define	cas32	atomic_cmpset_32
-
 #if defined(__i386__) && (defined(_KERNEL) || defined(KLD_MODULE))
 #define	I386_HAVE_ATOMIC64
 #endif
@@ -46,27 +42,12 @@ extern void atomic_add_64(volatile uint64_t *target, int64_t delta);
 extern void atomic_dec_64(volatile uint64_t *target);
 extern uint64_t atomic_swap_64(volatile uint64_t *a, uint64_t value);
 extern uint64_t atomic_load_64(volatile uint64_t *a);
-#endif
-#ifndef __sparc64__
-extern uint32_t atomic_cas_32(volatile uint32_t *target, uint32_t cmp,
-    uint32_t newval);
+extern uint64_t atomic_add_64_nv(volatile uint64_t *target, int64_t delta);
 extern uint64_t atomic_cas_64(volatile uint64_t *target, uint64_t cmp,
     uint64_t newval);
 #endif
-extern uint64_t atomic_add_64_nv(volatile uint64_t *target, int64_t delta);
-extern uint8_t atomic_or_8_nv(volatile uint8_t *target, uint8_t value);
-extern void membar_producer(void);
 
-#if defined(__sparc64__) || defined(__powerpc__) || defined(__arm__) || \
-    defined(__mips__) || defined(__aarch64__) || defined(__riscv)
-extern void atomic_or_8(volatile uint8_t *target, uint8_t value);
-#else
-static __inline void
-atomic_or_8(volatile uint8_t *target, uint8_t value)
-{
-	atomic_set_8(target, value);
-}
-#endif
+extern void membar_producer(void);
 
 static __inline uint32_t
 atomic_add_32_nv(volatile uint32_t *target, int32_t delta)
@@ -80,6 +61,18 @@ atomic_add_int_nv(volatile u_int *target, int delta)
 	return (atomic_add_32_nv(target, delta));
 }
 
+static __inline void
+atomic_inc_32(volatile uint32_t *target)
+{
+	atomic_add_32(target, 1);
+}
+
+static __inline uint32_t
+atomic_inc_32_nv(volatile uint32_t *target)
+{
+	return (atomic_add_32_nv(target, 1));
+}
+
 static __inline void
 atomic_dec_32(volatile uint32_t *target)
 {
@@ -89,8 +82,17 @@ atomic_dec_32(volatile uint32_t *target)
 static __inline uint32_t
 atomic_dec_32_nv(volatile uint32_t *target)
 {
-	return (atomic_fetchadd_32(target, -1) - 1);
+	return (atomic_add_32_nv(target, -1));
 }
 
+#ifndef __sparc64__
+static inline uint32_t
+atomic_cas_32(volatile uint32_t *target, uint32_t cmp, uint32_t newval)
+{
+	(void)atomic_fcmpset_32(target, &cmp, newval);
+	return (cmp);
+}
+#endif
+
 #if defined(__LP64__) || defined(__mips_n32) || \
 	defined(ARM_HAVE_ATOMIC64) || defined(I386_HAVE_ATOMIC64)
@@ -99,19 +101,22 @@ atomic_dec_64(volatile uint64_t *target)
 {
 	atomic_subtract_64(target, 1);
 }
-#endif
 
-static __inline void
-atomic_inc_32(volatile uint32_t *target)
+static inline uint64_t
+atomic_add_64_nv(volatile uint64_t *target, int64_t delta)
 {
-	atomic_add_32(target, 1);
+	return (atomic_fetchadd_64(target, delta) + delta);
 }
 
-static __inline uint32_t
-atomic_inc_32_nv(volatile uint32_t *target)
+#ifndef __sparc64__
+static inline uint64_t
+atomic_cas_64(volatile uint64_t *target, uint64_t cmp, uint64_t newval)
 {
-	return (atomic_add_32_nv(target, 1));
+	(void)atomic_fcmpset_64(target, &cmp, newval);
+	return (cmp);
 }
+#endif
+#endif
 
 static __inline void
 atomic_inc_64(volatile uint64_t *target)
52 changes: 0 additions & 52 deletions sys/cddl/contrib/opensolaris/common/atomic/aarch64/opensolaris_atomic.S
@@ -28,58 +28,6 @@

 #include <machine/asm.h>
 
-/*
- * uint64_t atomic_add_64_nv(volatile uint64_t *target, int64_t delta)
- */
-ENTRY(atomic_add_64_nv)
-1:	ldxr	x2, [x0]	/* Load *target */
-	add	x2, x2, x1	/* x2 = x2 + delta */
-	stxr	w3, x2, [x0]	/* Store *target */
-	cbnz	w3, 1b		/* Check if the store succeeded */
-	mov	x0, x2		/* Return the new value */
-	ret
-END(atomic_add_64_nv)
-
-/*
- * uint32_t
- * atomic_cas_32(volatile uint32_t *target, uint32_t cmp, uint32_t newval)
- */
-ENTRY(atomic_cas_32)
-1:	ldxr	w3, [x0]	/* Load *target */
-	cmp	w3, w1		/* Does *target == cmp? */
-	b.ne	2f		/* If not, exit */
-	stxr	w4, w2, [x0]	/* Store newval to *target */
-	cbnz	w4, 1b		/* Check if the store succeeded */
-2:	mov	w0, w3		/* Return the old value */
-	ret
-END(atomic_cas_32)
-
-/*
- * uint64_t
- * atomic_cas_64(volatile uint64_t *target, uint64_t cmp, uint64_t newval)
- */
-ENTRY(atomic_cas_64)
-1:	ldxr	x3, [x0]	/* Load *target */
-	cmp	x3, x1		/* Does *target == cmp? */
-	b.ne	2f		/* If not, exit */
-	stxr	w4, x2, [x0]	/* Store newval to *target */
-	cbnz	w4, 1b		/* Check if the store succeeded */
-2:	mov	x0, x3		/* Return the old value */
-	ret
-END(atomic_cas_64)
-
-/*
- * uint8_t atomic_or_8_nv(volatile uint8_t *target, uint8_t value)
- */
-ENTRY(atomic_or_8_nv)
-1:	ldxrb	w2, [x0]	/* Load *target */
-	orr	w2, w2, w1	/* w2 = w2 | value */
-	stxrb	w3, w2, [x0]	/* Store *target */
-	cbnz	w3, 1b		/* Check if the store succeeded */
-	mov	w0, w2		/* Return the new value */
-	ret
-END(atomic_or_8_nv)
-
 ENTRY(membar_producer)
 	dmb	ish
 	ret
34 changes: 0 additions & 34 deletions sys/cddl/contrib/opensolaris/common/atomic/amd64/opensolaris_atomic.S
@@ -28,40 +28,6 @@
 #define	_ASM
 #include <sys/asm_linkage.h>
 
-ENTRY(atomic_add_64_nv)
-	mov	%rsi, %rax	// %rax = delta addend
-	lock
-	xaddq	%rsi, (%rdi)	// %rsi = old value, (%rdi) = sum
-	addq	%rsi, %rax	// new value = original value + delta
-	ret
-	SET_SIZE(atomic_add_64_nv)
-
-ENTRY(atomic_or_8_nv)
-	movb	(%rdi), %al	// %al = old value
-1:
-	movb	%sil, %cl
-	orb	%al, %cl	// %cl = new value
-	lock
-	cmpxchgb %cl, (%rdi)	// try to stick it in
-	jne	1b
-	movzbl	%cl, %eax	// return new value
-	ret
-	SET_SIZE(atomic_or_8_nv)
-
-ENTRY(atomic_cas_32)
-	movl	%esi, %eax
-	lock
-	cmpxchgl %edx, (%rdi)
-	ret
-	SET_SIZE(atomic_cas_32)
-
-ENTRY(atomic_cas_64)
-	movq	%rsi, %rax
-	lock
-	cmpxchgq %rdx, (%rdi)
-	ret
-	SET_SIZE(atomic_cas_64)
-
 ENTRY(membar_producer)
 	sfence
 	ret
22 changes: 0 additions & 22 deletions sys/cddl/contrib/opensolaris/common/atomic/i386/opensolaris_atomic.S
@@ -89,28 +89,6 @@
 	SET_SIZE(atomic_add_64_nv)
 	SET_SIZE(atomic_add_64)
 
-ENTRY(atomic_or_8_nv)
-	movl	4(%esp), %edx	// %edx = target address
-	movb	(%edx), %al	// %al = old value
-1:
-	movl	8(%esp), %ecx	// %ecx = value
-	orb	%al, %cl	// %cl = new value
-	lock
-	cmpxchgb %cl, (%edx)	// try to stick it in
-	jne	1b
-	movzbl	%cl, %eax	// return new value
-	ret
-	SET_SIZE(atomic_or_8_nv)
-
-ENTRY(atomic_cas_32)
-	movl	4(%esp), %edx
-	movl	8(%esp), %eax
-	movl	12(%esp), %ecx
-	lock
-	cmpxchgl %ecx, (%edx)
-	ret
-	SET_SIZE(atomic_cas_32)
-
 ENTRY(atomic_cas_64)
 	pushl	%ebx
 	pushl	%esi
55 changes: 0 additions & 55 deletions sys/cddl/contrib/opensolaris/common/atomic/powerpc64/opensolaris_atomic.S
@@ -27,61 +27,6 @@

 #include <machine/asm.h>
 
-ENTRY(atomic_add_64_nv)
-1:	ldarx	%r5,0,%r3
-	add	%r5,%r4,%r5
-	stdcx.	%r5,0,%r3
-	bne-	1b
-
-	mr	%r3,%r5
-	blr
-
-ENTRY(atomic_cas_32)
-1:	lwarx	%r6,0,%r3
-	cmplw	%r6,%r4
-	bne	2f
-	stwcx.	%r5,0,%r3
-	bne-	1b
-	b	3f
-
-2:	stwcx.	%r6,0,%r3	/* clear reservation */
-
-3:	mr	%r3,%r6
-	blr
-
-ENTRY(atomic_cas_64)
-1:	ldarx	%r6,0,%r3
-	cmpld	%r6,%r4
-	bne	2f
-	stdcx.	%r5,0,%r3
-	bne-	1b
-	b	3f
-
-2:	stdcx.	%r6,0,%r3	/* clear reservation */
-
-3:	mr	%r3,%r6
-	blr
-
-ENTRY(atomic_or_8_nv)
-	li	%r6,3
-	andc.	%r6,%r3,%r6	/* r6 = r3 & ~3 */
-	addi	%r7,%r6,3
-	sub	%r7,%r7,%r3	/* offset in r7 */
-	sldi	%r7,%r7,3	/* bits to shift in r7 */
-
-	rlwinm	%r4,%r4,0,24,31	/* mask and rotate the argument */
-	slw	%r4,%r4,%r7
-
-1:	lwarx	%r5,0,%r6
-	or	%r5,%r4,%r5
-	stwcx.	%r5,0,%r6
-	bne-	1b
-
-	srw	%r3,%r5,%r7
-	rlwinm	%r3,%r3,0,24,31	/* mask return value */
-
-	blr
-
 ENTRY(membar_producer)
 	eieio
 	blr
(diffs for the remaining 2 changed files not shown)
