diff --git a/Include/internal/pycore_atomic.h b/Include/internal/pycore_atomic.h index 48d246ea08f3d98..22ce971a64f3dfd 100644 --- a/Include/internal/pycore_atomic.h +++ b/Include/internal/pycore_atomic.h @@ -1,5 +1,5 @@ -#ifndef Py_ATOMIC_H -#define Py_ATOMIC_H +#ifndef Py_INTERNAL_ATOMIC_H +#define Py_INTERNAL_ATOMIC_H #ifdef __cplusplus extern "C" { #endif @@ -554,4 +554,4 @@ typedef struct _Py_atomic_int { #ifdef __cplusplus } #endif -#endif /* Py_ATOMIC_H */ +#endif /* Py_INTERNAL_ATOMIC_H */ diff --git a/Include/pyatomic.h b/Include/pyatomic.h new file mode 100644 index 000000000000000..b8ad93562eb024a --- /dev/null +++ b/Include/pyatomic.h @@ -0,0 +1,372 @@ +#ifndef Py_ATOMIC_H +#define Py_ATOMIC_H + +static inline int +_Py_atomic_add_int(volatile int *address, int value); + +static inline int8_t +_Py_atomic_add_int8(volatile int8_t *address, int8_t value); + +static inline int16_t +_Py_atomic_add_int16(volatile int16_t *address, int16_t value); + +static inline int32_t +_Py_atomic_add_int32(volatile int32_t *address, int32_t value); + +static inline int64_t +_Py_atomic_add_int64(volatile int64_t *address, int64_t value); + +static inline intptr_t +_Py_atomic_add_intptr(volatile intptr_t *address, intptr_t value); + +static inline unsigned int +_Py_atomic_add_uint(volatile unsigned int *address, unsigned int value); + +static inline uint8_t +_Py_atomic_add_uint8(volatile uint8_t *address, uint8_t value); + +static inline uint16_t +_Py_atomic_add_uint16(volatile uint16_t *address, uint16_t value); + +static inline uint32_t +_Py_atomic_add_uint32(volatile uint32_t *address, uint32_t value); + +static inline uint64_t +_Py_atomic_add_uint64(volatile uint64_t *address, uint64_t value); + +static inline uintptr_t +_Py_atomic_add_uintptr(volatile uintptr_t *address, uintptr_t value); + +static inline Py_ssize_t +_Py_atomic_add_ssize(volatile Py_ssize_t *address, Py_ssize_t value); + + +static inline int +_Py_atomic_compare_exchange_int(volatile int *address, int 
expected, int value); + +static inline int +_Py_atomic_compare_exchange_int8(volatile int8_t *address, int8_t expected, int8_t value); + +static inline int +_Py_atomic_compare_exchange_int16(volatile int16_t *address, int16_t expected, int16_t value); + +static inline int +_Py_atomic_compare_exchange_int32(volatile int32_t *address, int32_t expected, int32_t value); + +static inline int +_Py_atomic_compare_exchange_int64(volatile int64_t *address, int64_t expected, int64_t value); + +static inline int +_Py_atomic_compare_exchange_intptr(volatile intptr_t *address, intptr_t expected, intptr_t value); + +static inline int +_Py_atomic_compare_exchange_uint(volatile unsigned int *address, unsigned int expected, unsigned int value); + +static inline int +_Py_atomic_compare_exchange_uint8(volatile uint8_t *address, uint8_t expected, uint8_t value); + +static inline int +_Py_atomic_compare_exchange_uint16(volatile uint16_t *address, uint16_t expected, uint16_t value); + +static inline int +_Py_atomic_compare_exchange_uint32(volatile uint32_t *address, uint32_t expected, uint32_t value); + +static inline int +_Py_atomic_compare_exchange_uint64(volatile uint64_t *address, uint64_t expected, uint64_t value); + +static inline int +_Py_atomic_compare_exchange_uintptr(volatile uintptr_t *address, uintptr_t expected, uintptr_t value); + +static inline int +_Py_atomic_compare_exchange_ssize(volatile Py_ssize_t *address, Py_ssize_t expected, Py_ssize_t value); + +static inline int +_Py_atomic_compare_exchange_ptr(volatile void *address, void *expected, void *value); + + +static inline int +_Py_atomic_exchange_int(volatile int *address, int value); + +static inline int8_t +_Py_atomic_exchange_int8(volatile int8_t *address, int8_t value); + +static inline int16_t +_Py_atomic_exchange_int16(volatile int16_t *address, int16_t value); + +static inline int32_t +_Py_atomic_exchange_int32(volatile int32_t *address, int32_t value); + +static inline int64_t 
+_Py_atomic_exchange_int64(volatile int64_t *address, int64_t value); + +static inline intptr_t +_Py_atomic_exchange_intptr(volatile intptr_t *address, intptr_t value); + +static inline unsigned int +_Py_atomic_exchange_uint(volatile unsigned int *address, unsigned int value); + +static inline uint8_t +_Py_atomic_exchange_uint8(volatile uint8_t *address, uint8_t value); + +static inline uint16_t +_Py_atomic_exchange_uint16(volatile uint16_t *address, uint16_t value); + +static inline uint32_t +_Py_atomic_exchange_uint32(volatile uint32_t *address, uint32_t value); + +static inline uint64_t +_Py_atomic_exchange_uint64(volatile uint64_t *address, uint64_t value); + +static inline uintptr_t +_Py_atomic_exchange_uintptr(volatile uintptr_t *address, uintptr_t value); + +static inline Py_ssize_t +_Py_atomic_exchange_ssize(volatile Py_ssize_t *address, Py_ssize_t value); + +static inline void * +_Py_atomic_exchange_ptr(volatile void *address, void *value); + + +static inline uint8_t +_Py_atomic_and_uint8(volatile uint8_t *address, uint8_t value); + +static inline uint16_t +_Py_atomic_and_uint16(volatile uint16_t *address, uint16_t value); + +static inline uint32_t +_Py_atomic_and_uint32(volatile uint32_t *address, uint32_t value); + +static inline uint64_t +_Py_atomic_and_uint64(volatile uint64_t *address, uint64_t value); + +static inline uintptr_t +_Py_atomic_and_uintptr(volatile uintptr_t *address, uintptr_t value); + + +static inline uint8_t +_Py_atomic_or_uint8(volatile uint8_t *address, uint8_t value); + +static inline uint16_t +_Py_atomic_or_uint16(volatile uint16_t *address, uint16_t value); + +static inline uint32_t +_Py_atomic_or_uint32(volatile uint32_t *address, uint32_t value); + +static inline uint64_t +_Py_atomic_or_uint64(volatile uint64_t *address, uint64_t value); + +static inline uintptr_t +_Py_atomic_or_uintptr(volatile uintptr_t *address, uintptr_t value); + + +static inline int +_Py_atomic_load_int(const volatile int *address); + +static inline 
int8_t +_Py_atomic_load_int8(const volatile int8_t *address); + +static inline int16_t +_Py_atomic_load_int16(const volatile int16_t *address); + +static inline int32_t +_Py_atomic_load_int32(const volatile int32_t *address); + +static inline int64_t +_Py_atomic_load_int64(const volatile int64_t *address); + +static inline intptr_t +_Py_atomic_load_intptr(const volatile intptr_t *address); + +static inline uint8_t +_Py_atomic_load_uint8(const volatile uint8_t *address); + +static inline uint16_t +_Py_atomic_load_uint16(const volatile uint16_t *address); + +static inline uint32_t +_Py_atomic_load_uint32(const volatile uint32_t *address); + +static inline uint64_t +_Py_atomic_load_uint64(const volatile uint64_t *address); + +static inline uintptr_t +_Py_atomic_load_uintptr(const volatile uintptr_t *address); + +static inline unsigned int +_Py_atomic_load_uint(const volatile unsigned int *address); + +static inline Py_ssize_t +_Py_atomic_load_ssize(const volatile Py_ssize_t *address); + +static inline void * +_Py_atomic_load_ptr(const volatile void *address); + + +static inline int +_Py_atomic_load_int_relaxed(const volatile int *address); + +static inline int8_t +_Py_atomic_load_int8_relaxed(const volatile int8_t *address); + +static inline int16_t +_Py_atomic_load_int16_relaxed(const volatile int16_t *address); + +static inline int32_t +_Py_atomic_load_int32_relaxed(const volatile int32_t *address); + +static inline int64_t +_Py_atomic_load_int64_relaxed(const volatile int64_t *address); + +static inline intptr_t +_Py_atomic_load_intptr_relaxed(const volatile intptr_t *address); + +static inline uint8_t +_Py_atomic_load_uint8_relaxed(const volatile uint8_t *address); + +static inline uint16_t +_Py_atomic_load_uint16_relaxed(const volatile uint16_t *address); + +static inline uint32_t +_Py_atomic_load_uint32_relaxed(const volatile uint32_t *address); + +static inline uint64_t +_Py_atomic_load_uint64_relaxed(const volatile uint64_t *address); + +static inline 
uintptr_t +_Py_atomic_load_uintptr_relaxed(const volatile uintptr_t *address); + +static inline unsigned int +_Py_atomic_load_uint_relaxed(const volatile unsigned int *address); + +static inline Py_ssize_t +_Py_atomic_load_ssize_relaxed(const volatile Py_ssize_t *address); + +static inline void * +_Py_atomic_load_ptr_relaxed(const volatile void *address); + + +static inline void +_Py_atomic_store_int(volatile int *address, int value); + +static inline void +_Py_atomic_store_int8(volatile int8_t *address, int8_t value); + +static inline void +_Py_atomic_store_int16(volatile int16_t *address, int16_t value); + +static inline void +_Py_atomic_store_int32(volatile int32_t *address, int32_t value); + +static inline void +_Py_atomic_store_int64(volatile int64_t *address, int64_t value); + +static inline void +_Py_atomic_store_intptr(volatile intptr_t *address, intptr_t value); + +static inline void +_Py_atomic_store_uint8(volatile uint8_t *address, uint8_t value); + +static inline void +_Py_atomic_store_uint16(volatile uint16_t *address, uint16_t value); + +static inline void +_Py_atomic_store_uint32(volatile uint32_t *address, uint32_t value); + +static inline void +_Py_atomic_store_uint64(volatile uint64_t *address, uint64_t value); + +static inline void +_Py_atomic_store_uintptr(volatile uintptr_t *address, uintptr_t value); + +static inline void +_Py_atomic_store_uint(volatile unsigned int *address, unsigned int value); + +static inline void +_Py_atomic_store_ptr(volatile void *address, void *value); + +static inline void +_Py_atomic_store_ssize(volatile Py_ssize_t* address, Py_ssize_t value); + + +static inline void +_Py_atomic_store_int_relaxed(volatile int *address, int value); + +static inline void +_Py_atomic_store_int8_relaxed(volatile int8_t *address, int8_t value); + +static inline void +_Py_atomic_store_int16_relaxed(volatile int16_t *address, int16_t value); + +static inline void +_Py_atomic_store_int32_relaxed(volatile int32_t *address, int32_t value); + 
+static inline void +_Py_atomic_store_int64_relaxed(volatile int64_t *address, int64_t value); + +static inline void +_Py_atomic_store_intptr_relaxed(volatile intptr_t *address, intptr_t value); + +static inline void +_Py_atomic_store_uint8_relaxed(volatile uint8_t* address, uint8_t value); + +static inline void +_Py_atomic_store_uint16_relaxed(volatile uint16_t *address, uint16_t value); + +static inline void +_Py_atomic_store_uint32_relaxed(volatile uint32_t *address, uint32_t value); + +static inline void +_Py_atomic_store_uint64_relaxed(volatile uint64_t *address, uint64_t value); + +static inline void +_Py_atomic_store_uintptr_relaxed(volatile uintptr_t *address, uintptr_t value); + +static inline void +_Py_atomic_store_uint_relaxed(volatile unsigned int *address, unsigned int value); + +static inline void +_Py_atomic_store_ptr_relaxed(volatile void *address, void *value); + +static inline void +_Py_atomic_store_ssize_relaxed(volatile Py_ssize_t *address, Py_ssize_t value); + + +static inline void +_Py_atomic_store_uint64_release(volatile uint64_t *address, uint64_t value); + +static inline void +_Py_atomic_store_ptr_release(volatile void *address, void *value); + + + static inline void +_Py_atomic_fence_seq_cst(void); + + static inline void +_Py_atomic_fence_release(void); + + +#ifndef _Py_USE_GCC_BUILTIN_ATOMICS +#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)) +#define _Py_USE_GCC_BUILTIN_ATOMICS 1 +#elif defined(__clang__) +#if __has_builtin(__atomic_load) +#define _Py_USE_GCC_BUILTIN_ATOMICS 1 +#endif +#endif +#endif + +#if _Py_USE_GCC_BUILTIN_ATOMICS +#define Py_ATOMIC_GCC_H +#include "pyatomic_gcc.h" +#elif __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_ATOMICS__) +#define Py_ATOMIC_STD_H +#include "pyatomic_std.h" +#elif defined(_MSC_VER) +#define Py_ATOMIC_MSC_H +#include "pyatomic_msc.h" +#else +#error "define pyatomic for this platform" +#endif + +#endif /* Py_ATOMIC_H */ + diff --git a/Include/pyatomic_gcc.h 
b/Include/pyatomic_gcc.h new file mode 100644 index 000000000000000..64d917933d12da3 --- /dev/null +++ b/Include/pyatomic_gcc.h @@ -0,0 +1,679 @@ +#ifndef Py_ATOMIC_GCC_H +# error "this header file must not be included directly" +#endif + +// This is the implementation of Python atomic operations using GCC's built-in +// functions that match the C11 memory model. This implementation is preferred +// for GCC compatible compilers, such as Clang. These functions are available in +// GCC 4.8+ without needing to compile with --std=c11 or --std=gnu11. + +static inline int +_Py_atomic_add_int(volatile int *address, int value) +{ + return __atomic_fetch_add(address, value, __ATOMIC_SEQ_CST); +} + +static inline unsigned int +_Py_atomic_add_uint(volatile unsigned int *address, unsigned int value) +{ + return __atomic_fetch_add(address, value, __ATOMIC_SEQ_CST); +} + +static inline int8_t +_Py_atomic_add_int8(volatile int8_t *address, int8_t value) +{ + return __atomic_fetch_add(address, value, __ATOMIC_SEQ_CST); +} + +static inline int16_t +_Py_atomic_add_int16(volatile int16_t *address, int16_t value) +{ + return __atomic_fetch_add(address, value, __ATOMIC_SEQ_CST); +} + +static inline int32_t +_Py_atomic_add_int32(volatile int32_t *address, int32_t value) +{ + return __atomic_fetch_add(address, value, __ATOMIC_SEQ_CST); +} + +static inline int64_t +_Py_atomic_add_int64(volatile int64_t *address, int64_t value) +{ + return __atomic_fetch_add(address, value, __ATOMIC_SEQ_CST); +} + +static inline intptr_t +_Py_atomic_add_intptr(volatile intptr_t *address, intptr_t value) +{ + return __atomic_fetch_add(address, value, __ATOMIC_SEQ_CST); +} + +static inline uint8_t +_Py_atomic_add_uint8(volatile uint8_t *address, uint8_t value) +{ + return __atomic_fetch_add(address, value, __ATOMIC_SEQ_CST); +} + +static inline uint16_t +_Py_atomic_add_uint16(volatile uint16_t *address, uint16_t value) +{ + return __atomic_fetch_add(address, value, __ATOMIC_SEQ_CST); +} + +static inline 
uint32_t +_Py_atomic_add_uint32(volatile uint32_t *address, uint32_t value) +{ + return __atomic_fetch_add(address, value, __ATOMIC_SEQ_CST); +} + +static inline uint64_t +_Py_atomic_add_uint64(volatile uint64_t *address, uint64_t value) +{ + return __atomic_fetch_add(address, value, __ATOMIC_SEQ_CST); +} + +static inline uintptr_t +_Py_atomic_add_uintptr(volatile uintptr_t *address, uintptr_t value) +{ + return __atomic_fetch_add(address, value, __ATOMIC_SEQ_CST); +} + +static inline Py_ssize_t +_Py_atomic_add_ssize(volatile Py_ssize_t *address, Py_ssize_t value) +{ + return __atomic_fetch_add(address, value, __ATOMIC_SEQ_CST); +} + + +static inline int +_Py_atomic_compare_exchange_int(volatile int *address, int expected, int value) +{ + return __atomic_compare_exchange_n(address, &expected, value, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); +} + +static inline int +_Py_atomic_compare_exchange_int8(volatile int8_t *address, int8_t expected, int8_t value) +{ + return __atomic_compare_exchange_n(address, &expected, value, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); +} + +static inline int +_Py_atomic_compare_exchange_int16(volatile int16_t *address, int16_t expected, int16_t value) +{ + return __atomic_compare_exchange_n(address, &expected, value, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); +} + +static inline int +_Py_atomic_compare_exchange_int32(volatile int32_t *address, int32_t expected, int32_t value) +{ + return __atomic_compare_exchange_n(address, &expected, value, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); +} + +static inline int +_Py_atomic_compare_exchange_int64(volatile int64_t *address, int64_t expected, int64_t value) +{ + return __atomic_compare_exchange_n(address, &expected, value, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); +} + +static inline int +_Py_atomic_compare_exchange_intptr(volatile intptr_t *address, intptr_t expected, intptr_t value) +{ + return __atomic_compare_exchange_n(address, &expected, value, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); +} + +static 
inline int +_Py_atomic_compare_exchange_uint(volatile unsigned int *address, unsigned int expected, unsigned int value) +{ + return __atomic_compare_exchange_n(address, &expected, value, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); +} + +static inline int +_Py_atomic_compare_exchange_uint8(volatile uint8_t *address, uint8_t expected, uint8_t value) +{ + return __atomic_compare_exchange_n(address, &expected, value, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); +} + +static inline int +_Py_atomic_compare_exchange_uint16(volatile uint16_t *address, uint16_t expected, uint16_t value) +{ + return __atomic_compare_exchange_n(address, &expected, value, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); +} + +static inline int +_Py_atomic_compare_exchange_uint32(volatile uint32_t *address, uint32_t expected, uint32_t value) +{ + return __atomic_compare_exchange_n(address, &expected, value, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); +} + +static inline int +_Py_atomic_compare_exchange_uint64(volatile uint64_t *address, uint64_t expected, uint64_t value) +{ + return __atomic_compare_exchange_n(address, &expected, value, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); +} + +static inline int +_Py_atomic_compare_exchange_uintptr(volatile uintptr_t *address, uintptr_t expected, uintptr_t value) +{ + return __atomic_compare_exchange_n(address, &expected, value, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); +} + +static inline int +_Py_atomic_compare_exchange_ssize(volatile Py_ssize_t *address, Py_ssize_t expected, Py_ssize_t value) +{ + return __atomic_compare_exchange_n(address, &expected, value, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); +} + +static inline int +_Py_atomic_compare_exchange_ptr(volatile void *address, void *expected, void *value) +{ + volatile void *e = expected; + return __atomic_compare_exchange_n((volatile void **)address, &e, value, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); +} + + +static inline int +_Py_atomic_exchange_int(volatile int *address, int value) +{ + return __atomic_exchange_n(address, 
value, __ATOMIC_SEQ_CST); +} + +static inline int8_t +_Py_atomic_exchange_int8(volatile int8_t *address, int8_t value) +{ + return __atomic_exchange_n(address, value, __ATOMIC_SEQ_CST); +} + +static inline int16_t +_Py_atomic_exchange_int16(volatile int16_t *address, int16_t value) +{ + return __atomic_exchange_n(address, value, __ATOMIC_SEQ_CST); +} + +static inline int32_t +_Py_atomic_exchange_int32(volatile int32_t *address, int32_t value) +{ + return __atomic_exchange_n(address, value, __ATOMIC_SEQ_CST); +} + +static inline int64_t +_Py_atomic_exchange_int64(volatile int64_t *address, int64_t value) +{ + return __atomic_exchange_n(address, value, __ATOMIC_SEQ_CST); +} + +static inline intptr_t +_Py_atomic_exchange_intptr(volatile intptr_t *address, intptr_t value) +{ + return __atomic_exchange_n(address, value, __ATOMIC_SEQ_CST); +} + +static inline unsigned int +_Py_atomic_exchange_uint(volatile unsigned int *address, unsigned int value) +{ + return __atomic_exchange_n(address, value, __ATOMIC_SEQ_CST); +} + +static inline uint8_t +_Py_atomic_exchange_uint8(volatile uint8_t *address, uint8_t value) +{ + return __atomic_exchange_n(address, value, __ATOMIC_SEQ_CST); +} + +static inline uint16_t +_Py_atomic_exchange_uint16(volatile uint16_t *address, uint16_t value) +{ + return __atomic_exchange_n(address, value, __ATOMIC_SEQ_CST); +} + +static inline uint32_t +_Py_atomic_exchange_uint32(volatile uint32_t *address, uint32_t value) +{ + return __atomic_exchange_n(address, value, __ATOMIC_SEQ_CST); +} + +static inline uint64_t +_Py_atomic_exchange_uint64(volatile uint64_t *address, uint64_t value) +{ + return __atomic_exchange_n(address, value, __ATOMIC_SEQ_CST); +} + +static inline uintptr_t +_Py_atomic_exchange_uintptr(volatile uintptr_t *address, uintptr_t value) +{ + return __atomic_exchange_n(address, value, __ATOMIC_SEQ_CST); +} + +static inline Py_ssize_t +_Py_atomic_exchange_ssize(volatile Py_ssize_t *address, Py_ssize_t value) +{ + return 
__atomic_exchange_n(address, value, __ATOMIC_SEQ_CST); +} + +static inline void * +_Py_atomic_exchange_ptr(volatile void *address, void *value) +{ + return __atomic_exchange_n((void **)address, value, __ATOMIC_SEQ_CST); +} + +static inline uint8_t +_Py_atomic_and_uint8(volatile uint8_t *address, uint8_t value) +{ + return __atomic_fetch_and(address, value, __ATOMIC_SEQ_CST); +} + +static inline uint16_t +_Py_atomic_and_uint16(volatile uint16_t *address, uint16_t value) +{ + return __atomic_fetch_and(address, value, __ATOMIC_SEQ_CST); +} + +static inline uint32_t +_Py_atomic_and_uint32(volatile uint32_t *address, uint32_t value) +{ + return __atomic_fetch_and(address, value, __ATOMIC_SEQ_CST); +} + +static inline uint64_t +_Py_atomic_and_uint64(volatile uint64_t *address, uint64_t value) +{ + return __atomic_fetch_and(address, value, __ATOMIC_SEQ_CST); +} + +static inline uintptr_t +_Py_atomic_and_uintptr(volatile uintptr_t *address, uintptr_t value) +{ + return __atomic_fetch_and(address, value, __ATOMIC_SEQ_CST); +} + +static inline uint8_t +_Py_atomic_or_uint8(volatile uint8_t *address, uint8_t value) +{ + return __atomic_fetch_or(address, value, __ATOMIC_SEQ_CST); +} + +static inline uint16_t +_Py_atomic_or_uint16(volatile uint16_t *address, uint16_t value) +{ + return __atomic_fetch_or(address, value, __ATOMIC_SEQ_CST); +} + +static inline uint32_t +_Py_atomic_or_uint32(volatile uint32_t *address, uint32_t value) +{ + return __atomic_fetch_or(address, value, __ATOMIC_SEQ_CST); +} + +static inline uint64_t +_Py_atomic_or_uint64(volatile uint64_t *address, uint64_t value) +{ + return __atomic_fetch_or(address, value, __ATOMIC_SEQ_CST); +} + +static inline uintptr_t +_Py_atomic_or_uintptr(volatile uintptr_t *address, uintptr_t value) +{ + return __atomic_fetch_or(address, value, __ATOMIC_SEQ_CST); +} + +static inline int +_Py_atomic_load_int(const volatile int *address) +{ + return __atomic_load_n(address, __ATOMIC_SEQ_CST); +} + +static inline int8_t 
+_Py_atomic_load_int8(const volatile int8_t *address) +{ + return __atomic_load_n(address, __ATOMIC_SEQ_CST); +} + +static inline int16_t +_Py_atomic_load_int16(const volatile int16_t *address) +{ + return __atomic_load_n(address, __ATOMIC_SEQ_CST); +} + +static inline int32_t +_Py_atomic_load_int32(const volatile int32_t *address) +{ + return __atomic_load_n(address, __ATOMIC_SEQ_CST); +} + +static inline int64_t +_Py_atomic_load_int64(const volatile int64_t *address) +{ + return __atomic_load_n(address, __ATOMIC_SEQ_CST); +} + +static inline intptr_t +_Py_atomic_load_intptr(const volatile intptr_t *address) +{ + return __atomic_load_n(address, __ATOMIC_SEQ_CST); +} + +static inline uint8_t +_Py_atomic_load_uint8(const volatile uint8_t *address) +{ + return __atomic_load_n(address, __ATOMIC_SEQ_CST); +} + +static inline uint16_t +_Py_atomic_load_uint16(const volatile uint16_t *address) +{ + return __atomic_load_n(address, __ATOMIC_SEQ_CST); +} + +static inline uint32_t +_Py_atomic_load_uint32(const volatile uint32_t *address) +{ + return __atomic_load_n(address, __ATOMIC_SEQ_CST); +} + +static inline uint64_t +_Py_atomic_load_uint64(const volatile uint64_t *address) +{ + return __atomic_load_n(address, __ATOMIC_SEQ_CST); +} + +static inline uintptr_t +_Py_atomic_load_uintptr(const volatile uintptr_t *address) +{ + return __atomic_load_n(address, __ATOMIC_SEQ_CST); +} + +static inline unsigned int +_Py_atomic_load_uint(const volatile unsigned int *address) +{ + return __atomic_load_n(address, __ATOMIC_SEQ_CST); +} + +static inline Py_ssize_t +_Py_atomic_load_ssize(const volatile Py_ssize_t *address) +{ + return __atomic_load_n(address, __ATOMIC_SEQ_CST); +} + +static inline void * +_Py_atomic_load_ptr(const volatile void *address) +{ + return (void *)__atomic_load_n((volatile void **)address, __ATOMIC_SEQ_CST); +} + + +static inline int +_Py_atomic_load_int_relaxed(const volatile int *address) +{ + return __atomic_load_n(address, __ATOMIC_RELAXED); +} + +static 
inline int8_t +_Py_atomic_load_int8_relaxed(const volatile int8_t *address) +{ + return __atomic_load_n(address, __ATOMIC_RELAXED); +} + +static inline int16_t +_Py_atomic_load_int16_relaxed(const volatile int16_t *address) +{ + return __atomic_load_n(address, __ATOMIC_RELAXED); +} + +static inline int32_t +_Py_atomic_load_int32_relaxed(const volatile int32_t *address) +{ + return __atomic_load_n(address, __ATOMIC_RELAXED); +} + +static inline int64_t +_Py_atomic_load_int64_relaxed(const volatile int64_t *address) +{ + return __atomic_load_n(address, __ATOMIC_RELAXED); +} + +static inline intptr_t +_Py_atomic_load_intptr_relaxed(const volatile intptr_t *address) +{ + return __atomic_load_n(address, __ATOMIC_RELAXED); +} + +static inline uint8_t +_Py_atomic_load_uint8_relaxed(const volatile uint8_t *address) +{ + return __atomic_load_n(address, __ATOMIC_RELAXED); +} + +static inline uint16_t +_Py_atomic_load_uint16_relaxed(const volatile uint16_t *address) +{ + return __atomic_load_n(address, __ATOMIC_RELAXED); +} + +static inline uint32_t +_Py_atomic_load_uint32_relaxed(const volatile uint32_t *address) +{ + return __atomic_load_n(address, __ATOMIC_RELAXED); +} + +static inline uint64_t +_Py_atomic_load_uint64_relaxed(const volatile uint64_t *address) +{ + return __atomic_load_n(address, __ATOMIC_RELAXED); +} + +static inline uintptr_t +_Py_atomic_load_uintptr_relaxed(const volatile uintptr_t *address) +{ + return __atomic_load_n(address, __ATOMIC_RELAXED); +} + +static inline unsigned int +_Py_atomic_load_uint_relaxed(const volatile unsigned int *address) +{ + return __atomic_load_n(address, __ATOMIC_RELAXED); +} + +static inline Py_ssize_t +_Py_atomic_load_ssize_relaxed(const volatile Py_ssize_t *address) +{ + return __atomic_load_n(address, __ATOMIC_RELAXED); +} + +static inline void * +_Py_atomic_load_ptr_relaxed(const volatile void *address) +{ + return (void *)__atomic_load_n((const volatile void **)address, __ATOMIC_RELAXED); +} + +static inline void 
+_Py_atomic_store_int(volatile int *address, int value) +{ + __atomic_store_n(address, value, __ATOMIC_SEQ_CST); +} + +static inline void +_Py_atomic_store_int8(volatile int8_t *address, int8_t value) +{ + __atomic_store_n(address, value, __ATOMIC_SEQ_CST); +} + +static inline void +_Py_atomic_store_int16(volatile int16_t *address, int16_t value) +{ + __atomic_store_n(address, value, __ATOMIC_SEQ_CST); +} + +static inline void +_Py_atomic_store_int32(volatile int32_t *address, int32_t value) +{ + __atomic_store_n(address, value, __ATOMIC_SEQ_CST); +} + +static inline void +_Py_atomic_store_int64(volatile int64_t *address, int64_t value) +{ + __atomic_store_n(address, value, __ATOMIC_SEQ_CST); +} + +static inline void +_Py_atomic_store_intptr(volatile intptr_t *address, intptr_t value) +{ + __atomic_store_n(address, value, __ATOMIC_SEQ_CST); +} + +static inline void +_Py_atomic_store_uint8(volatile uint8_t *address, uint8_t value) +{ + __atomic_store_n(address, value, __ATOMIC_SEQ_CST); +} + +static inline void +_Py_atomic_store_uint16(volatile uint16_t *address, uint16_t value) +{ + __atomic_store_n(address, value, __ATOMIC_SEQ_CST); +} + +static inline void +_Py_atomic_store_uint32(volatile uint32_t *address, uint32_t value) +{ + __atomic_store_n(address, value, __ATOMIC_SEQ_CST); +} + +static inline void +_Py_atomic_store_uint64(volatile uint64_t *address, uint64_t value) +{ + __atomic_store_n(address, value, __ATOMIC_SEQ_CST); +} + +static inline void +_Py_atomic_store_uintptr(volatile uintptr_t *address, uintptr_t value) +{ + __atomic_store_n(address, value, __ATOMIC_SEQ_CST); +} + +static inline void +_Py_atomic_store_uint(volatile unsigned int *address, unsigned int value) +{ + __atomic_store_n(address, value, __ATOMIC_SEQ_CST); +} + +static inline void +_Py_atomic_store_ptr(volatile void *address, void *value) +{ + __atomic_store_n((volatile void **)address, value, __ATOMIC_SEQ_CST); +} + +static inline void +_Py_atomic_store_ssize(volatile Py_ssize_t 
*address, Py_ssize_t value) +{ + __atomic_store_n(address, value, __ATOMIC_SEQ_CST); +} + +static inline void +_Py_atomic_store_int_relaxed(volatile int *address, int value) +{ + __atomic_store_n(address, value, __ATOMIC_RELAXED); +} + +static inline void +_Py_atomic_store_int8_relaxed(volatile int8_t *address, int8_t value) +{ + __atomic_store_n(address, value, __ATOMIC_RELAXED); +} + +static inline void +_Py_atomic_store_int16_relaxed(volatile int16_t *address, int16_t value) +{ + __atomic_store_n(address, value, __ATOMIC_RELAXED); +} + +static inline void +_Py_atomic_store_int32_relaxed(volatile int32_t *address, int32_t value) +{ + __atomic_store_n(address, value, __ATOMIC_RELAXED); +} + +static inline void +_Py_atomic_store_int64_relaxed(volatile int64_t *address, int64_t value) +{ + __atomic_store_n(address, value, __ATOMIC_RELAXED); +} + +static inline void +_Py_atomic_store_intptr_relaxed(volatile intptr_t *address, intptr_t value) +{ + __atomic_store_n(address, value, __ATOMIC_RELAXED); +} + +static inline void +_Py_atomic_store_uint8_relaxed(volatile uint8_t *address, uint8_t value) +{ + __atomic_store_n(address, value, __ATOMIC_RELAXED); +} + +static inline void +_Py_atomic_store_uint16_relaxed(volatile uint16_t *address, uint16_t value) +{ + __atomic_store_n(address, value, __ATOMIC_RELAXED); +} + +static inline void +_Py_atomic_store_uint32_relaxed(volatile uint32_t *address, uint32_t value) +{ + __atomic_store_n(address, value, __ATOMIC_RELAXED); +} + +static inline void +_Py_atomic_store_uint64_relaxed(volatile uint64_t *address, uint64_t value) +{ + __atomic_store_n(address, value, __ATOMIC_RELAXED); +} + +static inline void +_Py_atomic_store_uintptr_relaxed(volatile uintptr_t *address, uintptr_t value) +{ + __atomic_store_n(address, value, __ATOMIC_RELAXED); +} + +static inline void +_Py_atomic_store_uint_relaxed(volatile unsigned int *address, unsigned int value) +{ + __atomic_store_n(address, value, __ATOMIC_RELAXED); +} + +static inline void 
+_Py_atomic_store_ptr_relaxed(volatile void *address, void *value) +{ + __atomic_store_n((volatile void **)address, value, __ATOMIC_RELAXED); +} + +static inline void +_Py_atomic_store_ssize_relaxed(volatile Py_ssize_t *address, Py_ssize_t value) +{ + __atomic_store_n(address, value, __ATOMIC_RELAXED); +} + + +static inline void +_Py_atomic_store_uint64_release(volatile uint64_t *address, uint64_t value) +{ + __atomic_store_n(address, value, __ATOMIC_RELEASE); +} + +static inline void +_Py_atomic_store_ptr_release(volatile void *address, void *value) +{ + __atomic_store_n((volatile void **)address, value, __ATOMIC_RELEASE); +} + + static inline void +_Py_atomic_fence_seq_cst(void) +{ + __atomic_thread_fence(__ATOMIC_SEQ_CST); +} + + static inline void +_Py_atomic_fence_release(void) +{ + __atomic_thread_fence(__ATOMIC_RELEASE); +} diff --git a/Include/pyatomic_msc.h b/Include/pyatomic_msc.h new file mode 100644 index 000000000000000..626bc7a84bdbf2c --- /dev/null +++ b/Include/pyatomic_msc.h @@ -0,0 +1,861 @@ +#ifndef Py_ATOMIC_MSC_H +# error "this header file must not be included directly" +#endif + +// This is the implementation of Python atomic operations for MSVC if the +// compiler does not support C11 or C++11 atomics. 
+ +#include <intrin.h> + + +static inline int +_Py_atomic_add_int(volatile int *address, int value) +{ + return (int)_InterlockedExchangeAdd((volatile long*)address, (long)value); +} + +static inline unsigned int +_Py_atomic_add_uint(volatile unsigned int *address, unsigned int value) +{ + return (unsigned int)_InterlockedExchangeAdd((volatile long*)address, (long)value); +} + +static inline int8_t +_Py_atomic_add_int8(volatile int8_t *address, int8_t value) +{ + return (int8_t)_InterlockedExchangeAdd8((volatile char*)address, (char)value); +} + +static inline int16_t +_Py_atomic_add_int16(volatile int16_t *address, int16_t value) +{ + return (int16_t)_InterlockedExchangeAdd16((volatile short*)address, (short)value); +} + +static inline int32_t +_Py_atomic_add_int32(volatile int32_t *address, int32_t value) +{ + return (int32_t)_InterlockedExchangeAdd((volatile long*)address, (long)value); +} + +static inline int64_t +_Py_atomic_add_int64(volatile int64_t *address, int64_t value) +{ +#if defined(_M_X64) || defined(_M_ARM64) + return (int64_t)_InterlockedExchangeAdd64((volatile __int64*)address, (__int64)value); +#else + for (;;) { + int64_t old_value = *address; + int64_t new_value = old_value + value; + if (_InterlockedCompareExchange64((volatile __int64*)address, (__int64)new_value, (__int64)old_value) == (__int64)old_value) { + return old_value; + } + } +#endif +} + +static inline intptr_t +_Py_atomic_add_intptr(volatile intptr_t *address, intptr_t value) +{ +#if SIZEOF_VOID_P == 8 + return (intptr_t)_InterlockedExchangeAdd64((volatile __int64*)address, (__int64)value); +#else + return (intptr_t)_InterlockedExchangeAdd((volatile long*)address, (long)value); +#endif +} + +static inline uint8_t +_Py_atomic_add_uint8(volatile uint8_t *address, uint8_t value) +{ + return (uint8_t)_InterlockedExchangeAdd8((volatile char*)address, (char)value); +} + +static inline uint16_t +_Py_atomic_add_uint16(volatile uint16_t *address, uint16_t value) +{ + return (uint16_t)_InterlockedExchangeAdd16((volatile 
short*)address, (short)value); +} + +static inline uint32_t +_Py_atomic_add_uint32(volatile uint32_t *address, uint32_t value) +{ + return (uint32_t)_InterlockedExchangeAdd((volatile long*)address, (long)value); +} + +static inline uint64_t +_Py_atomic_add_uint64(volatile uint64_t *address, uint64_t value) +{ + return (uint64_t)_Py_atomic_add_int64((volatile int64_t*)address, (int64_t)value); +} + +static inline uintptr_t +_Py_atomic_add_uintptr(volatile uintptr_t *address, uintptr_t value) +{ +#if SIZEOF_VOID_P == 8 + return (uintptr_t)_InterlockedExchangeAdd64((volatile __int64*)address, (__int64)value); +#else + return (uintptr_t)_InterlockedExchangeAdd((volatile long*)address, (long)value); +#endif +} + +static inline Py_ssize_t +_Py_atomic_add_ssize(volatile Py_ssize_t *address, Py_ssize_t value) +{ +#if SIZEOF_SIZE_T == 8 + return (Py_ssize_t)_InterlockedExchangeAdd64((volatile __int64*)address, (__int64)value); +#else + return (Py_ssize_t)_InterlockedExchangeAdd((volatile long*)address, (long)value); +#endif +} + + +static inline int +_Py_atomic_compare_exchange_int(volatile int *address, int expected, int value) +{ + return (long)expected == _InterlockedCompareExchange((volatile long*)address, (long)value, (long)expected); +} + +static inline int +_Py_atomic_compare_exchange_int8(volatile int8_t *address, int8_t expected, int8_t value) +{ + return (char)expected == _InterlockedCompareExchange8((volatile char*)address, (char)value, (char)expected); +} + +static inline int +_Py_atomic_compare_exchange_int16(volatile int16_t *address, int16_t expected, int16_t value) +{ + return (short)expected == _InterlockedCompareExchange16((volatile short*)address, (short)value, (short)expected); +} + +static inline int +_Py_atomic_compare_exchange_int32(volatile int32_t *address, int32_t expected, int32_t value) +{ + return (long)expected == _InterlockedCompareExchange((volatile long*)address, (long)value, (long)expected); +} + +static inline int 
+_Py_atomic_compare_exchange_int64(volatile int64_t *address, int64_t expected, int64_t value) +{ + return (__int64)expected == _InterlockedCompareExchange64((volatile __int64*)address, (__int64)value, (__int64)expected); +} + +static inline int +_Py_atomic_compare_exchange_intptr(volatile intptr_t *address, intptr_t expected, intptr_t value) +{ + return (void *)expected == _InterlockedCompareExchangePointer((void * volatile *)address, (void *)value, (void *)expected); +} + +static inline int +_Py_atomic_compare_exchange_uint8(volatile uint8_t *address, uint8_t expected, uint8_t value) +{ + return (char)expected == _InterlockedCompareExchange8((volatile char*)address, (char)value, (char)expected); +} + +static inline int +_Py_atomic_compare_exchange_uint16(volatile uint16_t *address, uint16_t expected, uint16_t value) +{ + return (short)expected == _InterlockedCompareExchange16((volatile short*)address, (short)value, (short)expected); +} + +static inline int +_Py_atomic_compare_exchange_uint(volatile unsigned int *address, unsigned int expected, unsigned int value) +{ + return (long)expected == _InterlockedCompareExchange((volatile long*)address, (long)value, (long)expected); +} + +static inline int +_Py_atomic_compare_exchange_uint32(volatile uint32_t *address, uint32_t expected, uint32_t value) +{ + return (long)expected == _InterlockedCompareExchange((volatile long*)address, (long)value, (long)expected); +} + +static inline int +_Py_atomic_compare_exchange_uint64(volatile uint64_t *address, uint64_t expected, uint64_t value) +{ + return (__int64)expected == _InterlockedCompareExchange64((volatile __int64*)address, (__int64)value, (__int64)expected); +} + +static inline int +_Py_atomic_compare_exchange_uintptr(volatile uintptr_t *address, uintptr_t expected, uintptr_t value) +{ + return (void *)expected == _InterlockedCompareExchangePointer((void * volatile *)address, (void *)value, (void *)expected); +} + +static inline int 
+_Py_atomic_compare_exchange_ssize(volatile Py_ssize_t *address, Py_ssize_t expected, Py_ssize_t value) +{ +#if SIZEOF_SIZE_T == 8 + return (__int64)expected == _InterlockedCompareExchange64((volatile __int64*)address, (__int64)value, (__int64)expected); +#else + return (long)expected == _InterlockedCompareExchange((volatile long*)address, (long)value, (long)expected); +#endif +} + +static inline int +_Py_atomic_compare_exchange_ptr(volatile void *address, void *expected, void *value) +{ + return (void *)expected == _InterlockedCompareExchangePointer((void * volatile *)address, (void *)value, (void *)expected); +} + +static inline int +_Py_atomic_exchange_int(volatile int *address, int value) +{ + return (int)_InterlockedExchange((volatile long*)address, (long)value); +} + +static inline int8_t +_Py_atomic_exchange_int8(volatile int8_t *address, int8_t value) +{ + return (int8_t)_InterlockedExchange8((volatile char*)address, (char)value); +} + +static inline int16_t +_Py_atomic_exchange_int16(volatile int16_t *address, int16_t value) +{ + return (int16_t)_InterlockedExchange16((volatile short*)address, (short)value); +} + +static inline int32_t +_Py_atomic_exchange_int32(volatile int32_t *address, int32_t value) +{ + return (int32_t)_InterlockedExchange((volatile long*)address, (long)value); +} + +static inline int64_t +_Py_atomic_exchange_int64(volatile int64_t *address, int64_t value) +{ +#if defined(_M_X64) || defined(_M_ARM64) + return (int64_t)_InterlockedExchange64((volatile __int64*)address, (__int64)value); +#else + for (;;) { + int64_t old_value = *address; + int64_t new_value = value; + if (_InterlockedCompareExchange64((volatile __int64*)address, (__int64)new_value, (__int64)old_value)) { + return old_value; + } + } +#endif +} + +static inline intptr_t +_Py_atomic_exchange_intptr(volatile intptr_t *address, intptr_t value) +{ + return (intptr_t)_InterlockedExchangePointer((void * volatile *)address, (void *)value); +} + +static inline unsigned int 
+_Py_atomic_exchange_uint(volatile unsigned int *address, unsigned int value) +{ + return (unsigned int)_InterlockedExchange((volatile long*)address, (long)value); +} + +static inline uint8_t +_Py_atomic_exchange_uint8(volatile uint8_t *address, uint8_t value) +{ + return (uint8_t)_InterlockedExchange8((volatile char*)address, (char)value); +} + +static inline uint16_t +_Py_atomic_exchange_uint16(volatile uint16_t *address, uint16_t value) +{ + return (uint16_t)_InterlockedExchange16((volatile short*)address, (short)value); +} + +static inline uint32_t +_Py_atomic_exchange_uint32(volatile uint32_t *address, uint32_t value) +{ + return (uint32_t)_InterlockedExchange((volatile long*)address, (long)value); +} + +static inline uint64_t +_Py_atomic_exchange_uint64(volatile uint64_t *address, uint64_t value) +{ + return (uint64_t)_Py_atomic_exchange_int64((volatile __int64*)address, (__int64)value); +} + +static inline uintptr_t +_Py_atomic_exchange_uintptr(volatile uintptr_t *address, uintptr_t value) +{ + return (uintptr_t)_InterlockedExchangePointer((void * volatile *)address, (void *)value); +} + +static inline Py_ssize_t +_Py_atomic_exchange_ssize(volatile Py_ssize_t *address, Py_ssize_t value) +{ +#if SIZEOF_SIZE_T == 8 + return (Py_ssize_t)_InterlockedExchange64((volatile __int64*)address, (__int64)value); +#else + return (Py_ssize_t)_InterlockedExchange((volatile long*)address, (long)value); +#endif +} + +static inline void * +_Py_atomic_exchange_ptr(volatile void *address, void *value) +{ + return (void *)_InterlockedExchangePointer((void * volatile *)address, (void *)value); +} + +static inline uint8_t +_Py_atomic_and_uint8(volatile uint8_t *address, uint8_t value) +{ + return (uint8_t)_InterlockedAnd8((volatile char*)address, (char)value); +} + +static inline uint16_t +_Py_atomic_and_uint16(volatile uint16_t *address, uint16_t value) +{ + return (uint16_t)_InterlockedAnd16((volatile short*)address, (short)value); +} + +static inline uint32_t 
+_Py_atomic_and_uint32(volatile uint32_t *address, uint32_t value) +{ + return (uint32_t)_InterlockedAnd((volatile long*)address, (long)value); +} + +static inline uint64_t +_Py_atomic_and_uint64(volatile uint64_t *address, uint64_t value) +{ +#if defined(_M_X64) || defined(_M_ARM64) + return (uint64_t)_InterlockedAnd64((volatile __int64*)address, (__int64)value); +#else + for (;;) { + uint64_t old_value = *address; + uint64_t new_value = old_value & value; + if (_InterlockedCompareExchange64((volatile __int64*)address, (__int64)new_value, (__int64)old_value)) { + return old_value; + } + } +#endif +} + +static inline uintptr_t +_Py_atomic_and_uintptr(volatile uintptr_t *address, uintptr_t value) +{ +#if SIZEOF_VOID_P == 8 + return (uintptr_t)_InterlockedAnd64((volatile __int64*)address, (__int64)value); +#else + return (uintptr_t)_InterlockedAnd((volatile long*)address, (long)value); +#endif +} + +static inline uint8_t +_Py_atomic_or_uint8(volatile uint8_t *address, uint8_t value) +{ + return (uint8_t)_InterlockedOr8((volatile char*)address, (char)value); +} + +static inline uint16_t +_Py_atomic_or_uint16(volatile uint16_t *address, uint16_t value) +{ + return (uint16_t)_InterlockedOr16((volatile short*)address, (short)value); +} + +static inline uint32_t +_Py_atomic_or_uint32(volatile uint32_t *address, uint32_t value) +{ + return (uint32_t)_InterlockedOr((volatile long*)address, (long)value); +} + +static inline uint64_t +_Py_atomic_or_uint64(volatile uint64_t *address, uint64_t value) +{ +#if defined(_M_X64) || defined(_M_ARM64) + return (uint64_t)_InterlockedOr64((volatile __int64*)address, (__int64)value); +#else + for (;;) { + uint64_t old_value = *address; + uint64_t new_value = old_value | value; + if (_InterlockedCompareExchange64((volatile __int64*)address, (__int64)new_value, (__int64)old_value)) { + return old_value; + } + } +#endif +} + +static inline uintptr_t +_Py_atomic_or_uintptr(volatile uintptr_t *address, uintptr_t value) +{ +#if SIZEOF_VOID_P 
== 8 + return (uintptr_t)_InterlockedOr64((volatile __int64*)address, (__int64)value); +#else + return (uintptr_t)_InterlockedOr((volatile long*)address, (long)value); +#endif +} + +static inline int +_Py_atomic_load_int(const volatile int *address) +{ +#if defined(_M_X64) || defined(_M_IX86) + return *address; +#elif defined(_M_ARM64) + return (int)__ldar32((unsigned __int32 volatile*)address); +#else +#error no implementation of _Py_atomic_load_int +#endif +} + +static inline int8_t +_Py_atomic_load_int8(const volatile int8_t *address) +{ +#if defined(_M_X64) || defined(_M_IX86) + return *address; +#elif defined(_M_ARM64) + return (int8_t)__ldar8((unsigned __int8 volatile*)address); +#else +#error no implementation of _Py_atomic_load_int8 +#endif +} + +static inline int16_t +_Py_atomic_load_int16(const volatile int16_t *address) +{ +#if defined(_M_X64) || defined(_M_IX86) + return *address; +#elif defined(_M_ARM64) + return (int16_t)__ldar16((unsigned __int16 volatile*)address); +#else +#error no implementation of _Py_atomic_load_int16 +#endif +} + +static inline int32_t +_Py_atomic_load_int32(const volatile int32_t *address) +{ +#if defined(_M_X64) || defined(_M_IX86) + return *address; +#elif defined(_M_ARM64) + return (int32_t)__ldar32((unsigned __int32 volatile*)address); +#else +#error no implementation of _Py_atomic_load_int32 +#endif +} + +static inline int64_t +_Py_atomic_load_int64(const volatile int64_t *address) +{ +#if defined(_M_X64) || defined(_M_IX86) + return *address; +#elif defined(_M_ARM64) + return __ldar64((unsigned __int64 volatile*)address); +#else +#error no implementation of _Py_atomic_load_int64 +#endif +} + +static inline intptr_t +_Py_atomic_load_intptr(const volatile intptr_t *address) +{ +#if defined(_M_X64) || defined(_M_IX86) + return *address; +#elif defined(_M_ARM64) + return __ldar64((unsigned __int64 volatile*)address); +#else +#error no implementation of _Py_atomic_load_intptr +#endif +} + +static inline uint8_t 
+_Py_atomic_load_uint8(const volatile uint8_t *address) +{ +#if defined(_M_X64) || defined(_M_IX86) + return *address; +#elif defined(_M_ARM64) + return __ldar8((unsigned __int8 volatile*)address); +#else +#error no implementation of _Py_atomic_load_uint8 +#endif +} + +static inline uint16_t +_Py_atomic_load_uint16(const volatile uint16_t *address) +{ +#if defined(_M_X64) || defined(_M_IX86) + return *address; +#elif defined(_M_ARM64) + return __ldar16((unsigned __int16 volatile*)address); +#else +#error no implementation of _Py_atomic_load_uint16 +#endif +} + +static inline uint32_t +_Py_atomic_load_uint32(const volatile uint32_t *address) +{ +#if defined(_M_X64) || defined(_M_IX86) + return *address; +#elif defined(_M_ARM64) + return __ldar32((unsigned __int32 volatile*)address); +#else +#error no implementation of _Py_atomic_load_uint32 +#endif +} + +static inline uint64_t +_Py_atomic_load_uint64(const volatile uint64_t *address) +{ +#if defined(_M_X64) || defined(_M_IX86) + return *address; +#elif defined(_M_ARM64) + return __ldar64((unsigned __int64 volatile*)address); +#else +#error no implementation of _Py_atomic_load_uint64 +#endif +} + +static inline uintptr_t +_Py_atomic_load_uintptr(const volatile uintptr_t *address) +{ +#if defined(_M_X64) || defined(_M_IX86) + return *address; +#elif defined(_M_ARM64) + return __ldar64((unsigned __int64 volatile*)address); +#else +#error no implementation of _Py_atomic_load_uintptr +#endif +} + +static inline unsigned int +_Py_atomic_load_uint(const volatile unsigned int *address) +{ +#if defined(_M_X64) || defined(_M_IX86) + return *address; +#elif defined(_M_ARM64) + return __ldar32((unsigned __int32 volatile*)address); +#else +#error no implementation of _Py_atomic_load_uint +#endif +} + +static inline Py_ssize_t +_Py_atomic_load_ssize(const volatile Py_ssize_t* address) +{ +#if defined(_M_X64) || defined(_M_IX86) + return *address; +#elif defined(_M_ARM64) + return __ldar64((unsigned __int64 volatile*)address); 
+#else +#error no implementation of _Py_atomic_load_ssize +#endif +} + +static inline void * +_Py_atomic_load_ptr(const volatile void *address) +{ +#if defined(_M_X64) || defined(_M_IX86) + return *(void* volatile*)address; +#elif defined(_M_ARM64) + return (void *)__ldar64((unsigned __int64 volatile*)address); +#else +#error no implementation of _Py_atomic_load_ptr +#endif +} + +static inline int +_Py_atomic_load_int_relaxed(const volatile int* address) +{ + return *address; +} + +static inline int8_t +_Py_atomic_load_int8_relaxed(const volatile int8_t* address) +{ + return *address; +} + +static inline int16_t +_Py_atomic_load_int16_relaxed(const volatile int16_t* address) +{ + return *address; +} + +static inline int32_t +_Py_atomic_load_int32_relaxed(const volatile int32_t* address) +{ + return *address; +} + +static inline int64_t +_Py_atomic_load_int64_relaxed(const volatile int64_t* address) +{ + return *address; +} + +static inline intptr_t +_Py_atomic_load_intptr_relaxed(const volatile intptr_t* address) +{ + return *address; +} + +static inline uint8_t +_Py_atomic_load_uint8_relaxed(const volatile uint8_t* address) +{ + return *address; +} + +static inline uint16_t +_Py_atomic_load_uint16_relaxed(const volatile uint16_t* address) +{ + return *address; +} + +static inline uint32_t +_Py_atomic_load_uint32_relaxed(const volatile uint32_t* address) +{ + return *address; +} + +static inline uint64_t +_Py_atomic_load_uint64_relaxed(const volatile uint64_t* address) +{ + return *address; +} + +static inline uintptr_t +_Py_atomic_load_uintptr_relaxed(const volatile uintptr_t* address) +{ + return *address; +} + +static inline unsigned int +_Py_atomic_load_uint_relaxed(const volatile unsigned int *address) +{ + return *address; +} + +static inline Py_ssize_t +_Py_atomic_load_ssize_relaxed(const volatile Py_ssize_t* address) +{ + return *address; +} + +static inline void* +_Py_atomic_load_ptr_relaxed(const volatile void* address) +{ + return *(void * volatile 
*)address; +} + + + +static inline void +_Py_atomic_store_int(volatile int *address, int value) +{ + _InterlockedExchange((volatile long*)address, (long)value); +} + +static inline void +_Py_atomic_store_int8(volatile int8_t *address, int8_t value) +{ + _InterlockedExchange8((volatile char*)address, (char)value); +} + +static inline void +_Py_atomic_store_int16(volatile int16_t *address, int16_t value) +{ + _InterlockedExchange16((volatile short*)address, (short)value); +} + +static inline void +_Py_atomic_store_int32(volatile int32_t *address, int32_t value) +{ + _InterlockedExchange((volatile long*)address, (long)value); +} + +static inline void +_Py_atomic_store_int64(volatile int64_t *address, int64_t value) +{ + _Py_atomic_exchange_int64(address, value); +} + +static inline void +_Py_atomic_store_intptr(volatile intptr_t *address, intptr_t value) +{ + _InterlockedExchangePointer((void * volatile *)address, (void *)value); +} + +static inline void +_Py_atomic_store_uint8(volatile uint8_t *address, uint8_t value) +{ + _InterlockedExchange8((volatile char*)address, (char)value); +} + +static inline void +_Py_atomic_store_uint16(volatile uint16_t *address, uint16_t value) +{ + _InterlockedExchange16((volatile short*)address, (short)value); +} + +static inline void +_Py_atomic_store_uint32(volatile uint32_t *address, uint32_t value) +{ + _InterlockedExchange((volatile long*)address, (long)value); +} + +static inline void +_Py_atomic_store_uint64(volatile uint64_t *address, uint64_t value) +{ + _Py_atomic_exchange_int64((volatile __int64*)address, (__int64)value); +} + +static inline void +_Py_atomic_store_uintptr(volatile uintptr_t *address, uintptr_t value) +{ + _InterlockedExchangePointer((void * volatile *)address, (void *)value); +} + +static inline void +_Py_atomic_store_uint(volatile unsigned int *address, unsigned int value) +{ + _InterlockedExchange((volatile long*)address, (long)value); +} + +static inline void +_Py_atomic_store_ptr(volatile void *address, 
void *value) +{ + _InterlockedExchangePointer((void * volatile *)address, (void *)value); +} + +static inline void +_Py_atomic_store_ssize(volatile Py_ssize_t* address, Py_ssize_t value) +{ +#if SIZEOF_SIZE_T == 8 + _InterlockedExchange64((volatile __int64*)address, (__int64)value); +#else + _InterlockedExchange((volatile long*)address, (long)value); +#endif +} + + +static inline void +_Py_atomic_store_int_relaxed(volatile int* address, int value) +{ + *address = value; +} + +static inline void +_Py_atomic_store_int8_relaxed(volatile int8_t* address, int8_t value) +{ + *address = value; +} + +static inline void +_Py_atomic_store_int16_relaxed(volatile int16_t* address, int16_t value) +{ + *address = value; +} + +static inline void +_Py_atomic_store_int32_relaxed(volatile int32_t* address, int32_t value) +{ + *address = value; +} + +static inline void +_Py_atomic_store_int64_relaxed(volatile int64_t* address, int64_t value) +{ + *address = value; +} + +static inline void +_Py_atomic_store_intptr_relaxed(volatile intptr_t* address, intptr_t value) +{ + *address = value; +} + +static inline void +_Py_atomic_store_uint8_relaxed(volatile uint8_t* address, uint8_t value) +{ + *address = value; +} + +static inline void +_Py_atomic_store_uint16_relaxed(volatile uint16_t* address, uint16_t value) +{ + *address = value; +} + +static inline void +_Py_atomic_store_uint32_relaxed(volatile uint32_t* address, uint32_t value) +{ + *address = value; +} + +static inline void +_Py_atomic_store_uint64_relaxed(volatile uint64_t* address, uint64_t value) +{ + *address = value; +} + +static inline void +_Py_atomic_store_uintptr_relaxed(volatile uintptr_t* address, uintptr_t value) +{ + *address = value; +} + +static inline void +_Py_atomic_store_uint_relaxed(volatile unsigned int *address, unsigned int value) +{ + *address = value; +} + +static inline void +_Py_atomic_store_ptr_relaxed(volatile void* address, void* value) +{ + *(void * volatile *)address = value; +} + +static inline void 
+_Py_atomic_store_ssize_relaxed(volatile Py_ssize_t* address, Py_ssize_t value) +{ + *address = value; +} + +static inline void +_Py_atomic_store_uint64_release(volatile uint64_t* address, uint64_t value) +{ +#if defined(_M_X64) || defined(_M_IX86) + *address = value; +#elif defined(_M_ARM64) + __stlr64(address, value); +#else +#error no implementation of _Py_atomic_store_uint64_release +#endif +} + +static inline void +_Py_atomic_store_ptr_release(volatile void* address, void* value) +{ +#if defined(_M_X64) || defined(_M_IX86) + *(void * volatile *)address = value; +#elif defined(_M_ARM64) + __stlr64(address, (uintptr_t)value); +#else +#error no implementation of _Py_atomic_store_ptr_release +#endif +} + + static inline void +_Py_atomic_fence_seq_cst(void) +{ +#if defined(_M_ARM64) + __dmb(_ARM64_BARRIER_ISH); +#elif defined(_M_X64) + __faststorefence(); +#elif defined(_M_IX86) + _mm_mfence(); +#else +#error no implementation of _Py_atomic_fence_seq_cst +#endif +} + + static inline void +_Py_atomic_fence_release(void) +{ +#if defined(_M_ARM64) + __dmb(_ARM64_BARRIER_ISH); +#elif defined(_M_X64) || defined(_M_IX86) + _ReadWriteBarrier(); +#else +#error no implementation of _Py_atomic_fence_release +#endif +} diff --git a/Include/pyatomic_std.h b/Include/pyatomic_std.h new file mode 100644 index 000000000000000..afc4e76eed120d9 --- /dev/null +++ b/Include/pyatomic_std.h @@ -0,0 +1,799 @@ +#ifndef Py_ATOMIC_STD_H +# error "this header file must not be included directly" +#endif + +// This is the implementation of Python atomic operations using C++11 or C11 +// atomics. Note that the pyatomic_gcc.h implementation is preferred for GCC +// compatible compilers, even if they support C++11 atomics. 
+ +#ifdef __cplusplus +extern "C++" { +#include +} +#define _Py_USING_STD using namespace std; +#define _Atomic(tp) atomic +#else +#include +#define _Py_USING_STD +#endif + + +static inline int +_Py_atomic_add_int(volatile int *address, int value) +{ + _Py_USING_STD + return atomic_fetch_add((volatile _Atomic(int)*)address, value); +} + +static inline int8_t +_Py_atomic_add_int8(volatile int8_t *address, int8_t value) +{ + _Py_USING_STD + return atomic_fetch_add((volatile _Atomic(int8_t)*)address, value); +} + +static inline int16_t +_Py_atomic_add_int16(volatile int16_t *address, int16_t value) +{ + _Py_USING_STD + return atomic_fetch_add((volatile _Atomic(int16_t)*)address, value); +} + +static inline int32_t +_Py_atomic_add_int32(volatile int32_t *address, int32_t value) +{ + _Py_USING_STD + return atomic_fetch_add((volatile _Atomic(int32_t)*)address, value); +} + +static inline int64_t +_Py_atomic_add_int64(volatile int64_t *address, int64_t value) +{ + _Py_USING_STD + return atomic_fetch_add((volatile _Atomic(int64_t)*)address, value); +} + +static inline intptr_t +_Py_atomic_add_intptr(volatile intptr_t *address, intptr_t value) +{ + _Py_USING_STD + return atomic_fetch_add((volatile _Atomic(intptr_t)*)address, value); +} + +static inline unsigned int +_Py_atomic_add_uint(volatile unsigned int *address, unsigned int value) +{ + _Py_USING_STD + return atomic_fetch_add((volatile _Atomic(unsigned int)*)address, value); +} + +static inline uint8_t +_Py_atomic_add_uint8(volatile uint8_t *address, uint8_t value) +{ + _Py_USING_STD + return atomic_fetch_add((volatile _Atomic(uint8_t)*)address, value); +} + +static inline uint16_t +_Py_atomic_add_uint16(volatile uint16_t *address, uint16_t value) +{ + _Py_USING_STD + return atomic_fetch_add((volatile _Atomic(uint16_t)*)address, value); +} + +static inline uint32_t +_Py_atomic_add_uint32(volatile uint32_t *address, uint32_t value) +{ + _Py_USING_STD + return atomic_fetch_add((volatile _Atomic(uint32_t)*)address, 
value); +} + +static inline uint64_t +_Py_atomic_add_uint64(volatile uint64_t *address, uint64_t value) +{ + _Py_USING_STD + return atomic_fetch_add((volatile _Atomic(uint64_t)*)address, value); +} + +static inline uintptr_t +_Py_atomic_add_uintptr(volatile uintptr_t *address, uintptr_t value) +{ + _Py_USING_STD + return atomic_fetch_add((volatile _Atomic(uintptr_t)*)address, value); +} + +static inline Py_ssize_t +_Py_atomic_add_ssize(volatile Py_ssize_t *address, Py_ssize_t value) +{ + _Py_USING_STD + return atomic_fetch_add((volatile _Atomic(Py_ssize_t)*)address, value); +} + +static inline int +_Py_atomic_compare_exchange_int(volatile int *address, int expected, int value) +{ + _Py_USING_STD + return atomic_compare_exchange_strong((volatile _Atomic(int)*)address, &expected, value); +} + +static inline int +_Py_atomic_compare_exchange_int8(volatile int8_t *address, int8_t expected, int8_t value) +{ + _Py_USING_STD + return atomic_compare_exchange_strong((volatile _Atomic(int8_t)*)address, &expected, value); +} + +static inline int +_Py_atomic_compare_exchange_int16(volatile int16_t *address, int16_t expected, int16_t value) +{ + _Py_USING_STD + return atomic_compare_exchange_strong((volatile _Atomic(int16_t)*)address, &expected, value); +} + +static inline int +_Py_atomic_compare_exchange_int32(volatile int32_t *address, int32_t expected, int32_t value) +{ + _Py_USING_STD + return atomic_compare_exchange_strong((volatile _Atomic(int32_t)*)address, &expected, value); +} + +static inline int +_Py_atomic_compare_exchange_int64(volatile int64_t *address, int64_t expected, int64_t value) +{ + _Py_USING_STD + return atomic_compare_exchange_strong((volatile _Atomic(int64_t)*)address, &expected, value); +} + +static inline int +_Py_atomic_compare_exchange_intptr(volatile intptr_t *address, intptr_t expected, intptr_t value) +{ + _Py_USING_STD + return atomic_compare_exchange_strong((volatile _Atomic(intptr_t)*)address, &expected, value); +} + +static inline int 
+_Py_atomic_compare_exchange_uint(volatile unsigned int *address, unsigned int expected, unsigned int value) +{ + _Py_USING_STD + return atomic_compare_exchange_strong((volatile _Atomic(unsigned int)*)address, &expected, value); +} + +static inline int +_Py_atomic_compare_exchange_uint8(volatile uint8_t *address, uint8_t expected, uint8_t value) +{ + _Py_USING_STD + return atomic_compare_exchange_strong((volatile _Atomic(uint8_t)*)address, &expected, value); +} + +static inline int +_Py_atomic_compare_exchange_uint16(volatile uint16_t *address, uint16_t expected, uint16_t value) +{ + _Py_USING_STD + return atomic_compare_exchange_strong((volatile _Atomic(uint16_t)*)address, &expected, value); +} + +static inline int +_Py_atomic_compare_exchange_uint32(volatile uint32_t *address, uint32_t expected, uint32_t value) +{ + _Py_USING_STD + return atomic_compare_exchange_strong((volatile _Atomic(uint32_t)*)address, &expected, value); +} + +static inline int +_Py_atomic_compare_exchange_uint64(volatile uint64_t *address, uint64_t expected, uint64_t value) +{ + _Py_USING_STD + return atomic_compare_exchange_strong((volatile _Atomic(uint64_t)*)address, &expected, value); +} + +static inline int +_Py_atomic_compare_exchange_uintptr(volatile uintptr_t *address, uintptr_t expected, uintptr_t value) +{ + _Py_USING_STD + return atomic_compare_exchange_strong((volatile _Atomic(uintptr_t)*)address, &expected, value); +} + +static inline int +_Py_atomic_compare_exchange_ssize(volatile Py_ssize_t *address, Py_ssize_t expected, Py_ssize_t value) +{ + _Py_USING_STD + return atomic_compare_exchange_strong((volatile _Atomic(Py_ssize_t)*)address, &expected, value); +} + +static inline int +_Py_atomic_compare_exchange_ptr(volatile void *address, void *expected, void *value) +{ + _Py_USING_STD + return atomic_compare_exchange_strong((volatile _Atomic(void *)*)address, &expected, value); +} + + +static inline int +_Py_atomic_exchange_int(volatile int *address, int value) +{ + _Py_USING_STD + 
return atomic_exchange((volatile _Atomic(int)*)address, value); +} + +static inline int8_t +_Py_atomic_exchange_int8(volatile int8_t *address, int8_t value) +{ + _Py_USING_STD + return atomic_exchange((volatile _Atomic(int8_t)*)address, value); +} + +static inline int16_t +_Py_atomic_exchange_int16(volatile int16_t *address, int16_t value) +{ + _Py_USING_STD + return atomic_exchange((volatile _Atomic(int16_t)*)address, value); +} + +static inline int32_t +_Py_atomic_exchange_int32(volatile int32_t *address, int32_t value) +{ + _Py_USING_STD + return atomic_exchange((volatile _Atomic(int32_t)*)address, value); +} + +static inline int64_t +_Py_atomic_exchange_int64(volatile int64_t *address, int64_t value) +{ + _Py_USING_STD + return atomic_exchange((volatile _Atomic(int64_t)*)address, value); +} + +static inline intptr_t +_Py_atomic_exchange_intptr(volatile intptr_t *address, intptr_t value) +{ + _Py_USING_STD + return atomic_exchange((volatile _Atomic(intptr_t)*)address, value); +} + +static inline unsigned int +_Py_atomic_exchange_uint(volatile unsigned int *address, unsigned int value) +{ + _Py_USING_STD + return atomic_exchange((volatile _Atomic(unsigned int)*)address, value); +} + +static inline uint8_t +_Py_atomic_exchange_uint8(volatile uint8_t *address, uint8_t value) +{ + _Py_USING_STD + return atomic_exchange((volatile _Atomic(uint8_t)*)address, value); +} + +static inline uint16_t +_Py_atomic_exchange_uint16(volatile uint16_t *address, uint16_t value) +{ + _Py_USING_STD + return atomic_exchange((volatile _Atomic(uint16_t)*)address, value); +} + +static inline uint32_t +_Py_atomic_exchange_uint32(volatile uint32_t *address, uint32_t value) +{ + _Py_USING_STD + return atomic_exchange((volatile _Atomic(uint32_t)*)address, value); +} + +static inline uint64_t +_Py_atomic_exchange_uint64(volatile uint64_t *address, uint64_t value) +{ + _Py_USING_STD + return atomic_exchange((volatile _Atomic(uint64_t)*)address, value); +} + +static inline uintptr_t 
+_Py_atomic_exchange_uintptr(volatile uintptr_t *address, uintptr_t value) +{ + _Py_USING_STD + return atomic_exchange((volatile _Atomic(uintptr_t)*)address, value); +} + +static inline Py_ssize_t +_Py_atomic_exchange_ssize(volatile Py_ssize_t *address, Py_ssize_t value) +{ + _Py_USING_STD + return atomic_exchange((volatile _Atomic(Py_ssize_t)*)address, value); +} + +static inline void * +_Py_atomic_exchange_ptr(volatile void *address, void *value) +{ + _Py_USING_STD + return atomic_exchange((volatile _Atomic(void *)*)address, value); +} + +static inline uint8_t +_Py_atomic_and_uint8(volatile uint8_t *address, uint8_t value) +{ + _Py_USING_STD + return atomic_fetch_and((volatile _Atomic(uint8_t)*)address, value); +} + +static inline uint16_t +_Py_atomic_and_uint16(volatile uint16_t *address, uint16_t value) +{ + _Py_USING_STD + return atomic_fetch_and((volatile _Atomic(uint16_t)*)address, value); +} + + +static inline uint32_t +_Py_atomic_and_uint32(volatile uint32_t *address, uint32_t value) +{ + _Py_USING_STD + return atomic_fetch_and((volatile _Atomic(uint32_t)*)address, value); +} + +static inline uint64_t +_Py_atomic_and_uint64(volatile uint64_t *address, uint64_t value) +{ + _Py_USING_STD + return atomic_fetch_and((volatile _Atomic(uint64_t)*)address, value); +} + +static inline uintptr_t +_Py_atomic_and_uintptr(volatile uintptr_t *address, uintptr_t value) +{ + _Py_USING_STD + return atomic_fetch_and((volatile _Atomic(uintptr_t)*)address, value); +} + +static inline uint8_t +_Py_atomic_or_uint8(volatile uint8_t *address, uint8_t value) +{ + _Py_USING_STD + return atomic_fetch_or((volatile _Atomic(uint8_t)*)address, value); +} + +static inline uint16_t +_Py_atomic_or_uint16(volatile uint16_t *address, uint16_t value) +{ + _Py_USING_STD + return atomic_fetch_or((volatile _Atomic(uint16_t)*)address, value); +} + +static inline uint32_t +_Py_atomic_or_uint32(volatile uint32_t *address, uint32_t value) +{ + _Py_USING_STD + return atomic_fetch_or((volatile 
_Atomic(uint32_t)*)address, value); +} + +static inline uint64_t +_Py_atomic_or_uint64(volatile uint64_t *address, uint64_t value) +{ + _Py_USING_STD + return atomic_fetch_or((volatile _Atomic(uint64_t)*)address, value); +} + +static inline uintptr_t +_Py_atomic_or_uintptr(volatile uintptr_t *address, uintptr_t value) +{ + _Py_USING_STD + return atomic_fetch_or((volatile _Atomic(uintptr_t)*)address, value); +} + +static inline int +_Py_atomic_load_int(const volatile int *address) +{ + _Py_USING_STD + return atomic_load((const volatile _Atomic(int)*)address); +} + +static inline int8_t +_Py_atomic_load_int8(const volatile int8_t *address) +{ + _Py_USING_STD + return atomic_load((const volatile _Atomic(int8_t)*)address); +} + +static inline int16_t +_Py_atomic_load_int16(const volatile int16_t *address) +{ + _Py_USING_STD + return atomic_load((const volatile _Atomic(int16_t)*)address); +} + +static inline int32_t +_Py_atomic_load_int32(const volatile int32_t *address) +{ + _Py_USING_STD + return atomic_load((const volatile _Atomic(int32_t)*)address); +} + +static inline int64_t +_Py_atomic_load_int64(const volatile int64_t *address) +{ + _Py_USING_STD + return atomic_load((const volatile _Atomic(int64_t)*)address); +} + +static inline intptr_t +_Py_atomic_load_intptr(const volatile intptr_t *address) +{ + _Py_USING_STD + return atomic_load((const volatile _Atomic(intptr_t)*)address); +} + +static inline uint8_t +_Py_atomic_load_uint8(const volatile uint8_t *address) +{ + _Py_USING_STD + return atomic_load((const volatile _Atomic(uint8_t)*)address); +} + +static inline uint16_t +_Py_atomic_load_uint16(const volatile uint16_t *address) +{ + _Py_USING_STD + return atomic_load((const volatile _Atomic(uint32_t)*)address); +} + +static inline uint32_t +_Py_atomic_load_uint32(const volatile uint32_t *address) +{ + _Py_USING_STD + return atomic_load((const volatile _Atomic(uint32_t)*)address); +} + +static inline uint64_t +_Py_atomic_load_uint64(const volatile uint64_t 
*address) +{ + _Py_USING_STD + return atomic_load((const volatile _Atomic(uint64_t)*)address); +} + +static inline uintptr_t +_Py_atomic_load_uintptr(const volatile uintptr_t *address) +{ + _Py_USING_STD + return atomic_load((const volatile _Atomic(uintptr_t)*)address); +} + +static inline unsigned int +_Py_atomic_load_uint(const volatile unsigned int *address) +{ + _Py_USING_STD + return atomic_load((const volatile _Atomic(unsigned int)*)address); +} + +static inline Py_ssize_t +_Py_atomic_load_ssize(const volatile Py_ssize_t *address) +{ + _Py_USING_STD + return atomic_load((const volatile _Atomic(Py_ssize_t)*)address); +} + +static inline void * +_Py_atomic_load_ptr(const volatile void *address) +{ + _Py_USING_STD + return atomic_load((const volatile _Atomic(void*)*)address); +} + + +static inline int +_Py_atomic_load_int_relaxed(const volatile int *address) +{ + _Py_USING_STD + return atomic_load_explicit((const volatile _Atomic(int)*)address, memory_order_relaxed); +} + +static inline int8_t +_Py_atomic_load_int8_relaxed(const volatile int8_t *address) +{ + _Py_USING_STD + return atomic_load_explicit((const volatile _Atomic(int8_t)*)address, memory_order_relaxed); +} + +static inline int16_t +_Py_atomic_load_int16_relaxed(const volatile int16_t *address) +{ + _Py_USING_STD + return atomic_load_explicit((const volatile _Atomic(int16_t)*)address, memory_order_relaxed); +} + +static inline int32_t +_Py_atomic_load_int32_relaxed(const volatile int32_t *address) +{ + _Py_USING_STD + return atomic_load_explicit((const volatile _Atomic(int32_t)*)address, memory_order_relaxed); +} + +static inline int64_t +_Py_atomic_load_int64_relaxed(const volatile int64_t *address) +{ + _Py_USING_STD + return atomic_load_explicit((const volatile _Atomic(int64_t)*)address, memory_order_relaxed); +} + +static inline intptr_t +_Py_atomic_load_intptr_relaxed(const volatile intptr_t *address) +{ + _Py_USING_STD + return atomic_load_explicit((const volatile _Atomic(intptr_t)*)address, 
memory_order_relaxed); +} + +static inline uint8_t +_Py_atomic_load_uint8_relaxed(const volatile uint8_t *address) +{ + _Py_USING_STD + return atomic_load_explicit((const volatile _Atomic(uint8_t)*)address, memory_order_relaxed); +} + +static inline uint16_t +_Py_atomic_load_uint16_relaxed(const volatile uint16_t *address) +{ + _Py_USING_STD + return atomic_load_explicit((const volatile _Atomic(uint16_t)*)address, memory_order_relaxed); +} + +static inline uint32_t +_Py_atomic_load_uint32_relaxed(const volatile uint32_t *address) +{ + _Py_USING_STD + return atomic_load_explicit((const volatile _Atomic(uint32_t)*)address, memory_order_relaxed); +} + +static inline uint64_t +_Py_atomic_load_uint64_relaxed(const volatile uint64_t *address) +{ + _Py_USING_STD + return atomic_load_explicit((const volatile _Atomic(uint64_t)*)address, memory_order_relaxed); +} + +static inline uintptr_t +_Py_atomic_load_uintptr_relaxed(const volatile uintptr_t *address) +{ + _Py_USING_STD + return atomic_load_explicit((const volatile _Atomic(uintptr_t)*)address, memory_order_relaxed); +} + +static inline unsigned int +_Py_atomic_load_uint_relaxed(const volatile unsigned int *address) +{ + _Py_USING_STD + return atomic_load_explicit((const volatile _Atomic(unsigned int)*)address, memory_order_relaxed); +} + +static inline Py_ssize_t +_Py_atomic_load_ssize_relaxed(const volatile Py_ssize_t *address) +{ + _Py_USING_STD + return atomic_load_explicit((const volatile _Atomic(Py_ssize_t)*)address, memory_order_relaxed); +} + +static inline void * +_Py_atomic_load_ptr_relaxed(const volatile void *address) +{ + _Py_USING_STD + return atomic_load_explicit((const volatile _Atomic(void*)*)address, memory_order_relaxed); +} + +static inline void +_Py_atomic_store_int(volatile int *address, int value) +{ + _Py_USING_STD + atomic_store((volatile _Atomic(int)*)address, value); +} + +static inline void +_Py_atomic_store_int8(volatile int8_t *address, int8_t value) +{ + _Py_USING_STD + 
atomic_store((volatile _Atomic(int8_t)*)address, value); +} + +static inline void +_Py_atomic_store_int16(volatile int16_t *address, int16_t value) +{ + _Py_USING_STD + atomic_store((volatile _Atomic(int16_t)*)address, value); +} + +static inline void +_Py_atomic_store_int32(volatile int32_t *address, int32_t value) +{ + _Py_USING_STD + atomic_store((volatile _Atomic(int32_t)*)address, value); +} + +static inline void +_Py_atomic_store_int64(volatile int64_t *address, int64_t value) +{ + _Py_USING_STD + atomic_store((volatile _Atomic(int64_t)*)address, value); +} + +static inline void +_Py_atomic_store_intptr(volatile intptr_t *address, intptr_t value) +{ + _Py_USING_STD + atomic_store((volatile _Atomic(intptr_t)*)address, value); +} + +static inline void +_Py_atomic_store_uint8(volatile uint8_t *address, uint8_t value) +{ + _Py_USING_STD + atomic_store((volatile _Atomic(uint8_t)*)address, value); +} + +static inline void +_Py_atomic_store_uint16(volatile uint16_t *address, uint16_t value) +{ + _Py_USING_STD + atomic_store((volatile _Atomic(uint16_t)*)address, value); +} + +static inline void +_Py_atomic_store_uint32(volatile uint32_t *address, uint32_t value) +{ + _Py_USING_STD + atomic_store((volatile _Atomic(uint32_t)*)address, value); +} + +static inline void +_Py_atomic_store_uint64(volatile uint64_t *address, uint64_t value) +{ + _Py_USING_STD + atomic_store((volatile _Atomic(uint64_t)*)address, value); +} + +static inline void +_Py_atomic_store_uintptr(volatile uintptr_t *address, uintptr_t value) +{ + _Py_USING_STD + atomic_store((volatile _Atomic(uintptr_t)*)address, value); +} + +static inline void +_Py_atomic_store_uint(volatile unsigned int *address, unsigned int value) +{ + _Py_USING_STD + atomic_store((volatile _Atomic(unsigned int)*)address, value); +} + +static inline void +_Py_atomic_store_ptr(volatile void *address, void *value) +{ + _Py_USING_STD + atomic_store((volatile _Atomic(void*)*)address, value); +} + +static inline void 
+_Py_atomic_store_ssize(volatile Py_ssize_t *address, Py_ssize_t value) +{ + _Py_USING_STD + atomic_store((volatile _Atomic(Py_ssize_t)*)address, value); +} + +static inline void +_Py_atomic_store_int_relaxed(volatile int *address, int value) +{ + _Py_USING_STD + atomic_store_explicit((volatile _Atomic(int)*)address, value, memory_order_relaxed); +} + +static inline void +_Py_atomic_store_int8_relaxed(volatile int8_t *address, int8_t value) +{ + _Py_USING_STD + atomic_store_explicit((volatile _Atomic(int8_t)*)address, value, memory_order_relaxed); +} + +static inline void +_Py_atomic_store_int16_relaxed(volatile int16_t *address, int16_t value) +{ + _Py_USING_STD + atomic_store_explicit((volatile _Atomic(int16_t)*)address, value, memory_order_relaxed); +} + +static inline void +_Py_atomic_store_int32_relaxed(volatile int32_t *address, int32_t value) +{ + _Py_USING_STD + atomic_store_explicit((volatile _Atomic(int32_t)*)address, value, memory_order_relaxed); +} + +static inline void +_Py_atomic_store_int64_relaxed(volatile int64_t *address, int64_t value) +{ + _Py_USING_STD + atomic_store_explicit((volatile _Atomic(int64_t)*)address, value, memory_order_relaxed); +} + +static inline void +_Py_atomic_store_intptr_relaxed(volatile intptr_t *address, intptr_t value) +{ + _Py_USING_STD + atomic_store_explicit((volatile _Atomic(intptr_t)*)address, value, memory_order_relaxed); +} + +static inline void +_Py_atomic_store_uint8_relaxed(volatile uint8_t *address, uint8_t value) +{ + _Py_USING_STD + atomic_store_explicit((volatile _Atomic(uint8_t)*)address, value, memory_order_relaxed); +} + +static inline void +_Py_atomic_store_uint16_relaxed(volatile uint16_t *address, uint16_t value) +{ + _Py_USING_STD + atomic_store_explicit((volatile _Atomic(uint16_t)*)address, value, memory_order_relaxed); +} + +static inline void +_Py_atomic_store_uint32_relaxed(volatile uint32_t *address, uint32_t value) +{ + _Py_USING_STD + atomic_store_explicit((volatile _Atomic(uint32_t)*)address, 
value, memory_order_relaxed); +} + +static inline void +_Py_atomic_store_uint64_relaxed(volatile uint64_t *address, uint64_t value) +{ + _Py_USING_STD + atomic_store_explicit((volatile _Atomic(uint64_t)*)address, value, memory_order_relaxed); +} + +static inline void +_Py_atomic_store_uintptr_relaxed(volatile uintptr_t *address, uintptr_t value) +{ + _Py_USING_STD + atomic_store_explicit((volatile _Atomic(uintptr_t)*)address, value, memory_order_relaxed); +} + +static inline void +_Py_atomic_store_uint_relaxed(volatile unsigned int *address, unsigned int value) +{ + _Py_USING_STD + atomic_store_explicit((volatile _Atomic(unsigned int)*)address, value, memory_order_relaxed); +} + +static inline void +_Py_atomic_store_ptr_relaxed(volatile void *address, void *value) +{ + _Py_USING_STD + atomic_store_explicit((volatile _Atomic(void*)*)address, value, memory_order_relaxed); +} + +static inline void +_Py_atomic_store_ssize_relaxed(volatile Py_ssize_t *address, Py_ssize_t value) +{ + _Py_USING_STD + atomic_store_explicit((volatile _Atomic(Py_ssize_t)*)address, value, memory_order_relaxed); +} + +static inline void +_Py_atomic_store_uint64_release(volatile uint64_t *address, uint64_t value) +{ + _Py_USING_STD + atomic_store_explicit((volatile _Atomic(uint64_t)*)address, value, memory_order_release); +} + +static inline void +_Py_atomic_store_ptr_release(volatile void *address, void *value) +{ + _Py_USING_STD + atomic_store_explicit((volatile _Atomic(void*)*)address, value, memory_order_release); +} + +static inline void +_Py_atomic_fence_seq_cst(void) +{ + _Py_USING_STD + atomic_thread_fence(memory_order_seq_cst); +} + +static inline void +_Py_atomic_fence_release(void) +{ + _Py_USING_STD + atomic_thread_fence(memory_order_release); +} diff --git a/Lib/test/test_capi/test_pyatomic.py b/Lib/test/test_capi/test_pyatomic.py new file mode 100644 index 000000000000000..846d6d50c25969b --- /dev/null +++ b/Lib/test/test_capi/test_pyatomic.py @@ -0,0 +1,15 @@ +import unittest
+from test.support import import_helper + +# Skip this test if the _testcapi module isn't available. +_testcapi = import_helper.import_module('_testcapi') + +class PyAtomicTests(unittest.TestCase): + pass + +for name in sorted(dir(_testcapi)): + if name.startswith('test_atomic'): + setattr(PyAtomicTests, name, getattr(_testcapi, name)) + +if __name__ == "__main__": + unittest.main() diff --git a/Makefile.pre.in b/Makefile.pre.in index 9be5c3b50eb9ee2..04b230b15b8a4d8 100644 --- a/Makefile.pre.in +++ b/Makefile.pre.in @@ -1654,6 +1654,9 @@ PYTHON_HEADERS= \ $(srcdir)/Include/osdefs.h \ $(srcdir)/Include/osmodule.h \ $(srcdir)/Include/patchlevel.h \ + $(srcdir)/Include/pyatomic.h \ + $(srcdir)/Include/pyatomic_gcc.h \ + $(srcdir)/Include/pyatomic_std.h \ $(srcdir)/Include/pybuffer.h \ $(srcdir)/Include/pycapsule.h \ $(srcdir)/Include/pydtrace.h \ diff --git a/Modules/Setup.stdlib.in b/Modules/Setup.stdlib.in index 689f1d42ef0eeee..e913ee405573d35 100644 --- a/Modules/Setup.stdlib.in +++ b/Modules/Setup.stdlib.in @@ -159,7 +159,7 @@ @MODULE__XXTESTFUZZ_TRUE@_xxtestfuzz _xxtestfuzz/_xxtestfuzz.c _xxtestfuzz/fuzzer.c @MODULE__TESTBUFFER_TRUE@_testbuffer _testbuffer.c @MODULE__TESTINTERNALCAPI_TRUE@_testinternalcapi _testinternalcapi.c -@MODULE__TESTCAPI_TRUE@_testcapi _testcapimodule.c _testcapi/vectorcall.c _testcapi/vectorcall_limited.c _testcapi/heaptype.c _testcapi/abstract.c _testcapi/unicode.c _testcapi/dict.c _testcapi/getargs.c _testcapi/datetime.c _testcapi/docstring.c _testcapi/mem.c _testcapi/watchers.c _testcapi/long.c _testcapi/float.c _testcapi/structmember.c _testcapi/exceptions.c _testcapi/code.c _testcapi/buffer.c _testcapi/pyos.c _testcapi/immortal.c _testcapi/heaptype_relative.c _testcapi/gc.c +@MODULE__TESTCAPI_TRUE@_testcapi _testcapimodule.c _testcapi/vectorcall.c _testcapi/vectorcall_limited.c _testcapi/heaptype.c _testcapi/abstract.c _testcapi/unicode.c _testcapi/dict.c _testcapi/getargs.c _testcapi/datetime.c _testcapi/docstring.c 
_testcapi/mem.c _testcapi/watchers.c _testcapi/long.c _testcapi/float.c _testcapi/structmember.c _testcapi/exceptions.c _testcapi/code.c _testcapi/buffer.c _testcapi/pyatomic.c _testcapi/pyos.c _testcapi/immortal.c _testcapi/heaptype_relative.c _testcapi/gc.c @MODULE__TESTCLINIC_TRUE@_testclinic _testclinic.c # Some testing modules MUST be built as shared libraries. diff --git a/Modules/_testcapi/parts.h b/Modules/_testcapi/parts.h index 65ebf80bcd1e95a..8e51b064b7531fd 100644 --- a/Modules/_testcapi/parts.h +++ b/Modules/_testcapi/parts.h @@ -40,6 +40,7 @@ int _PyTestCapi_Init_Structmember(PyObject *module); int _PyTestCapi_Init_Exceptions(PyObject *module); int _PyTestCapi_Init_Code(PyObject *module); int _PyTestCapi_Init_Buffer(PyObject *module); +int _PyTestCapi_Init_PyAtomic(PyObject *module); int _PyTestCapi_Init_PyOS(PyObject *module); int _PyTestCapi_Init_Immortal(PyObject *module); int _PyTestCapi_Init_GC(PyObject *mod); diff --git a/Modules/_testcapi/pyatomic.c b/Modules/_testcapi/pyatomic.c new file mode 100644 index 000000000000000..f435dc55ac26b9f --- /dev/null +++ b/Modules/_testcapi/pyatomic.c @@ -0,0 +1,165 @@ +/* + * C Extension module to smoke test pyatomic.h API. + * + * This only tests basic functionality, not any synchronizing ordering. 
+ */ + +/* Always enable assertions */ +#undef NDEBUG + +#include "Python.h" +#include "pyatomic.h" +#include "parts.h" + +// We define atomic bitwise operations on these types +#define FOR_BITWISE_TYPES(V) \ + V(uint8, uint8_t) \ + V(uint16, uint16_t) \ + V(uint32, uint32_t) \ + V(uint64, uint64_t) \ + V(uintptr, uintptr_t) + +// We define atomic addition on these types +#define FOR_ARITHMETIC_TYPES(V) \ + FOR_BITWISE_TYPES(V) \ + V(int, int) \ + V(uint, unsigned int) \ + V(int8, int8_t) \ + V(int16, int16_t) \ + V(int32, int32_t) \ + V(int64, int64_t) \ + V(intptr, intptr_t) \ + V(ssize, Py_ssize_t) + +// We define atomic load, store, exchange, and compare_exchange on these types +#define FOR_ALL_TYPES(V) \ + FOR_ARITHMETIC_TYPES(V) \ + V(ptr, void*) + +#define IMPL_TEST_ADD(suffix, dtype) \ +static PyObject * \ +test_atomic_add_##suffix(PyObject *self, PyObject *obj) { \ + dtype x = 0; \ + assert(_Py_atomic_add_##suffix(&x, 1) == 0); \ + assert("a" && x == 1); \ + assert(_Py_atomic_add_##suffix(&x, 2) == 1); \ + assert(x == 3); \ + assert(_Py_atomic_add_##suffix(&x, -2) == 3); \ + assert("b" && x == 1); \ + assert(_Py_atomic_add_##suffix(&x, -1) == 1); \ + assert(x == 0); \ + assert(_Py_atomic_add_##suffix(&x, -1) == 0); \ + assert(x == (dtype)-1); \ + assert(_Py_atomic_add_##suffix(&x, -2) == (dtype)-1); \ + assert(x == (dtype)-3); \ + assert(_Py_atomic_add_##suffix(&x, 2) == (dtype)-3); \ + assert(x == (dtype)-1); \ + Py_RETURN_NONE; \ +} +FOR_ARITHMETIC_TYPES(IMPL_TEST_ADD) + +#define IMPL_TEST_COMPARE_EXCHANGE(suffix, dtype) \ +static PyObject * \ +test_atomic_compare_exchange_##suffix(PyObject *self, PyObject *obj) { \ + dtype x = (dtype)0; \ + dtype y = (dtype)1; \ + dtype z = (dtype)2; \ + assert(_Py_atomic_compare_exchange_##suffix(&x, y, z) == 0); \ + assert(x == 0); \ + assert(_Py_atomic_compare_exchange_##suffix(&x, 0, z) == 1); \ + assert(x == z); \ + assert(_Py_atomic_compare_exchange_##suffix(&x, y, z) == 0); \ + assert(x == z); \ + Py_RETURN_NONE; 
\ +} +FOR_ALL_TYPES(IMPL_TEST_COMPARE_EXCHANGE) + +#define IMPL_TEST_EXCHANGE(suffix, dtype) \ +static PyObject * \ +test_atomic_exchange_##suffix(PyObject *self, PyObject *obj) { \ + dtype x = (dtype)0; \ + dtype y = (dtype)1; \ + dtype z = (dtype)2; \ + assert(_Py_atomic_exchange_##suffix(&x, y) == (dtype)0); \ + assert(x == (dtype)1); \ + assert(_Py_atomic_exchange_##suffix(&x, z) == (dtype)1); \ + assert(x == (dtype)2); \ + assert(_Py_atomic_exchange_##suffix(&x, y) == (dtype)2); \ + assert(x == (dtype)1); \ + Py_RETURN_NONE; \ +} +FOR_ALL_TYPES(IMPL_TEST_EXCHANGE) + +#define IMPL_TEST_LOAD_STORE(suffix, dtype) \ +static PyObject * \ +test_atomic_load_store_##suffix(PyObject *self, PyObject *obj) { \ + dtype x = (dtype)0; \ + dtype y = (dtype)1; \ + dtype z = (dtype)2; \ + assert(_Py_atomic_load_##suffix(&x) == (dtype)0); \ + assert(x == (dtype)0); \ + _Py_atomic_store_##suffix(&x, y); \ + assert(_Py_atomic_load_##suffix(&x) == (dtype)1); \ + assert(x == (dtype)1); \ + _Py_atomic_store_##suffix##_relaxed(&x, z); \ + assert(_Py_atomic_load_##suffix##_relaxed(&x) == (dtype)2); \ + assert(x == (dtype)2); \ + Py_RETURN_NONE; \ +} +FOR_ALL_TYPES(IMPL_TEST_LOAD_STORE) + +#define IMPL_TEST_AND_OR(suffix, dtype) \ +static PyObject * \ +test_atomic_and_or_##suffix(PyObject *self, PyObject *obj) { \ + dtype x = (dtype)0; \ + dtype y = (dtype)1; \ + dtype z = (dtype)3; \ + assert(_Py_atomic_or_##suffix(&x, z) == (dtype)0); \ + assert(x == (dtype)3); \ + assert(_Py_atomic_and_##suffix(&x, y) == (dtype)3); \ + assert(x == (dtype)1); \ + Py_RETURN_NONE; \ +} +FOR_BITWISE_TYPES(IMPL_TEST_AND_OR) + +static PyObject * +test_atomic_fences(PyObject *self, PyObject *obj) { + // Just make sure that the fences compile. We are not + // testing any synchronizing ordering. 
+ _Py_atomic_fence_seq_cst(); + _Py_atomic_fence_release(); + Py_RETURN_NONE; +} + +// NOTE: all tests should start with "test_atomic_" to be included +// in test_pyatomic.py + +#define BIND_TEST_ADD(suffix, dtype) \ + {"test_atomic_add_" #suffix, test_atomic_add_##suffix, METH_NOARGS}, +#define BIND_TEST_COMPARE_EXCHANGE(suffix, dtype) \ + {"test_atomic_compare_exchange_" #suffix, test_atomic_compare_exchange_##suffix, METH_NOARGS}, +#define BIND_TEST_EXCHANGE(suffix, dtype) \ + {"test_atomic_exchange_" #suffix, test_atomic_exchange_##suffix, METH_NOARGS}, +#define BIND_TEST_LOAD_STORE(suffix, dtype) \ + {"test_atomic_load_store_" #suffix, test_atomic_load_store_##suffix, METH_NOARGS}, +#define BIND_TEST_AND_OR(suffix, dtype) \ + {"test_atomic_and_or_" #suffix, test_atomic_and_or_##suffix, METH_NOARGS}, + +static PyMethodDef test_methods[] = { + FOR_ARITHMETIC_TYPES(BIND_TEST_ADD) + FOR_ALL_TYPES(BIND_TEST_COMPARE_EXCHANGE) + FOR_ALL_TYPES(BIND_TEST_EXCHANGE) + FOR_ALL_TYPES(BIND_TEST_LOAD_STORE) + FOR_BITWISE_TYPES(BIND_TEST_AND_OR) + {"test_atomic_fences", test_atomic_fences, METH_NOARGS}, + {NULL, NULL} /* sentinel */ +}; + +int +_PyTestCapi_Init_PyAtomic(PyObject *mod) +{ + if (PyModule_AddFunctions(mod, test_methods) < 0) { + return -1; + } + return 0; +} diff --git a/Modules/_testcapimodule.c b/Modules/_testcapimodule.c index a7a98d1eea5bd15..1094b4c544b2790 100644 --- a/Modules/_testcapimodule.c +++ b/Modules/_testcapimodule.c @@ -4325,6 +4325,9 @@ PyInit__testcapi(void) if (_PyTestCapi_Init_GC(m) < 0) { return NULL; } + if (_PyTestCapi_Init_PyAtomic(m) < 0) { + return NULL; + } #ifndef LIMITED_API_AVAILABLE PyModule_AddObjectRef(m, "LIMITED_API_AVAILABLE", Py_False); diff --git a/PCbuild/_testcapi.vcxproj b/PCbuild/_testcapi.vcxproj index 8c0fd0cf052b0ec..0a02929db438b80 100644 --- a/PCbuild/_testcapi.vcxproj +++ b/PCbuild/_testcapi.vcxproj @@ -112,6 +112,7 @@ + diff --git a/PCbuild/_testcapi.vcxproj.filters b/PCbuild/_testcapi.vcxproj.filters index 
87d33ebe28e4750..4ba6011d8af5b97 100644 --- a/PCbuild/_testcapi.vcxproj.filters +++ b/PCbuild/_testcapi.vcxproj.filters @@ -66,6 +66,9 @@ Source Files + + Source Files + Source Files diff --git a/PCbuild/pythoncore.vcxproj b/PCbuild/pythoncore.vcxproj index b0e62864421e17c..1107d479c036f58 100644 --- a/PCbuild/pythoncore.vcxproj +++ b/PCbuild/pythoncore.vcxproj @@ -300,6 +300,8 @@ + + diff --git a/PCbuild/pythoncore.vcxproj.filters b/PCbuild/pythoncore.vcxproj.filters index d5f61e9c5d7c899..9f0a3f27d6381f6 100644 --- a/PCbuild/pythoncore.vcxproj.filters +++ b/PCbuild/pythoncore.vcxproj.filters @@ -774,6 +774,12 @@ Include\internal + + Include + + + Include +