From eddcbd7585152c1e404ed0481ffe44c61d504e77 Mon Sep 17 00:00:00 2001
From: Keno Fischer
Date: Fri, 30 Nov 2018 16:06:42 -0500
Subject: [PATCH] Completely remove undefined behavior

---
 src/julia_internal.h      |  7 -------
 src/support/MurmurHash3.c | 24 +++++++++---------------
 2 files changed, 9 insertions(+), 22 deletions(-)

diff --git a/src/julia_internal.h b/src/julia_internal.h
index 796d402ee470e..86d0689f27819 100644
--- a/src/julia_internal.h
+++ b/src/julia_internal.h
@@ -1019,13 +1019,6 @@ void jl_register_fptrs(uint64_t sysimage_base, const struct _jl_sysimg_fptrs_t *
 
 extern arraylist_t partial_inst;
 
-STATIC_INLINE uint64_t jl_load_unaligned_i64(const void *ptr) JL_NOTSAFEPOINT;
-STATIC_INLINE uint32_t jl_load_unaligned_i32(const void *ptr) JL_NOTSAFEPOINT;
-STATIC_INLINE uint16_t jl_load_unaligned_i16(const void *ptr) JL_NOTSAFEPOINT;
-STATIC_INLINE void jl_store_unaligned_i64(void *ptr, uint64_t val) JL_NOTSAFEPOINT;
-STATIC_INLINE void jl_store_unaligned_i32(void *ptr, uint32_t val) JL_NOTSAFEPOINT;
-STATIC_INLINE void jl_store_unaligned_i16(void *ptr, uint16_t val) JL_NOTSAFEPOINT;
-
 #if jl_has_builtin(__builtin_assume_aligned) || defined(_COMPILER_GCC_)
 #define jl_assume_aligned(ptr, align) __builtin_assume_aligned(ptr, align)
 #elif defined(_COMPILER_INTEL_)
diff --git a/src/support/MurmurHash3.c b/src/support/MurmurHash3.c
index 48b63e6d0d0fc..94069eab02732 100644
--- a/src/support/MurmurHash3.c
+++ b/src/support/MurmurHash3.c
@@ -91,11 +91,11 @@ void MurmurHash3_x86_32 ( const void * key, int len,
   //----------
   // body
 
-  const uint32_t * blocks = (const uint32_t *)(data + nblocks*4);
+  const uint8_t * tail = data + nblocks*4;
 
   for(int i = -nblocks; i; i++)
   {
-    uint32_t k1 = jl_load_unaligned_i32(blocks + i);
+    uint32_t k1 = jl_load_unaligned_i32(tail + sizeof(uint32_t)*i);
 
     k1 *= c1;
     k1 = ROTL32(k1,15);
@@ -109,8 +109,6 @@ void MurmurHash3_x86_32 ( const void * key, int len,
   //----------
   // tail
 
-  const uint8_t * tail = (const uint8_t*)(data + nblocks*4);
-
   uint32_t k1 = 0;
 
   switch(len & 3)
@@ -152,14 +150,14 @@ void MurmurHash3_x86_128 ( const void * key, const int len,
   //----------
   // body
 
-  const uint32_t * blocks = (const uint32_t *)(data + nblocks*16);
+  const uint8_t *tail = data + nblocks*16;
 
   for(int i = -nblocks; i; i++)
   {
-    uint32_t k1 = jl_load_unaligned_i32(blocks + i*4 + 0);
-    uint32_t k2 = jl_load_unaligned_i32(blocks + i*4 + 1);
-    uint32_t k3 = jl_load_unaligned_i32(blocks + i*4 + 2);
-    uint32_t k4 = jl_load_unaligned_i32(blocks + i*4 + 3);
+    uint32_t k1 = jl_load_unaligned_i32(tail + sizeof(uint32_t)*(i*4 + 0));
+    uint32_t k2 = jl_load_unaligned_i32(tail + sizeof(uint32_t)*(i*4 + 1));
+    uint32_t k3 = jl_load_unaligned_i32(tail + sizeof(uint32_t)*(i*4 + 2));
+    uint32_t k4 = jl_load_unaligned_i32(tail + sizeof(uint32_t)*(i*4 + 3));
 
     k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;
 
@@ -181,8 +179,6 @@ void MurmurHash3_x86_128 ( const void * key, const int len,
   //----------
   // tail
 
-  const uint8_t * tail = (const uint8_t*)(data + nblocks*16);
-
   uint32_t k1 = 0;
   uint32_t k2 = 0;
   uint32_t k3 = 0;
@@ -256,12 +252,10 @@ void MurmurHash3_x64_128 ( const void * key, const int len,
   //----------
   // body
 
-  const uint64_t * blocks = (const uint64_t *)(data);
-
   for(int i = 0; i < nblocks; i++)
   {
-    uint64_t k1 = jl_load_unaligned_i64(blocks + i*2 + 0);
-    uint64_t k2 = jl_load_unaligned_i64(blocks + i*2 + 1);
+    uint64_t k1 = jl_load_unaligned_i64(data + sizeof(uint64_t)*(i*2 + 0));
+    uint64_t k2 = jl_load_unaligned_i64(data + sizeof(uint64_t)*(i*2 + 1));
 
     k1 *= c1; k1 = ROTL64(k1,31); k1 *= c2; h1 ^= k1;
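
Why the byte-pointer rewrite removes the undefined behavior: converting an arbitrarily aligned byte address such as data + nblocks*4 to const uint32_t * is itself undefined in C when the address is not suitably aligned for uint32_t, even if the resulting pointer is only ever handed to an unaligned-load helper. Keeping all pointer arithmetic in uint8_t units and passing the byte address straight to jl_load_unaligned_i32 / jl_load_unaligned_i64 avoids ever forming a misaligned typed pointer. As a rough sketch, such a helper is commonly written with memcpy; this is only an illustration under that assumption, and the actual definitions in julia_internal.h may differ:

    #include <stdint.h>
    #include <string.h>

    /* Illustrative only: one common way to define an unaligned 32-bit load.
     * memcpy from the byte pointer avoids any misaligned uint32_t* access,
     * and compilers lower it to a single unaligned load on targets that
     * support one. The real jl_load_unaligned_i32 may be defined differently. */
    static inline uint32_t load_unaligned_i32_sketch(const void *ptr)
    {
        uint32_t val;
        memcpy(&val, ptr, sizeof(val));
        return val;
    }

    /* Usage mirroring the patched loop body:
     *   uint32_t k1 = load_unaligned_i32_sketch(tail + sizeof(uint32_t)*i);
     */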