diff --git a/src/test/codegen/intrinsics/nearby.rs b/src/test/codegen/intrinsics/nearby.rs
new file mode 100644
index 0000000000000..520fe2f1886eb
--- /dev/null
+++ b/src/test/codegen/intrinsics/nearby.rs
@@ -0,0 +1,18 @@
+#![crate_type = "lib"]
+#![feature(core_intrinsics)]
+
+use std::intrinsics;
+
+// CHECK-LABEL: @nearbyintf32
+#[no_mangle]
+pub unsafe fn nearbyintf32(a: f32) -> f32 {
+    // CHECK: llvm.nearbyint.f32
+    intrinsics::nearbyintf32(a)
+}
+
+// CHECK-LABEL: @nearbyintf64
+#[no_mangle]
+pub unsafe fn nearbyintf64(a: f64) -> f64 {
+    // CHECK: llvm.nearbyint.f64
+    intrinsics::nearbyintf64(a)
+}
diff --git a/src/test/codegen/intrinsics/volatile.rs b/src/test/codegen/intrinsics/volatile.rs
new file mode 100644
index 0000000000000..1970517e73262
--- /dev/null
+++ b/src/test/codegen/intrinsics/volatile.rs
@@ -0,0 +1,55 @@
+// compile-flags: -C no-prepopulate-passes
+
+#![crate_type = "lib"]
+#![feature(core_intrinsics)]
+
+use std::intrinsics;
+
+// CHECK-LABEL: @volatile_copy_memory
+#[no_mangle]
+pub unsafe fn volatile_copy_memory(a: *mut u8, b: *const u8) {
+    // CHECK: llvm.memmove.p0i8.p0i8.{{\w*(.*true)}}
+    intrinsics::volatile_copy_memory(a, b, 1)
+}
+
+// CHECK-LABEL: @volatile_copy_nonoverlapping_memory
+#[no_mangle]
+pub unsafe fn volatile_copy_nonoverlapping_memory(a: *mut u8, b: *const u8) {
+    // CHECK: llvm.memcpy.p0i8.p0i8.{{\w*(.*true)}}
+    intrinsics::volatile_copy_nonoverlapping_memory(a, b, 1)
+}
+
+// CHECK-LABEL: @volatile_set_memory
+#[no_mangle]
+pub unsafe fn volatile_set_memory(a: *mut u8, b: u8) {
+    // CHECK: llvm.memset.p0i8.{{\w*(.*true)}}
+    intrinsics::volatile_set_memory(a, b, 1)
+}
+
+// CHECK-LABEL: @volatile_load
+#[no_mangle]
+pub unsafe fn volatile_load(a: *const u8) -> u8 {
+    // CHECK: load volatile
+    intrinsics::volatile_load(a)
+}
+
+// CHECK-LABEL: @volatile_store
+#[no_mangle]
+pub unsafe fn volatile_store(a: *mut u8, b: u8) {
+    // CHECK: store volatile
+    intrinsics::volatile_store(a, b)
+}
+
+// CHECK-LABEL: @unaligned_volatile_load
+#[no_mangle]
+pub unsafe fn unaligned_volatile_load(a: *const u8) -> u8 {
+    // CHECK: load volatile
+    intrinsics::unaligned_volatile_load(a)
+}
+
+// CHECK-LABEL: @unaligned_volatile_store
+#[no_mangle]
+pub unsafe fn unaligned_volatile_store(a: *mut u8, b: u8) {
+    // CHECK: store volatile
+    intrinsics::unaligned_volatile_store(a, b)
+}
diff --git a/src/test/codegen/intrinsics/volatile_order.rs b/src/test/codegen/intrinsics/volatile_order.rs
new file mode 100644
index 0000000000000..29331219ba6ee
--- /dev/null
+++ b/src/test/codegen/intrinsics/volatile_order.rs
@@ -0,0 +1,18 @@
+#![crate_type = "lib"]
+#![feature(core_intrinsics)]
+
+use std::intrinsics::*;
+
+pub unsafe fn test_volatile_order() {
+    let mut a: Box<u8> = Box::new(0);
+    // CHECK: load volatile
+    let x = volatile_load(&*a);
+    // CHECK: load volatile
+    let x = volatile_load(&*a);
+    // CHECK: store volatile
+    volatile_store(&mut *a, 12);
+    // CHECK: store volatile
+    unaligned_volatile_store(&mut *a, 12);
+    // CHECK: llvm.memset.p0i8
+    volatile_set_memory(&mut *a, 12, 1)
+}
diff --git a/src/test/ui/intrinsics/intrinsic-nearby.rs b/src/test/ui/intrinsics/intrinsic-nearby.rs
new file mode 100644
index 0000000000000..7b1d1eeaadbd0
--- /dev/null
+++ b/src/test/ui/intrinsics/intrinsic-nearby.rs
@@ -0,0 +1,11 @@
+// run-pass
+#![feature(core_intrinsics)]
+
+use std::intrinsics::*;
+
+fn main() {
+    unsafe {
+        assert_eq!(nearbyintf32(5.234f32), 5f32);
+        assert_eq!(nearbyintf64(6.777f64), 7f64);
+    }
+}
diff --git a/src/test/ui/intrinsics/intrinsic-volatile.rs b/src/test/ui/intrinsics/intrinsic-volatile.rs
new file mode 100644
index 0000000000000..7b2c825a2084b
--- /dev/null
+++ b/src/test/ui/intrinsics/intrinsic-volatile.rs
@@ -0,0 +1,44 @@
+// run-pass
+
+#![feature(core_intrinsics)]
+
+use std::intrinsics::*;
+
+pub fn main() {
+    unsafe {
+        let mut x: Box<u8> = Box::new(0);
+        let mut y: Box<u8> = Box::new(0);
+
+        // test volatile load
+        assert_eq!(volatile_load(&*x), 0);
+        *x = 1;
+        assert_eq!(volatile_load(&*x), 1);
+
+        // test volatile store
+        volatile_store(&mut *x, 2);
+        assert_eq!(*x, 2);
+
+        // test volatile copy memory
+        volatile_copy_memory(&mut *y, &*x, 1);
+        assert_eq!(*y, 2);
+
+        // test volatile copy non-overlapping memory
+        *x = 3;
+        volatile_copy_nonoverlapping_memory(&mut *y, &*x, 1);
+        assert_eq!(*y, 3);
+
+        // test volatile set memory
+        volatile_set_memory(&mut *x, 4, 1);
+        assert_eq!(*x, 4);
+
+        // test unaligned volatile load
+        let mut arr: [u8; 3] = [1, 2, 3];
+        let ptr = arr[1..].as_ptr() as *const u16;
+        assert_eq!(unaligned_volatile_load(ptr), u16::from_ne_bytes([arr[1], arr[2]]));
+
+        // test unaligned volatile store
+        let ptr = arr[1..].as_mut_ptr() as *mut u16;
+        unaligned_volatile_store(ptr, 0);
+        assert_eq!(arr, [1, 0, 0]);
+    }
+}