diff --git a/src/impl_owned_array.rs b/src/impl_owned_array.rs
index 63af63917..d148a115b 100644
--- a/src/impl_owned_array.rs
+++ b/src/impl_owned_array.rs
@@ -1,5 +1,6 @@
use alloc::vec::Vec;
+use std::mem;
use std::mem::MaybeUninit;
use rawpointer::PointerExt;
@@ -33,7 +34,7 @@ impl Array {
/// assert_eq!(scalar, Foo);
/// ```
pub fn into_scalar(self) -> A {
- let size = ::std::mem::size_of::();
+ let size = mem::size_of::();
if size == 0 {
// Any index in the `Vec` is fine since all elements are identical.
self.data.into_vec().remove(0)
@@ -155,6 +156,51 @@ impl Array {
impl Array
where D: Dimension
{
+ /// Move all elements from self into `new_array`, which must be of the same shape but
+ /// can have a different memory layout. The destination is overwritten completely.
+ ///
+ /// The destination should be a mut reference to an array or an `ArrayViewMut` with
+ /// `A` elements.
+ ///
+ /// ***Panics*** if the shapes don't agree.
+ ///
+ /// ## Example
+ ///
+ /// ```
+ /// use ndarray::Array;
+ ///
+ /// // Usage example of move_into in safe code
+ /// let mut a = Array::default((10, 10));
+ /// let b = Array::from_shape_fn((10, 10), |(i, j)| (i + j).to_string());
+ /// b.move_into(&mut a);
+ /// ```
+ pub fn move_into<'a, AM>(self, new_array: AM)
+ where
+ AM: Into>,
+ A: 'a,
+ {
+ // Remove generic parameter P and call the implementation
+ let new_array = new_array.into();
+ if mem::needs_drop::() {
+ self.move_into_needs_drop(new_array);
+ } else {
+ // If `A` doesn't need drop, we can overwrite the destination.
+ // Safe because: move_into_uninit only writes initialized values
+ unsafe {
+ self.move_into_uninit(new_array.into_maybe_uninit())
+ }
+ }
+ }
+
+ fn move_into_needs_drop(mut self, new_array: ArrayViewMut) {
+ // Simple case where `A` has a destructor: just swap values between self and new_array.
+ // Afterwards, `self` drops full of initialized values and dropping works as usual.
+ // This avoids moving out of owned values in `self` while at the same time managing
+ // the dropping of the values being overwritten in `new_array`.
+ Zip::from(&mut self).and(new_array)
+ .for_each(|src, dst| mem::swap(src, dst));
+ }
+
/// Move all elements from self into `new_array`, which must be of the same shape but
/// can have a different memory layout. The destination is overwritten completely.
///
@@ -167,12 +213,26 @@ impl Array
/// drop of any such element, other elements may be leaked.
///
/// ***Panics*** if the shapes don't agree.
- pub fn move_into<'a, AM>(self, new_array: AM)
+ ///
+ /// ## Example
+ ///
+ /// ```
+ /// use ndarray::Array;
+ ///
+ /// let a = Array::from_iter(0..100).into_shape((10, 10)).unwrap();
+ /// let mut b = Array::uninit((10, 10));
+ /// a.move_into_uninit(&mut b);
+ /// unsafe {
+ /// // we can now promise we have fully initialized `b`.
+ /// let b = b.assume_init();
+ /// }
+ /// ```
+ pub fn move_into_uninit<'a, AM>(self, new_array: AM)
where
AM: Into, D>>,
A: 'a,
{
- // Remove generic parameter P and call the implementation
+ // Remove generic parameter AM and call the implementation
self.move_into_impl(new_array.into())
}
@@ -180,7 +240,8 @@ impl Array
unsafe {
// Safety: copy_to_nonoverlapping cannot panic
let guard = AbortIfPanic(&"move_into: moving out of owned value");
- // Move all reachable elements
+ // Move all reachable elements; we move elements out of `self`,
+ // and thus must not panic for the whole section until we call `self.data.set_len(0)`.
Zip::from(self.raw_view_mut())
.and(new_array)
.for_each(|src, dst| {
@@ -208,7 +269,7 @@ impl Array
let data_len = self.data.len();
let has_unreachable_elements = self_len != data_len;
- if !has_unreachable_elements || std::mem::size_of::() == 0 {
+ if !has_unreachable_elements || mem::size_of::() == 0 || !mem::needs_drop::() {
unsafe {
self.data.set_len(0);
}
@@ -270,7 +331,7 @@ impl Array
// dummy array -> self.
// old_self elements are moved -> new_array.
let old_self = std::mem::replace(self, Self::empty());
- old_self.move_into(new_array.view_mut());
+ old_self.move_into_uninit(new_array.view_mut());
// new_array -> self.
unsafe {
diff --git a/src/impl_views/conversions.rs b/src/impl_views/conversions.rs
index cfd7f9aa0..5dba16d98 100644
--- a/src/impl_views/conversions.rs
+++ b/src/impl_views/conversions.rs
@@ -7,6 +7,7 @@
// except according to those terms.
use alloc::slice;
+use std::mem::MaybeUninit;
use crate::imp_prelude::*;
@@ -133,6 +134,27 @@ where
self.into_raw_view_mut().cast::>().deref_into_view()
}
}
+
+ /// Return the array view as a view of `MaybeUninit` elements
+ ///
+ /// This conversion leaves the elements as they were (presumably initialized), but
+ /// they are represented with the `MaybeUninit` type. Effectively this means that
+ /// the elements can be overwritten without dropping the old element in its place.
+ /// (In some situations this is not what you want, while for `Copy` elements it makes
+ /// no difference at all.)
+ ///
+ /// # Safety
+ ///
+ /// This method allows writing uninitialized data into the view, which could leave any
+ /// original array that we borrow from in an inconsistent state. This is not allowed
+ /// when using the resulting array view.
+ pub(crate) unsafe fn into_maybe_uninit(self) -> ArrayViewMut<'a, MaybeUninit, D> {
+ // Safe because: A and MaybeUninit have the same representation;
+ // and we can go from initialized to (maybe) not unconditionally in terms of
+ // representation. However, the user must be careful to not write uninit elements
+ // through the view.
+ self.into_raw_view_mut().cast::>().deref_into_view_mut()
+ }
}
/// Private array view methods
diff --git a/tests/assign.rs b/tests/assign.rs
index 19156cce8..5c300943d 100644
--- a/tests/assign.rs
+++ b/tests/assign.rs
@@ -41,14 +41,14 @@ fn move_into_copy() {
let a = arr2(&[[1., 2.], [3., 4.]]);
let acopy = a.clone();
let mut b = Array::uninit(a.dim());
- a.move_into(b.view_mut());
+ a.move_into_uninit(b.view_mut());
let b = unsafe { b.assume_init() };
assert_eq!(acopy, b);
let a = arr2(&[[1., 2.], [3., 4.]]).reversed_axes();
let acopy = a.clone();
let mut b = Array::uninit(a.dim());
- a.move_into(b.view_mut());
+ a.move_into_uninit(b.view_mut());
let b = unsafe { b.assume_init() };
assert_eq!(acopy, b);
}
@@ -74,7 +74,7 @@ fn move_into_owned() {
let acopy = a.clone();
let mut b = Array::uninit(a.dim());
- a.move_into(b.view_mut());
+ a.move_into_uninit(b.view_mut());
let b = unsafe { b.assume_init() };
assert_eq!(acopy, b);
@@ -85,7 +85,7 @@ fn move_into_owned() {
#[test]
fn move_into_slicing() {
- // Count correct number of drops when using move_into and discontiguous arrays (with holes).
+ // Count correct number of drops when using move_into_uninit and discontiguous arrays (with holes).
for &use_f_order in &[false, true] {
for &invert_axis in &[0b00, 0b01, 0b10, 0b11] { // bitmask for axis to invert
let counter = DropCounter::default();
@@ -102,7 +102,7 @@ fn move_into_slicing() {
}
let mut b = Array::uninit(a.dim());
- a.move_into(b.view_mut());
+ a.move_into_uninit(b.view_mut());
let b = unsafe { b.assume_init() };
let total = m * n;
@@ -118,7 +118,7 @@ fn move_into_slicing() {
#[test]
fn move_into_diag() {
- // Count correct number of drops when using move_into and discontiguous arrays (with holes).
+ // Count correct number of drops when using move_into_uninit and discontiguous arrays (with holes).
for &use_f_order in &[false, true] {
let counter = DropCounter::default();
{
@@ -128,7 +128,7 @@ fn move_into_diag() {
let a = a.into_diag();
let mut b = Array::uninit(a.dim());
- a.move_into(b.view_mut());
+ a.move_into_uninit(b.view_mut());
let b = unsafe { b.assume_init() };
let total = m * n;
@@ -143,7 +143,7 @@ fn move_into_diag() {
#[test]
fn move_into_0dim() {
- // Count correct number of drops when using move_into and discontiguous arrays (with holes).
+ // Count correct number of drops when using move_into_uninit and discontiguous arrays (with holes).
for &use_f_order in &[false, true] {
let counter = DropCounter::default();
{
@@ -155,7 +155,7 @@ fn move_into_0dim() {
assert_eq!(a.ndim(), 0);
let mut b = Array::uninit(a.dim());
- a.move_into(b.view_mut());
+ a.move_into_uninit(b.view_mut());
let b = unsafe { b.assume_init() };
let total = m * n;
@@ -170,7 +170,7 @@ fn move_into_0dim() {
#[test]
fn move_into_empty() {
- // Count correct number of drops when using move_into and discontiguous arrays (with holes).
+ // Count correct number of drops when using move_into_uninit and discontiguous arrays (with holes).
for &use_f_order in &[false, true] {
let counter = DropCounter::default();
{
@@ -181,7 +181,7 @@ fn move_into_empty() {
let a = a.slice_move(s![..0, 1..1]);
assert!(a.is_empty());
let mut b = Array::uninit(a.dim());
- a.move_into(b.view_mut());
+ a.move_into_uninit(b.view_mut());
let b = unsafe { b.assume_init() };
let total = m * n;
@@ -194,6 +194,35 @@ fn move_into_empty() {
}
}
+#[test]
+fn move_into() {
+ // Test various memory layouts and holes while moving String elements with move_into
+ for &use_f_order in &[false, true] {
+ for &invert_axis in &[0b00, 0b01, 0b10, 0b11] { // bitmask for axis to invert
+ for &slice in &[false, true] {
+ let mut a = Array::from_shape_fn((5, 4).set_f(use_f_order),
+ |idx| format!("{:?}", idx));
+ if slice {
+ a.slice_collapse(s![1..-1, ..;2]);
+ }
+
+ if invert_axis & 0b01 != 0 {
+ a.invert_axis(Axis(0));
+ }
+ if invert_axis & 0b10 != 0 {
+ a.invert_axis(Axis(1));
+ }
+
+ let acopy = a.clone();
+ let mut b = Array::default(a.dim().set_f(!use_f_order ^ !slice));
+ a.move_into(&mut b);
+
+ assert_eq!(acopy, b);
+ }
+ }
+ }
+}
+
/// This counter can create elements, and then count and verify
/// the number of which have actually been dropped again.