diff --git a/docs/source/tensor.rst b/docs/source/tensor.rst
index f1577e38d3f..a8a305ac9c8 100644
--- a/docs/source/tensor.rst
+++ b/docs/source/tensor.rst
@@ -194,4 +194,5 @@ OneFlow Tensor Class
             zero_,
             nms,
             pin_memory,
+            is_pinned,
 
diff --git a/oneflow/api/python/framework/tensor.cpp b/oneflow/api/python/framework/tensor.cpp
index 142eb4f573c..0ddd612b698 100644
--- a/oneflow/api/python/framework/tensor.cpp
+++ b/oneflow/api/python/framework/tensor.cpp
@@ -180,6 +180,12 @@ static PyObject* PyTensorObject_pin_memory(PyObject* self, PyObject* unused) {
   END_HANDLE_ERRORS
 }
 
+static PyObject* PyTensorObject_is_pinned(PyObject* self, PyObject* unused) {
+  HANDLE_ERRORS
+  return functional::CastToPyObject(CHECK_JUST(PyTensor_Unpack(self)->is_pinned()));
+  END_HANDLE_ERRORS
+}
+
 static PyObject* PyTensorObject_requires_grad_(PyObject* self, PyObject* args, PyObject* kwargs) {
   HANDLE_ERRORS
   int requires_grad = 1;
@@ -381,6 +387,7 @@ static PyMethodDef PyTensorObject_methods[] = {
     {"contiguous", PyTensorObject_contiguous, METH_NOARGS, NULL},
     {"contiguous_", PyTensorObject_contiguous_, METH_NOARGS, NULL},
     {"pin_memory", PyTensorObject_pin_memory, METH_NOARGS, NULL},
+    {"is_pinned", PyTensorObject_is_pinned, METH_NOARGS, NULL},
     {"requires_grad_", (PyCFunction)PyTensorObject_requires_grad_, METH_VARARGS | METH_KEYWORDS,
      NULL},
     {"retain_grad", PyTensorObject_retain_grad, METH_NOARGS, NULL},
diff --git a/oneflow/api/python/functional/tensor_api.cpp b/oneflow/api/python/functional/tensor_api.cpp
index 4f952254120..b1a867e8ea7 100644
--- a/oneflow/api/python/functional/tensor_api.cpp
+++ b/oneflow/api/python/functional/tensor_api.cpp
@@ -120,11 +120,9 @@ class TensorWithOtherCtorFunctor {
   Maybe<Tensor> operator()(const std::shared_ptr<Tensor>& other) const {
     // NOTE(chengcheng): flow.Tensor or flow.tensor ONLY created by EagerTensor now.
     LazyMode::Guard lazy_mode_disabled_guard(/*is_enabled*/ false);
-    bool pin_memory = false;
-    if (other->is_local()) {
-      pin_memory = JUST(JUST(other->AsMirroredTensor())->eager_blob_object())->pin_memory();
-    }
-    return MakeTensorFromOtherTensor(other, pin_memory);
+    bool is_pinned = false;
+    if (other->is_local()) { is_pinned = JUST(CHECK_JUST(other->AsMirroredTensor())->is_pinned()); }
+    return MakeTensorFromOtherTensor(other, is_pinned);
   }
 };
 
@@ -145,9 +143,7 @@ class TensorWithDataCtorFunctor {
     if (PyTensor_Check(data)) {
       const auto& other = PyTensor_Unpack(data);
       const bool pin_memory =
-          other->is_local()
-              ? JUST(JUST(other->AsMirroredTensor())->eager_blob_object())->pin_memory()
-              : false;
+          other->is_local() ? JUST(JUST(other->AsMirroredTensor())->is_pinned()) : false;
       return MakeTensorFromOtherTensor(other, dtype, device,
                                        /*requires_grad=*/false, /*pin_memory=*/pin_memory);
     }
diff --git a/oneflow/core/framework/tensor.cpp b/oneflow/core/framework/tensor.cpp
index e1817ef9836..9383d40055d 100644
--- a/oneflow/core/framework/tensor.cpp
+++ b/oneflow/core/framework/tensor.cpp
@@ -87,7 +87,7 @@ Maybe<Tensor> MirroredTensor::clone() const {
   const auto& device_type = JUST(this->device())->type();
   int64_t device_id = JUST(this->device())->device_id();
   std::shared_ptr<Tensor> input = std::const_pointer_cast<Tensor>(shared_from_this());
-  const bool pin_memory = JUST(JUST(input->AsMirroredTensor())->eager_blob_object())->pin_memory();
+  const bool pin_memory = JUST(JUST(input->AsMirroredTensor())->is_pinned());
   return JUST(functional::Copy(input, device_type, device_id, /*pin_memory=*/pin_memory));
 }
 
diff --git a/oneflow/core/framework/tensor.h b/oneflow/core/framework/tensor.h
index b12ee18907b..faaa90b5b2e 100644
--- a/oneflow/core/framework/tensor.h
+++ b/oneflow/core/framework/tensor.h
@@ -60,6 +60,7 @@ class Tensor : public std::enable_shared_from_this<Tensor> {
   virtual bool is_lazy() const = 0;
   virtual bool is_eager() const { return !is_lazy(); }
   virtual bool is_contiguous() const = 0;
+  virtual Maybe<bool> is_pinned() const = 0;
   virtual const TensorMeta& tensor_meta() const = 0;
   virtual Maybe<Tensor> data() = 0;
   virtual std::shared_ptr<Tensor> pin_memory() const = 0;
@@ -204,6 +205,7 @@ class StaticZerosTensor final : public Tensor {
     PRINT_BUG_PROMPT_AND_ABORT();
     return true;
   }
+  Maybe<bool> is_pinned() const override { RETURN_ERROR_WITH_BUG_PROMPT(); }
   std::shared_ptr<const FunctionNode> grad_fn_node() const override {
     PRINT_BUG_PROMPT_AND_ABORT();
     return nullptr;
   }
@@ -360,6 +362,7 @@ class ProxyTensor : public TensorIf<DerivedT> {
   virtual bool is_leaf() const override { return tensor_->is_leaf(); }
   virtual bool retain_grad() const override { return tensor_->retain_grad(); }
   virtual bool is_contiguous() const override { return tensor_->is_contiguous(); }
+  virtual Maybe<bool> is_pinned() const override { return tensor_->is_pinned(); }
   virtual Maybe<Tensor> acc_grad() const override { return tensor_->acc_grad(); }
   virtual Maybe<TensorArg> current_grad() const override { return tensor_->current_grad(); }
   virtual Maybe<Tensor> detach() const override { return tensor_->detach(); }
@@ -488,6 +491,7 @@ class MirroredTensor final : public TensorIf<MirroredTensor> {
   bool is_leaf() const override { return impl_->is_leaf(); }
   bool retain_grad() const override { return impl_->retain_grad(); }
   bool is_contiguous() const override { return impl_->is_contiguous(); }
+  Maybe<bool> is_pinned() const override { return impl_->is_pinned(); };
 
   // Setters for autograd
   Maybe<void> set_acc_grad(const std::shared_ptr<Tensor>& grad) override {
@@ -606,6 +610,9 @@ class ConsistentTensor final : public TensorIf<ConsistentTensor> {
   bool is_leaf() const override { return impl_->is_leaf(); }
   bool retain_grad() const override { return impl_->retain_grad(); }
   bool is_contiguous() const override { return impl_->is_contiguous(); }
+  Maybe<bool> is_pinned() const override {
+    OF_RUNTIME_ERROR() << "Global tensor has no is_pinned method";
+  }
 
   // Setters for autograd
   Maybe<void> set_acc_grad(const std::shared_ptr<Tensor>& grad) override {
diff --git a/oneflow/core/framework/tensor_impl.cpp b/oneflow/core/framework/tensor_impl.cpp
index 8b0c074efc7..558b57a72c1 100644
--- a/oneflow/core/framework/tensor_impl.cpp
+++ b/oneflow/core/framework/tensor_impl.cpp
@@ -122,6 +122,11 @@ Maybe<void> EagerMirroredTensorImpl::InitEagerBlobObject(
   return Maybe<void>::Ok();
 }
 
+Maybe<bool> EagerMirroredTensorImpl::is_pinned() const {
+  if (!eager_blob_object_) { return false; }
+  return eager_blob_object_->pin_memory();
+}
+
 Maybe<void> EagerMirroredTensorImpl::set_eager_blob_object(
     std::shared_ptr<vm::EagerBlobObject> eager_blob_object) {
   eager_blob_object_ = eager_blob_object;
diff --git a/oneflow/core/framework/tensor_impl.h b/oneflow/core/framework/tensor_impl.h
index 3ddfefd28a8..d204f20689a 100644
--- a/oneflow/core/framework/tensor_impl.h
+++ b/oneflow/core/framework/tensor_impl.h
@@ -64,6 +64,7 @@ class TensorImpl {
   virtual Maybe<bool> has_eager_blob_object() const = 0;
   virtual Maybe<int64_t> storage_offset() const { OF_UNIMPLEMENTED(); }
   virtual bool is_contiguous() const = 0;
+  virtual Maybe<bool> is_pinned() const { OF_UNIMPLEMENTED(); }
 
   // Getters for autograd
   Maybe<Tensor> acc_grad() const;
@@ -201,6 +202,7 @@ class LazyMirroredTensorImpl final : public MirroredTensorImpl {
     // but should return real status while stride/view mechanism is ready in lazy-mirrored mode
     return true;
   }
+  Maybe<bool> is_pinned() const override { RETURN_ERROR_WITH_BUG_PROMPT(); }
 
   // Getters valid only for EagerMirroredTensorImpl
   Maybe<vm::EagerBlobObject> eager_blob_object() const override { RETURN_ERROR_WITH_BUG_PROMPT(); }
@@ -229,6 +231,7 @@ class EagerMirroredTensorImpl final : public MirroredTensorImpl {
   Maybe<MirroredTensorImpl> detach() const override;
   bool is_lazy() const override { return false; }
   bool is_contiguous() const override { return tensor_meta_->is_contiguous(); }
+  Maybe<bool> is_pinned() const override;
 
   // Getters valid only for EagerMirroredTensorImpl
   Maybe<vm::EagerBlobObject> eager_blob_object() const override {
diff --git a/oneflow/core/framework/tensor_methods.cpp b/oneflow/core/framework/tensor_methods.cpp
index 6ba21fbb722..cc7b7aa08dc 100644
--- a/oneflow/core/framework/tensor_methods.cpp
+++ b/oneflow/core/framework/tensor_methods.cpp
@@ -75,7 +75,7 @@ Maybe<Tensor> BasicView(const std::shared_ptr<Tensor>& input, const Shape& targe
   auto tensor_impl = std::make_shared<EagerMirroredTensorImpl>(
       tensor_meta, JUST(input->tensor_storage()), requires_grad,
       /*is_leaf=*/!requires_grad);
-  const bool pin_memory = JUST(JUST(input->AsMirroredTensor())->eager_blob_object())->pin_memory();
+  const bool pin_memory = JUST(JUST(input->AsMirroredTensor())->is_pinned());
   JUST(tensor_impl->InitEagerBlobObject(JUST(blob_object->compute_local_dep_object()),
                                         /*pin_memory=*/pin_memory));
 
diff --git a/oneflow/core/functional/impl/array_functor.cpp b/oneflow/core/functional/impl/array_functor.cpp
index b44a3635207..b0fefaf0fae 100644
--- a/oneflow/core/functional/impl/array_functor.cpp
+++ b/oneflow/core/functional/impl/array_functor.cpp
@@ -3020,7 +3020,7 @@ class PinMemoryFunctor {
     CHECK_OR_RETURN(input->is_local() && !(LazyMode::is_enabled()))
         << Error::RuntimeError() << "Tensor.pin_memory() only support local tensor for now!";
     // if tensor already pinned, then just return
-    if (JUST(JUST(input->AsMirroredTensor())->eager_blob_object())->pin_memory()) { return input; }
+    if (JUST(JUST(input->AsMirroredTensor())->is_pinned())) { return input; }
     auto shape = input->shape();
     auto device = JUST(input->device());
     const bool requires_grad = input->requires_grad();
diff --git a/python/oneflow/framework/docstr/tensor.py b/python/oneflow/framework/docstr/tensor.py
index 45e6b890c19..ba295357946 100644
--- a/python/oneflow/framework/docstr/tensor.py
+++ b/python/oneflow/framework/docstr/tensor.py
@@ -2081,6 +2081,15 @@
     """,
 )
 
+add_docstr(
+    oneflow.Tensor.is_pinned,
+    r"""
+    Tensor.is_pinned() -> bool
+
+    Returns true if this tensor resides in pinned memory.
+    """,
+)
+
 add_docstr(
     oneflow.Tensor.type,
     r"""Returns the type if dtype is not provided, else casts this object to the specified type.
diff --git a/python/oneflow/test/tensor/test_tensor_pin_memory.py b/python/oneflow/test/tensor/test_tensor_pin_memory.py
index e619dd412df..4675c4b9abc 100644
--- a/python/oneflow/test/tensor/test_tensor_pin_memory.py
+++ b/python/oneflow/test/tensor/test_tensor_pin_memory.py
@@ -70,6 +70,17 @@ def test_tensor_construct_with_pin_memory_param(test_case):
         )
         return x
 
+    @unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
+    @flow.unittest.skip_unless_1n1d()
+    @autotest(n=5, auto_backward=True, check_graph=False)
+    def test_tensor_is_pinned(test_case):
+        device = random_device()
+        x = random_tensor(ndim=4).to(device)
+        y = x.pin_memory()
+        test_case.assertTrue(x.oneflow.is_pinned() == x.pytorch.is_pinned())
+        test_case.assertTrue(y.oneflow.is_pinned() == y.pytorch.is_pinned())
+        return y
+
 
 if __name__ == "__main__":
     unittest.main()
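A minimal usage sketch of the API this patch adds, from the Python side (illustrative only, not part of the diff above; assumes a CUDA-enabled OneFlow build where pinned, i.e. page-locked, host memory is available, and the shapes are arbitrary):

import oneflow as flow

x = flow.randn(2, 3)       # ordinary pageable CPU tensor
print(x.is_pinned())       # False

y = x.pin_memory()         # copy into pinned (page-locked) host memory
print(y.is_pinned())       # True

# PinMemoryFunctor above returns the input unchanged if it is already pinned,
# so pinning twice is a no-op and the result still reports True.
print(y.pin_memory().is_pinned())  # True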