Add nn functional fold (#8667)
* add fold

* update fold.py

* add test

* fix doc

* fix comment

Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com>
zhongshsh and mergify[bot] authored Jul 20, 2022
1 parent 000072f commit c677eea
Showing 13 changed files with 329 additions and 76 deletions.
2 changes: 2 additions & 0 deletions docs/source/nn.functional.rst
@@ -24,6 +24,8 @@ Convolution functions
conv_transpose1d
conv_transpose2d
conv_transpose3d
+    fold
+    unfold

Pooling functions
----------------------------------
4 changes: 2 additions & 2 deletions oneflow/core/autograd/gradient_funcs/fold.cpp
@@ -66,8 +66,8 @@ Maybe<void> Fold::Apply(const FoldInterpState* ctx, const TensorTuple& out_grads
if (!ctx->requires_grad) { return Maybe<void>::Ok(); }
CHECK_EQ_OR_RETURN(out_grads.size(), 1);
in_grads->resize(1);
-  in_grads->at(0) = JUST(functional::Unfold(out_grads.at(0), ctx->data_format, ctx->kernel_size,
-                                             ctx->dilation_rate, ctx->padding, ctx->strides));
+  in_grads->at(0) = JUST(functional::Unfold(out_grads.at(0), ctx->kernel_size, ctx->dilation_rate,
+                                             ctx->padding, ctx->strides, ctx->data_format));
return Maybe<void>::Ok();
}

4 changes: 2 additions & 2 deletions oneflow/core/autograd/gradient_funcs/unfold.cpp
@@ -73,8 +73,8 @@ Maybe<void> Unfold::Apply(const UnfoldInterpState* ctx, const TensorTuple& out_g
CHECK_EQ_OR_RETURN(out_grads.size(), 1); // NOLINT(maybe-need-error-msg)
in_grads->resize(1);
in_grads->at(0) =
-      JUST(functional::Fold(out_grads.at(0), ctx->data_format, ctx->output_size, ctx->kernel_size,
-                            ctx->dilation_rate, ctx->padding, ctx->strides));
+      JUST(functional::Fold(out_grads.at(0), ctx->output_size, ctx->kernel_size, ctx->dilation_rate,
+                            ctx->padding, ctx->strides, ctx->data_format));
return Maybe<void>::Ok();
}

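The two gradient hunks above mirror each other: the backward of Fold is an Unfold with the same geometry, and vice versa; only the argument order changes to match the reworked functional signatures. As a minimal illustration (not part of this commit's tests, and assuming the Python API added later in this diff), with non-overlapping blocks the gradient of fold(x).sum() with respect to x is unfold applied to a tensor of ones, i.e. all ones:

    import oneflow as flow

    # (N, C*kh*kw, L) block tensor; 5x4 blocks tiled without overlap onto a 10x12 output
    x = flow.randn(1, 3 * 5 * 4, 6).requires_grad_()
    y = flow.nn.functional.fold(x, output_size=(10, 12), kernel_size=(5, 4), stride=(5, 4))
    y.sum().backward()
    print(x.grad.shape)  # (1, 60, 6); every entry is 1.0 for this non-overlapping layout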
11 changes: 4 additions & 7 deletions oneflow/core/functional/functional_api.yaml
@@ -2032,16 +2032,13 @@

- name: "unfold"
signature:
-    'Tensor (Tensor x, String data_format="channels_first", Int32List kernel_size,
-     Int32List dilation_rate, Int32List padding,
-     Int32List strides) => Unfold'
+    'Tensor (Tensor x, Int32List[2] kernel_size, Int32List[2] dilation=1, Int32List[2] padding=0,
+     Int32List[2] stride=1, String data_format="channels_first") => Unfold'
bind_python: True

- name: "fold"
-  signature: 'Tensor (Tensor x, String data_format="channels_first",
-              Int32List output_size, Int32List kernel_size,
-              Int32List dilation_rate, Int32List padding,
-              Int32List strides) => Fold'
+  signature: 'Tensor (Tensor x, Int32List[1] output_size, Int32List[2] kernel_size, Int32List[2] dilation=1,
+              Int32List[2] padding=0, Int32List[2] stride=1, String data_format="channels_first") => Fold'
bind_python: True

- name: "split"
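The YAML change moves data_format to the end of both signatures with a default of "channels_first", and gives dilation, padding, and stride defaults (1, 0, 1), so Python callers only need kernel_size (plus output_size for fold). A hedged sketch of calls the new signatures are meant to accept, with keyword names taken from the signature strings above:

    import oneflow as flow

    x = flow.randn(1, 3, 10, 12)                                   # channels-first (N, C, H, W)
    cols = flow.nn.functional.unfold(x, kernel_size=[3, 3])        # dilation/padding/stride default to 1/0/1
    cols2 = flow.nn.functional.unfold(x, kernel_size=[3, 3], padding=[1, 1], stride=[2, 2])
    print(cols.shape, cols2.shape)                                 # (1, 27, 80) and (1, 27, 30)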
13 changes: 7 additions & 6 deletions oneflow/core/functional/impl/nn_functor.cpp
@@ -2329,11 +2329,11 @@ class UnfoldFunctor {
UnfoldFunctor() {
unfold_op_ = CHECK_JUST(one::OpBuilder("unfold").Input("x").Output("y").Build());
}
-  Maybe<Tensor> operator()(const std::shared_ptr<one::Tensor>& x, const std::string& data_format,
+  Maybe<Tensor> operator()(const std::shared_ptr<one::Tensor>& x,
                            const std::vector<int32_t>& kernel_size,
                            const std::vector<int32_t>& dilation_rate,
-                           const std::vector<int32_t>& padding,
-                           const std::vector<int32_t>& strides) const {
+                           const std::vector<int32_t>& padding, const std::vector<int32_t>& strides,
+                           const std::string& data_format) const {
const auto& x_shape = x->shape();
// Only Support 4d tensor now.
CHECK_EQ_OR_RETURN(x_shape->NumAxes(), 4)
@@ -2355,17 +2355,18 @@ class FoldFunctor {
class FoldFunctor {
public:
FoldFunctor() { fold_op_ = CHECK_JUST(one::OpBuilder("fold").Input("x").Output("y").Build()); }
-  Maybe<Tensor> operator()(const std::shared_ptr<one::Tensor>& x, const std::string& data_format,
+  Maybe<Tensor> operator()(const std::shared_ptr<one::Tensor>& x,
                            const std::vector<int32_t>& output_size,
                            const std::vector<int32_t>& kernel_size,
                            const std::vector<int32_t>& dilation_rate,
-                           const std::vector<int32_t>& padding,
-                           const std::vector<int32_t>& strides) const {
+                           const std::vector<int32_t>& padding, const std::vector<int32_t>& strides,
+                           const std::string& data_format) const {
const auto& x_shape = x->shape();
// Only Support 3d tensor fold now. format is (N, C*K*K, L)
CHECK_EQ_OR_RETURN(x_shape->NumAxes(), 3)
<< Error::RuntimeError() << "Input Tensor dim should == 3";
MutableAttrMap attrs;

JUST(attrs.SetAttr<std::string>("data_format", data_format));
JUST(attrs.SetAttr<std::vector<int32_t>>("output_size", output_size));
JUST(attrs.SetAttr<std::vector<int32_t>>("kernel_size", kernel_size));
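These functors also pin down the shape contract the Python wrappers inherit: UnfoldFunctor accepts only 4-D (N, C, H, W) input, and FoldFunctor accepts only 3-D (N, C*prod(kernel_size), L) input. A small sketch of shapes that satisfy those checks (illustrative only, not taken from the commit's tests):

    import oneflow as flow

    img = flow.randn(2, 3, 8, 8)                               # 4-D input, as required by unfold
    cols = flow.nn.functional.unfold(img, kernel_size=[2, 2])  # -> (2, 3*2*2, 49) = (2, 12, 49)
    out = flow.nn.functional.fold(cols, output_size=[8, 8], kernel_size=[2, 2])  # 3-D input, as required by fold
    print(out.shape)                                           # (2, 3, 8, 8)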
1 change: 1 addition & 0 deletions python/oneflow/framework/docstr/__init__.py
@@ -75,3 +75,4 @@
from .amin import *
from .deconv import *
from .logical_ops import *
+from .convolution import *
58 changes: 58 additions & 0 deletions python/oneflow/framework/docstr/convolution.py
@@ -0,0 +1,58 @@
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow
from oneflow.framework.docstr.utils import add_docstr

add_docstr(
    oneflow.nn.functional.fold,
    r"""
    fold(input, output_size, kernel_size, dilation=1, padding=0, stride=1)

    The documentation is referenced from: https://pytorch.org/docs/1.10/generated/torch.nn.functional.fold.html.

    Combines an array of sliding local blocks into a large containing tensor.

    .. warning::
        Currently, only 3-D input tensors (batched image-like tensors) are supported, and only
        unbatched (3D) or batched (4D) image-like output tensors are supported.

    See :class:`oneflow.nn.Fold` for details.
    """,
)

add_docstr(
    oneflow.nn.functional.unfold,
    r"""
    unfold(input, kernel_size, dilation=1, padding=0, stride=1)

    The documentation is referenced from: https://pytorch.org/docs/1.10/generated/torch.nn.functional.unfold.html.

    Extracts sliding local blocks from a batched input tensor.

    .. warning::
        Currently, only 4-D input tensors (batched image-like tensors) are supported.

    .. warning::
        More than one element of the unfolded tensor may refer to a single memory location. As a
        result, in-place operations (especially ones that are vectorized) may result in incorrect
        behavior. If you need to write to the tensor, please clone it first.

    See :class:`oneflow.nn.Unfold` for details.
    """,
)
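A hedged usage sketch matching the documented signatures (not part of the commit): when stride equals kernel_size the blocks do not overlap and fold exactly inverts unfold; with overlapping blocks, fold sums the overlapping contributions instead.

    import oneflow as flow

    x = flow.randn(1, 3, 10, 12)
    cols = flow.nn.functional.unfold(x, kernel_size=(5, 4), stride=(5, 4))   # (1, 60, 6)
    y = flow.nn.functional.fold(cols, output_size=(10, 12), kernel_size=(5, 4), stride=(5, 4))
    print((x - y).abs().max())   # 0.0 for this non-overlapping configuration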
2 changes: 2 additions & 0 deletions python/oneflow/nn/functional/__init__.py
@@ -69,3 +69,5 @@
from oneflow.nn.modules.linear import linear
from oneflow.nn.modules.activation import relu6
from oneflow.nn.modules.upsampling import Upsample as upsample
+from oneflow._C import unfold
+from oneflow._C import fold
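These two imports are what expose the new C bindings as oneflow.nn.functional.fold and oneflow.nn.functional.unfold. A quick illustrative check (assuming the re-export semantics of these imports) that the functional entry points and the oneflow._C bindings are the same callables:

    import oneflow as flow

    assert flow.nn.functional.fold is flow._C.fold
    assert flow.nn.functional.unfold is flow._C.unfold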
(Diffs for the remaining changed files are not shown here.)