[AutoParallel] Support disttensor for Tensor.copy_ #58369
Changes from 4 commits: ee348bb, 9554f34, 4e252ab, 015b195, 51f00bc, efc708d
```diff
@@ -27,7 +27,11 @@ limitations under the License. */
 #include "paddle/phi/core/tensor_utils.h"
 #include "paddle/phi/infermeta/unary.h"
 // clang-format off
-
+#ifdef PADDLE_WITH_DISTRIBUTE
+#include "paddle/phi/infermeta/spmd_rules/rules.h"
+#include "paddle/phi/core/distributed/auto_parallel/reshard_utils.h"
+#include "paddle/phi/api/lib/data_transform.h"
+#endif
 namespace paddle {
 namespace experimental {
 // declare cast api
```
```diff
@@ -87,9 +91,7 @@ void Tensor::copy_(const Tensor &src,
     VLOG(8) << "Src is empty, skip copy";
     return;
   }
-  // Prepare copy kernel key and outputs
-  auto kernel_key_set = ParseKernelKeyByInputArgs(src);
-  KernelType kernel_type = ParseKernelTypeByInputArgs(src);
+
   VLOG(3) << "Deep copy Tensor from " << src.name() << " to " << name();
   if (initialized()) {
     PADDLE_ENFORCE_EQ(dtype(),
```
```diff
@@ -114,6 +116,12 @@ void Tensor::copy_(const Tensor &src,
                       "Copy cannot be performed!",
                       target_place,
                       place()));
+  }
+
+  // Prepare copy kernel key and outputs
+  auto kernel_key_set = ParseKernelKeyByInputArgs(src);
+  KernelType kernel_type = ParseKernelTypeByInputArgs(src);
+  if (initialized()) {
     kernel_key_set.backend_set = kernel_key_set.backend_set |
                                  BackendSet(phi::TransToPhiBackend(place()));
   } else {
```
```diff
@@ -128,6 +136,60 @@ void Tensor::copy_(const Tensor &src,
   auto *dev_ctx = pool.GetMutable(
       place.GetType() == target_place.GetType() ? target_place : place);
 
+#ifdef PADDLE_WITH_DISTRIBUTE
+  bool run_auto_parallel = AllInputsAreDistTensor(src);
+  bool rank_is_in_current_mesh = false;
+  if (run_auto_parallel) {
+    auto mesh = std::static_pointer_cast<phi::distributed::DistTensor>(
+        src.impl())->dist_attr().process_mesh();
+    rank_is_in_current_mesh = phi::distributed::IsCurRankInMesh(mesh);
+
+    // 1. InferSpmd (Infer DistAttr of Inputs&Outputs)
+    auto meta_dist_input_x = MakeDistMetaTensor(*src.impl());
+    auto spmd_info = phi::distributed::ElementwiseUnaryInferSpmd(
+        meta_dist_input_x);
+
+    // 2. Create API Output & Prepare Dist and Dense Output
+    auto dist_out = SetKernelDistOutput(this, spmd_info.second[0]);
+    auto dense_out = dist_out->unsafe_mutable_value();
+    if (!rank_is_in_current_mesh) {
+      *dense_out = phi::DenseTensor(
+          std::make_shared<phi::Allocation>(nullptr,
+              0, phi::distributed::GetDefaultPlace()),
+          phi::DenseTensorMeta());
+    }
+
+    // 3. Infer DistTensor's Global Shape
+    phi::MetaTensor meta_dist_out(dist_out);
+    phi::UnchangedInferMeta(MakeMetaTensor(*(src.impl_)), &meta_dist_out);
+
+    if (rank_is_in_current_mesh) {
+      // 4. Select Kernel
+
+      // 5. Reshard Input
+      auto dist_input_x = ReshardApiInputToKernelInput(
+          dev_ctx, src, spmd_info.first[0]);
+
+      // 6. PrepareData (DataTransform & Prepare Dense Input)
+      auto input_x = &dist_input_x->value();
+
+      // 7. Infer Local DenseTensor Meta
+      phi::MetaTensor meta_dense_out(dense_out);
+      phi::UnchangedInferMeta(MakeMetaTensor(*input_x), &meta_dense_out);
+
+      // 8. DenseTensor Kernel Call
+      phi::Copy(*dev_ctx, *input_x, target_place, blocking, dense_out);
+
+      // 9. Reshard Partial Output to Replicated (Temporary)
+      ReshardOutputPartialAxisToReplicated(dev_ctx, dist_out);
+    }
+
+    // 10. Set Output Dist Attr For Default Impl
+    // API `copy_` does not need to set DistAttr for output.
+    return;
+  }
+#endif
+
   if (kernel_type == KernelType::DENSE_TENSOR_KENREL) {
     SetKernelOutput(this);
     phi::MetaTensor meta_out(impl_.get());
```

Review threads on this hunk:

On step 1 (`ElementwiseUnaryInferSpmd`):

> **Reviewer:** Couldn't we just create an output here and set the input's dist attr on it directly, instead of going through InferSpmd? The elementwise rule carries quite a lot of logic.
>
> **Reviewer:** Wouldn't that run into the case where this tensor does not need to be computed on the current device?
>
> **Reviewer:** Oh, I misunderstood.
>
> **Author:** done, thx!

On step 4 (`// Select Kernel`):

> **Reviewer:** The comment here doesn't seem to match the code; maybe it can be trimmed.

On step 5 (`ReshardApiInputToKernelInput`):

> **Reviewer:** A reshard doesn't seem necessary either; just copying the original value over to the output should be enough.
>
> **Author:** done, thx!

On step 9 (`ReshardOutputPartialAxisToReplicated`):

> **Reviewer:** This can be omitted as well.
>
> **Author:** done, thx!

On the `#endif`:

> **Reviewer:** Is the conditional macro here really necessary?
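For orientation, here is a minimal usage sketch of what this change enables. It is not part of the PR: it assumes the semi-automatic parallel Python API of this Paddle generation (`dist.DistAttr` / `dist.shard_tensor`) and that `Tensor.copy_` is reachable from dygraph with the C++ signature `(src, blocking)`. It needs two ranks, e.g. `python -m paddle.distributed.launch --devices=0,1 demo.py`.

```python
# Hypothetical demo (not from this PR): Tensor.copy_ with a DistTensor
# source, assuming the dist.DistAttr / dist.shard_tensor semi-auto API.
import paddle
import paddle.distributed as dist

mesh = dist.ProcessMesh([0, 1], dim_names=["x"])

src = paddle.rand([4, 8])
# Shard src along dim 0 over the "x" mesh axis.
dist_src = dist.shard_tensor(
    src, dist_attr=dist.DistAttr(mesh=mesh, sharding_specs=["x", None])
)

dst = paddle.zeros([4, 8])
# Previously the copy path only understood DenseTensor inputs; the new
# branch dispatches a DistTensor source through phi::Copy on each rank.
dst.copy_(dist_src, True)  # blocking deep copy
```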
New test file, `@@ -0,0 +1,57 @@`:

```python
# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np
from semi_auto_parallel_simple_net import MPDemoNetRecompute

import paddle
import paddle.distributed as dist
from paddle import nn

BATCH_SIZE = 16
BATCH_NUM = 4
IMAGE_SIZE = 784
CLASS_NUM = 10


def run_dynamic(layer, image, label):
    # create loss
    loss_fn = nn.MSELoss()
    # run forward and backward
    image = paddle.to_tensor(image)
    image.stop_gradient = False
    out = layer(image)

    label = paddle.to_tensor(label)
    loss = loss_fn(out, label)

    loss.backward()
    return loss, layer.w0.grad, layer.w1.grad


class TestSemiAutoParallelRecompute:
    def test_recompute():
        mesh = dist.ProcessMesh([0, 1], dim_names=["x"])
        image = np.random.random([BATCH_SIZE, IMAGE_SIZE]).astype('float32')
        label = np.random.random([BATCH_SIZE, CLASS_NUM]).astype('float32')
        w0 = np.random.random([IMAGE_SIZE, IMAGE_SIZE]).astype('float32')
        w1 = np.random.random([IMAGE_SIZE, CLASS_NUM]).astype('float32')
        run_dynamic(
            layer=MPDemoNetRecompute(w0, w1, mesh), image=image, label=label
        )


if __name__ == "__main__":
    TestSemiAutoParallelRecompute.test_recompute()
```
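`MPDemoNetRecompute` comes from `semi_auto_parallel_simple_net.py`, which is outside this diff. As a rough, hypothetical sketch only (layer structure, names, and sharding layout are assumptions, not the PR's code), a recompute-enabled mesh-parallel layer in this test family might look like the block below. `recompute` replays the forward segment during backward, which is one of the paths that can exercise `Tensor.copy_` on DistTensor inputs.

```python
# Hypothetical sketch, NOT the real MPDemoNetRecompute: a mesh-parallel
# layer that wraps its forward pass in fleet's recompute helper.
import paddle
import paddle.distributed as dist
from paddle import nn
from paddle.distributed.fleet.utils import recompute


class DemoNetRecompute(nn.Layer):  # name is an assumption
    def __init__(self, w0, w1, mesh):
        super().__init__()
        # Column-shard w0 and row-shard w1 over the "x" mesh axis
        # (a typical tensor-parallel layout; assumed, not from the PR).
        self.w0 = dist.shard_tensor(
            paddle.to_tensor(w0),
            dist_attr=dist.DistAttr(mesh=mesh, sharding_specs=[None, "x"]),
        )
        self.w1 = dist.shard_tensor(
            paddle.to_tensor(w1),
            dist_attr=dist.DistAttr(mesh=mesh, sharding_specs=["x", None]),
        )
        self.w0.stop_gradient = False
        self.w1.stop_gradient = False

    def _block(self, x):
        return paddle.matmul(paddle.matmul(x, self.w0), self.w1)

    def forward(self, x):
        # Activations of _block are recomputed during backward rather
        # than stored, trading compute for memory.
        return recompute(self._block, x)
```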
Overall review thread:

> **Reviewer:** The auto parallel path can live in the DenseTensor kernel branch; it is an extension mechanism of the DenseTensor kernel.
>
> **Author:** done, thx!