Skip to content

Commit

Permalink
【PIR Dist Op Reg No.22】 reg pull_sparse_v2 (PaddlePaddle#63014)
Browse files Browse the repository at this point in the history
* feat(pir): reg pull_sparse_v2

* feat(pir): reg pull_sparse_v2

* feat(pir): reg pull_sparse_v2

* feat(pir): reg pull_sparse_v2

* feat(pir): reg pull_sparse_v2

* feat(pir): reg pull_sparse_v2

* feat(pir): reg pull_sparse_v2

* feat(pir): reg pull_sparse_v2

* feat(pir): reg pull_sparse_v2

* feat(pir): reg pull_sparse_v2

* feat(pir): reg pull_sparse_v2

* feat(pir): reg pull_sparse_v2

* feat(pir): reg pull_sparse_v2

* feat(pir): reg pull_sparse_v2
  • Loading branch information
xiaoyewww authored and co63oc committed May 10, 2024
1 parent 7898da1 commit 9831401
Show file tree
Hide file tree
Showing 8 changed files with 131 additions and 2 deletions.
1 change: 1 addition & 0 deletions paddle/fluid/pir/dialect/op_generator/ops_api_gen.py
Original file line number Diff line number Diff line change
Expand Up @@ -195,6 +195,7 @@
'prune_gate_by_capacity',
'push_sparse_v2',
'push_sparse_v2_',
'pull_sparse_v2',
'partial_concat',
'partial_send',
'partial_recv',
Expand Down
12 changes: 11 additions & 1 deletion paddle/fluid/pir/dialect/operator/ir/ops.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -1362,6 +1362,16 @@
optional : w
backward: push_gpups_sparse

- op : pull_sparse_v2
args : (Tensor[] ids, Tensor[] w, int embedding_dim = 11, int table_id = 0, str accessor_class = "", str ctr_label_name = "", int padding_id = 0, bool scale_sparse_grad = true, str[] input_names = {}, bool is_distributed = true)
output : Tensor[](out){w.size()}
infer_meta :
func : PullSparseV2InferMeta
kernel :
func : pull_sparse_v2
data_type : DataType::FLOAT32
backward : pull_sparse_v2_grad

- op : push_dense
args : (Tensor[] ids, int table_id = -1, float scale_data_norm = -1.0f, str[] input_names = {})
output :
Expand All @@ -1373,7 +1383,7 @@
data_type : DataType::FLOAT32

- op : push_sparse_v2
args : (Tensor[] ids, Tensor[] w, Tensor[] out_grad_in, int embeddingdim = 11, int tableid = 0, str accessorclass = "", str ctrlabelname = "", int paddingid = 0, bool scalesparsegrad = true, str[] inputnames = {}, bool is_distributed = true)
args : (Tensor[] ids, Tensor[] w, Tensor[] out_grad_in, int embedding_dim = 11, int table_id = 0, str accessor_class = "", str ctr_label_name = "", int padding_id = 0, bool scale_sparse_grad = true, str[] input_names = {}, bool is_distributed = true)
output : Tensor[](out_grad_out){out_grad_in.size()}
infer_meta :
func : UnchangedMultiInferMeta
Expand Down
6 changes: 6 additions & 0 deletions paddle/phi/api/yaml/backward.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -1870,6 +1870,12 @@
data_type : x
optional : boxes_num

- backward_op : pull_sparse_v2_grad
forward : pull_sparse_v2 (Tensor[] ids, Tensor[] w, int embedding_dim = 11, int table_id = 0, str accessor_class = "", str ctr_label_name = "", int padding_id = 0, bool scale_sparse_grad = true, str[] input_names = {}, bool is_distributed = true) -> Tensor[](out)
args : (Tensor[] ids, Tensor[] w, Tensor[] out_grad, int embedding_dim = 11, int table_id = 0, str accessor_class = "", str ctr_label_name = "", int padding_id = 0, bool scale_sparse_grad = true, str[] input_names = {}, bool is_distributed = true)
output : Tensor[](out_grad_out)
invoke : push_sparse_v2(ids, w, out_grad, embedding_dim, table_id, accessor_class, ctr_label_name, padding_id, scale_sparse_grad, input_names, is_distributed)

- backward_op : push_gpups_sparse
forward : pull_gpups_sparse (Tensor w, Tensor[] ids, int[] size={}, bool is_sparse=false, bool is_distributed=false) -> Tensor[](out)
args : (Tensor[] ids, Tensor[] out_grad, int[] size, bool is_sparse, bool is_distributed)
Expand Down
10 changes: 9 additions & 1 deletion paddle/phi/api/yaml/op_compat.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -2686,6 +2686,14 @@
outputs :
out : Out

- op : pull_sparse_v2
inputs :
{ ids : Ids, w : W}
outputs :
out : Out
extra :
attrs : [int embedding_dim = 11, int table_id = 0, str accessor_class = "", str ctr_label_name = "", int padding_id = 0, bool scale_sparse_grad = true, 'str[] input_names = {}', bool is_distributed = true]

- op : push_dense
inputs :
ids : Ids
Expand All @@ -2698,7 +2706,7 @@
outputs :
out : Out
extra :
attrs : [int embeddingdim = 11, int tableid = 0, str accessorclass = "", str ctrlabelname = "", int paddingid = 0, bool scalesparsegrad = true, 'str[] inputnames = {}', bool is_distributed = true]
attrs : [int embedding_dim = 11, int table_id = 0, str accessor_class = "", str ctr_label_name = "", int padding_id = 0, bool scale_sparse_grad = true, 'str[] input_names = {}', bool is_distributed = true]

- op : put_along_axis
backward : put_along_axis_grad
Expand Down
38 changes: 38 additions & 0 deletions paddle/phi/infermeta/binary.cc
Original file line number Diff line number Diff line change
Expand Up @@ -2787,6 +2787,44 @@ void PullGpupsSparseInferMeta(const MetaTensor& w,
}
}

void PullSparseV2InferMeta(const std::vector<const MetaTensor*>& ids,
                           const std::vector<const MetaTensor*>& w,
                           int embedding_dim,
                           int table_id,
                           const std::string& accessor_class,
                           const std::string& ctrlabel_name,
                           int padding_id,
                           bool scale_sparse_grad,
                           const std::vector<std::string>& input_names,
                           bool is_distributed,
                           std::vector<MetaTensor*> out) {
  // Shape inference for pull_sparse_v2: one output per ids tensor, whose
  // shape is the corresponding ids shape with embedding_dim appended as the
  // trailing dimension. Dtype is taken from the matching w tensor.
  PADDLE_ENFORCE_GE(ids.size(),
                    1UL,
                    phi::errors::InvalidArgument(
                        "Input(Ids) of PullSparseV2Op can not be null"));
  PADDLE_ENFORCE_GE(out.size(),
                    1UL,
                    phi::errors::InvalidArgument(
                        "Output(Out) of PullSparseV2Op can not be null"));

  const size_t num_inputs = ids.size();
  for (size_t idx = 0; idx < num_inputs; ++idx) {
    // out[idx] shape = ids[idx] shape + [embedding_dim].
    auto out_shape = common::vectorize(ids[idx]->dims());
    out_shape.push_back(embedding_dim);
    out[idx]->set_dims(common::make_ddim(out_shape));
    out[idx]->share_lod(*ids[idx], idx);
    out[idx]->set_dtype(w[idx]->dtype());
  }
}

void ApplyPerChannelScaleInferMeta(const MetaTensor& x,
const MetaTensor& scales,
MetaTensor* out) {
Expand Down
12 changes: 12 additions & 0 deletions paddle/phi/infermeta/binary.h
Original file line number Diff line number Diff line change
Expand Up @@ -480,6 +480,18 @@ void PullGpupsSparseInferMeta(const MetaTensor& w,
bool is_distributed,
std::vector<MetaTensor*> out);

// Infers meta info (dims/dtype/lod) for the pull_sparse_v2 op: emits one
// output per ids tensor, appending embedding_dim as the last dimension and
// taking the dtype from the matching w tensor. Defined in binary.cc.
void PullSparseV2InferMeta(const std::vector<const MetaTensor*>& ids,
                           const std::vector<const MetaTensor*>& w,
                           int embedding_dim,
                           int table_id,
                           const std::string& accessor_class,
                           const std::string& ctrlabel_name,
                           int padding_id,
                           bool scale_sparse_grad,
                           const std::vector<std::string>& input_names,
                           bool is_distributed,
                           std::vector<MetaTensor*> out);

void RepeatInterleaveWithTensorIndexInferMeta(const MetaTensor& x,
const MetaTensor& repeats,
int dim,
Expand Down
1 change: 1 addition & 0 deletions test/ir/pir/translator/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,7 @@ list(APPEND DISTRIBUTED_OP_TRANSLATOR_TEST test_partial_recv_translator)
list(APPEND DISTRIBUTED_OP_TRANSLATOR_TEST
test_prune_gate_by_capacity_translator)
list(APPEND DISTRIBUTED_OP_TRANSLATOR_TEST test_pull_gpups_sparse_translator)
list(APPEND DISTRIBUTED_OP_TRANSLATOR_TEST test_pull_sparse_v2_translator)
list(APPEND DISTRIBUTED_OP_TRANSLATOR_TEST test_random_routing_translator)
list(APPEND DISTRIBUTED_OP_TRANSLATOR_TEST test_limit_by_capacity_translator)
list(APPEND DISTRIBUTED_OP_TRANSLATOR_TEST test_global_scatter_translator)
Expand Down
53 changes: 53 additions & 0 deletions test/ir/pir/translator/test_pull_sparse_v2_translator.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,53 @@
# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import test_op_translator

import paddle
from paddle.base import core
from paddle.base.layer_helper import LayerHelper


class TestPullSparseV2OpTranslator(
    test_op_translator.TestOpWithBackwardTranslator
):
    """Verifies that the legacy pull_sparse_v2 op (with its push_sparse_v2
    backward) translates correctly into the PIR dialect."""

    def setUp(self):
        # Forward/backward op pair exercised by the translator check.
        self.forward_op_type = "pull_sparse_v2"
        self.backward_op_type = "push_sparse_v2"
        # Run the translation on CPU with a fresh scope and program.
        self.place = core.Place()
        self.place.set_place(paddle.CPUPlace())
        self.new_scope = paddle.static.Scope()
        self.main_program = paddle.static.Program()

    def append_op(self):
        self.op_type = "pull_sparse_v2"
        # Minimal single-element int64 tensors standing in for Ids/W/Out.
        ids_var = paddle.ones(shape=(1,), dtype='int64')
        w_var = paddle.ones(shape=(1,), dtype='int64')
        out_var = paddle.ones(shape=(1,), dtype='int64')
        op_helper = LayerHelper(self.op_type)
        op_helper.append_op(
            type=self.op_type,
            inputs={"Ids": [ids_var], "W": [w_var]},
            outputs={"Out": [out_var]},
        )
        return out_var

    def test_translator(self):
        self.check()


if __name__ == "__main__":
    # Allow running this translator test directly as a standalone script.
    unittest.main()

0 comments on commit 9831401

Please sign in to comment.