From 785699938161ac65d5fc3b87c4d67cac1395f8fc Mon Sep 17 00:00:00 2001
From: Yeounoh Chung
Date: Fri, 15 Mar 2024 09:59:18 -0700
Subject: [PATCH] Revert "Skip the execution if all Pending IRs are device data (#6642)"

This reverts commit 709622020872b4a25fcc771ae1f3b491960fc82c.
---
 torch_xla/csrc/xla_graph_executor.cpp | 23 +++--------------------
 1 file changed, 3 insertions(+), 20 deletions(-)

diff --git a/torch_xla/csrc/xla_graph_executor.cpp b/torch_xla/csrc/xla_graph_executor.cpp
index 84b9eeba0142..a21fbf27f0fa 100644
--- a/torch_xla/csrc/xla_graph_executor.cpp
+++ b/torch_xla/csrc/xla_graph_executor.cpp
@@ -629,26 +629,9 @@ XLAGraphExecutor::SyncTensorCollection XLAGraphExecutor::CollectSyncTensors(
       torch::lazy::Value ir_value = tensors[i]->CurrentIrValue();
       if (ir_value) {
         if (ShouldSyncIrValue(ir_value)) {
-          auto device_data = torch_xla::DeviceData::Cast(ir_value.node.get());
-          // If current tensor is cloned from another tensor, we want to assign
-          // a new XlaData to it after current execution. Cloned tensor might
-          // share the same storage with the origional tensor but origional
-          // tensor might alias its storage with the output. It is safer to
-          // allocate a new buffer for the cloned tensor.
-          if (device_data != nullptr && !tensors[i]->data()->is_cloned) {
-            // current IR is a devicedata, we don't need to include it as a
-            // result of the computation. Call `GetXlaData` to extract the
-            // XlaData from the DeviceData Node and reset the IR. We also want
-            // to update XlaData's tensorID to make it match with the current
-            // XLATensor.
-            tensors[i]->GetXlaData()->SetInfo(
-                std::make_shared<torch::lazy::LazyGraphExecutor::DeviceDataInfo>(
-                    tensors[i]->GetUniqueId(), /*=read_only=*/false));
-          } else {
-            // Add only tensors which need to be synced.
-            coll.hash = torch::lazy::HashCombine(coll.hash, ir_value.hash());
-            coll.indices.push_back(i);
-          }
+          // Add only tensors which need to be synced.
+          coll.hash = torch::lazy::HashCombine(coll.hash, ir_value.hash());
+          coll.indices.push_back(i);
         }
       } else if (config.force_ltc_data) {
         // The tensor only has at::Tensor data. We need to queue it for a