We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
The embedding op isn't working in the EmitC path.
IR:
func.func @embedding(%arg0: tensor<32x32xbf16>, %arg1: tensor<512x128xbf16>) -> tensor<32x32x128xbf16> { %0 = tensor.empty() : tensor<32x32x128xbf16> %1 = "ttir.embedding"(%arg0, %arg1, %0) : (tensor<32x32xbf16>, tensor<512x128xbf16>, tensor<32x32x128xbf16>) -> tensor<32x32x128xbf16> return %1 : tensor<32x32x128xbf16> }
gets translated to the following C++:
#include "ttnn-precompiled.hpp" ttnn::Tensor embedding(ttnn::Tensor v1, ttnn::Tensor v2) { ttnn::IDevice* v3 = ttnn::DeviceGetter::getInstance(); ttnn::MemoryConfig v4 = ttnn::MemoryConfig(ttnn::TensorMemoryLayout::INTERLEAVED, ttnn::BufferType::DRAM); ttnn::Tensor v5 = ttnn::to_device(v2, v3, v4); ttnn::Tensor v6 = ttnn::to_layout(v5, ttnn::Layout::TILE, std::nullopt, std::nullopt, static_cast<::ttnn::IDevice *>(nullptr)); ttnn::deallocate(v5, false); ttnn::SimpleShape v7 = ttnn::SimpleShape(tt::tt_metal::LegacyShape({32, 32, 128, })); ttnn::MemoryConfig v8 = ttnn::MemoryConfig(ttnn::TensorMemoryLayout::INTERLEAVED, ttnn::BufferType::DRAM); ttnn::Tensor v9 = ttnn::empty(v7, ttnn::DataType::BFLOAT16, ttnn::Layout::TILE, v3, v8); ttnn::Tensor v10 = ttnn::typecast(v1, ttnn::DataType::UINT32); ttnn::MemoryConfig v11 = ttnn::MemoryConfig(ttnn::TensorMemoryLayout::INTERLEAVED, ttnn::BufferType::DRAM); ttnn::Tensor v12 = ttnn::to_device(v10, v3, v11); ttnn::deallocate(v10, false); ttnn::Tensor v13 = ttnn::embedding(v12, v6); ttnn::deallocate(v12, false); ttnn::deallocate(v6, false); ttnn::Tensor v14 = ttnn::from_device(v13); ttnn::deallocate(v9, false); ttnn::Tensor v15 = ttnn::to_layout(v14, ttnn::Layout::ROW_MAJOR, std::nullopt, std::nullopt, static_cast<::ttnn::IDevice *>(nullptr)); ttnn::deallocate(v14, false); return v15; } std::tuple<ttnn::Tensor, ttnn::Tensor> createInputsFor_embedding() { ttnn::SimpleShape v1 = ttnn::SimpleShape(tt::tt_metal::LegacyShape({32, 32, })); ttnn::Tensor v2 = ttnn::ones(v1, ttnn::DataType::BFLOAT16, ttnn::Layout::ROW_MAJOR, std::nullopt, std::nullopt); ttnn::SimpleShape v3 = ttnn::SimpleShape(tt::tt_metal::LegacyShape({512, 128, })); ttnn::Tensor v4 = ttnn::ones(v3, ttnn::DataType::BFLOAT16, ttnn::Layout::ROW_MAJOR, std::nullopt, std::nullopt); return std::make_tuple(v2, v4); } int32_t main() { ttnn::Tensor v1; ttnn::Tensor v2; std::tie(v1, v2) = createInputsFor_embedding(); ttnn::Tensor v3 = embedding(v1, v2); int32_t v4 = 0; return 
v4; }
Line:
ttnn::Tensor v10 = ttnn::typecast(v1, ttnn::DataType::UINT32);
throws at
third_party/tt-metal/src/tt-metal/ttnn/cpp/ttnn/tensor/tensor.hpp:328 TT_THROW("Cannot get the device from a tensor with host storage");
This works in the regular TTRT path due to a workaround in
runtime/lib/ttnn/operations/layout/typecast.cpp:24 out = ::ttnn::to_dtype(inputTensor, targetDataType);
The text was updated successfully, but these errors were encountered:
unsupport embedding.mlir until #1938 is fixed
dd9db46
sdjordjevicTT
No branches or pull requests
The embedding op isn't working in the EmitC path.
IR:
gets translated to the following C++:
Line:
throws at
This works in the regular TTRT path due to a workaround in
The text was updated successfully, but these errors were encountered: