
Commit 4eaf57f
#0: updated test
dmakoviichuk-tt committed Aug 21, 2024
1 parent dfc58e4 commit 4eaf57f
Showing 5 changed files with 33 additions and 34 deletions.
8 changes: 4 additions & 4 deletions tests/ttnn/unit_tests/test_graph_capture.py
@@ -23,8 +23,8 @@ def test_graph_capture(device, scalar, size, mode):
     output_tensor = ttnn.to_torch(output_tensor, torch_rank=1)
     captured_graph = ttnn.graph.end_graph_capture()
 
-    assert captured_graph[0]["node_name"] == "capture_start"
-    assert captured_graph[1]["node_name"] == "function_start"
+    assert captured_graph[0]["node_type"] == "capture_start"
+    assert captured_graph[1]["node_type"] == "function_start"
     assert captured_graph[1]["params"]["name"] == "tt::tt_metal::detail::convert_python_tensor_to_tt_tensor"
-    assert captured_graph[-2]["node_name"] == "buffer_deallocate"
-    assert captured_graph[-1]["node_name"] == "capture_end"
+    assert captured_graph[-2]["node_type"] == "buffer_deallocate"
+    assert captured_graph[-1]["node_type"] == "capture_end"
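
The renamed key changes how callers inspect a captured graph. Below is a minimal sketch (not part of this commit) of walking the node list returned by ttnn.graph.end_graph_capture(), assuming each entry is a dict carrying the new "node_type" key plus "params", as the updated test asserts:

from collections import Counter

def summarize_captured_graph(captured_graph):
    # Count how many nodes of each type were recorded
    # (capture_start, function_start, buffer_deallocate, capture_end, ...).
    type_counts = Counter(node["node_type"] for node in captured_graph)

    # function_start nodes carry the called function's name in params["name"].
    call_trace = [
        node["params"]["name"]
        for node in captured_graph
        if node["node_type"] == "function_start"
    ]
    return type_counts, call_trace
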
2 changes: 1 addition & 1 deletion ttnn/cpp/ttnn/graph/graph_consts.hpp
@@ -6,7 +6,7 @@
 
 namespace ttnn::graph {
 // Vertex struct
-constexpr auto kNodeName = "node_name";
+constexpr auto kNodeType = "node_type";
 constexpr auto kCounter = "counter";
 constexpr auto kConnections = "connections";
 constexpr auto kParams = "params";
25 changes: 12 additions & 13 deletions ttnn/cpp/ttnn/graph/graph_processor.cpp
@@ -56,7 +56,7 @@ std::type_info const& get_type_in_var(const Variant& v){
 nlohmann::json to_json(const ttnn::graph::GraphProcessor::Vertex& data) {
     nlohmann::json j;
     j[ttnn::graph::kCounter] = data.counter;
-    j[ttnn::graph::kNodeName] = data.node_name;
+    j[ttnn::graph::kNodeType] = data.node_type;
     j[ttnn::graph::kParams] = data.params;
     j[ttnn::graph::kConnections] = data.connections;
     return j;
@@ -107,7 +107,7 @@ void GraphProcessor::track_allocate(tt::tt_metal::Buffer* buffer, bool bottom_up
     {
         graph.push_back(Vertex{
             .counter = counter,
-            .node_name = kNodeBufferAllocate,
+            .node_type = kNodeBufferAllocate,
             .params = params,
             .connections = {buf_id}
         });
@@ -127,7 +127,7 @@ void GraphProcessor::track_deallocate(tt::tt_metal::Buffer* buffer) {
     {
         graph.push_back(Vertex{
             .counter = counter,
-            .node_name = kNodeBufferDeallocate,
+            .node_type = kNodeBufferDeallocate,
             .params = params,
             .connections = {buffer_idx}
         });
@@ -147,7 +147,7 @@ void GraphProcessor::track_allocate_cb(const CoreRangeSet &core_range_set, uint6
     {
         graph.push_back({
             .counter = counter,
-            .node_name = kNodeCBAllocate,
+            .node_type = kNodeCBAllocate,
             .params = params,
             .connections = {}
         });
@@ -162,7 +162,7 @@ void GraphProcessor::track_deallocate_cb() {
     {
         graph.push_back(Vertex{
             .counter = counter,
-            .node_name = kNodeCBDeallocateAll,
+            .node_type = kNodeCBDeallocateAll,
             .params = {},
             .connections = {current_op_id.top()}
         });
@@ -195,7 +195,7 @@ void GraphProcessor::track_function_start(std::string_view function_name, std::s
     {
         graph.push_back(Vertex{
             .counter = counter,
-            .node_name = kNodeFunctionStart,
+            .node_type = kNodeFunctionStart,
             .params = params,
             .connections = {/*current_op_id.top()*/}
         });
@@ -208,7 +208,7 @@ void GraphProcessor::track_function_start(std::string_view function_name, std::s
 
     }
 
-    for (int i = 0; auto& any : input_parameters) {
+    for (auto& any : input_parameters) {
         std::type_index any_type = any.type();
         auto it = begin_function_any_map.find(any_type);
 
@@ -217,7 +217,6 @@ void GraphProcessor::track_function_start(std::string_view function_name, std::s
         } else {
             tt::log_info("input any type name ignored: {}", demangle(any.type().name()));
         }
-        i++;
     }
 
 }
@@ -230,7 +229,7 @@ void GraphProcessor::track_function_end_impl() {
     {
         graph.push_back(Vertex{
             .counter = counter,
-            .node_name = kNodeFunctionEnd,
+            .node_type = kNodeFunctionEnd,
             .params = {{kName, name}},
             .connections = {}
         });
@@ -288,7 +287,7 @@ int GraphProcessor::add_tensor(const Tensor& t) {
     if (id_to_counter.count(alloc_id) == 0) {
         graph.push_back(Vertex{
             .counter = tensor_counter,
-            .node_name = kNodeTensor,
+            .node_type = kNodeTensor,
             .params = params,
             .connections = {}
         });
@@ -317,7 +316,7 @@ int GraphProcessor::add_buffer(tt::tt_metal::Buffer* buffer) {
 
     graph.push_back(Vertex{
         .counter = counter,
-        .node_name = kNodeBuffer,
+        .node_type = kNodeBuffer,
         .params = params,
         .connections = {}
     });
@@ -429,7 +428,7 @@ void GraphProcessor::begin_capture(RunMode mode) {
     id_to_counter.clear();
     graph.push_back(Vertex{
         .counter = 0,
-        .node_name = kNodeCaptureStart,
+        .node_type = kNodeCaptureStart,
         .params = {},
         .connections = {}
     });
@@ -446,7 +445,7 @@ nlohmann::json GraphProcessor::end_capture() {
     int counter = graph.size();
     graph.push_back(Vertex{
         .counter = counter,
-        .node_name = kNodeCaptureEnd,
+        .node_type = kNodeCaptureEnd,
         .params = {},
         .connections = {}
     });
2 changes: 1 addition & 1 deletion ttnn/cpp/ttnn/graph/graph_processor.hpp
@@ -59,7 +59,7 @@ namespace ttnn::graph {
 
 struct Vertex {
     int counter = 0;
-    std::string node_name;
+    std::string node_type;
     std::unordered_map<std::string, std::string> params;
     std::vector<int> connections;
 };
30 changes: 15 additions & 15 deletions ttnn/cpp/ttnn/graph/graph_trace_utils.cpp
@@ -56,13 +56,13 @@ uint32_t extract_peak_L1_memory_usage(const nlohmann::json& trace) {
     for (size_t i = 0; i < trace.size(); ++i) {
         const auto& v = trace[i];
 
-        if (v[kNodeName] == kNodeFunctionStart) {
+        if (v[kNodeType] == kNodeFunctionStart) {
             if (current_op.empty()) {
                 while (++i < trace.size()) {
                     const auto& inner_v = trace[i];
-                    if (inner_v[kNodeName] == "buffer" && inner_v[kParams][kType] == "L1") {
+                    if (inner_v[kNodeType] == "buffer" && inner_v[kParams][kType] == "L1") {
                         total_buffer += std::stoi(inner_v[kParams][kSize].get<std::string>());
-                    } else if (inner_v[kNodeName] == kNodeTensor) {
+                    } else if (inner_v[kNodeType] == kNodeTensor) {
                         continue;
                     } else {
                         break;
@@ -71,19 +71,19 @@ uint32_t extract_peak_L1_memory_usage(const nlohmann::json& trace) {
                 --i; // adjust for loop increment
             }
             current_op.push_back(v[kParams][kName]);
-        } else if (v[kNodeName] == kNodeCBAllocate) {
+        } else if (v[kNodeType] == kNodeCBAllocate) {
             total_cb += stoi(v[kParams][kSize].get<std::string>());
-        } else if (v[kNodeName] == kNodeCBDeallocateAll) {
+        } else if (v[kNodeType] == kNodeCBDeallocateAll) {
             total_cb = 0;
-        } else if (v[kNodeName] == kNodeBufferAllocate && v[kParams][kType] == "L1") {
+        } else if (v[kNodeType] == kNodeBufferAllocate && v[kParams][kType] == "L1") {
             total_buffer += stoi(v[kParams][kSize].get<std::string>());
-        } else if (v[kNodeName] == kNodeBufferDeallocate) {
+        } else if (v[kNodeType] == kNodeBufferDeallocate) {
             auto connection = v[kConnections][0].get<int>();
             auto buffer = trace[connection];
             if(buffer[kParams][kType] == "L1") {
                 total_buffer -= stoi(buffer[kParams][kSize].get<std::string>());
             }
-        } else if (v[kNodeName] == kNodeFunctionEnd) {
+        } else if (v[kNodeType] == kNodeFunctionEnd) {
             current_op.pop_back();
         }
 
@@ -106,10 +106,10 @@ std::pair<uint32_t, uint32_t> count_intermediate_and_output_tensors(const nlohma
 
     for (int i = 0; i < trace.size(); ++i) {
         const auto& v = trace[i];
-        if (v[kNodeName] == kNodeFunctionStart && !first_begin_found) {
+        if (v[kNodeType] == kNodeFunctionStart && !first_begin_found) {
            first_begin_found = true;
            first_begin_index = i;
-        } else if (v[kNodeName] == kNodeFunctionEnd) {
+        } else if (v[kNodeType] == kNodeFunctionEnd) {
            last_end_found = true;
            last_end_index = i;
 
@@ -126,7 +126,7 @@ std::pair<uint32_t, uint32_t> count_intermediate_and_output_tensors(const nlohma
     auto connections = trace[last_end_index][kConnections].get<std::unordered_set<uint32_t>>();
     for(auto index : connections) {
         // It can be tensor or some other node like
-        if(trace[index][kNodeName] == kNodeTensor) {
+        if(trace[index][kNodeType] == kNodeTensor) {
             output_tensors.insert(index);
         }
     }
@@ -147,7 +147,7 @@ std::vector<std::string> extract_calltrace(const nlohmann::json& trace){
         const auto& v = trace[i];
         i++;
 
-        if (v[kNodeName] == kNodeFunctionStart) {
+        if (v[kNodeType] == kNodeFunctionStart) {
            op_calls.push_back(v[kParams][kName]);
         }
     }
@@ -161,7 +161,7 @@ std::unordered_set<uint32_t> extract_output_tensors(const nlohmann::json& trace)
     auto find_function_end_node = [](const auto& trace) -> const nlohmann::json& {
         for(int i = trace.size() - 1; i >= 0; --i) {
             const auto& v = trace[i];
-            if (v[kNodeName] == kNodeFunctionEnd) {
+            if (v[kNodeType] == kNodeFunctionEnd) {
                 return v;
             }
         }
@@ -176,7 +176,7 @@ std::unordered_set<uint32_t> extract_output_tensors(const nlohmann::json& trace)
     auto connections = function_end_node[kConnections].get<std::unordered_set<uint32_t>>();
     for (const auto& output_id : connections) {
         const auto& output_node = trace[output_id];
-        if (output_node[kNodeName] == kNodeTensor) {
+        if (output_node[kNodeType] == kNodeTensor) {
             output.insert(output_id);
         }
     }
@@ -193,7 +193,7 @@ std::vector<TensorInfo> extract_output_info(const nlohmann::json& trace)
     auto output_tensors = extract_output_tensors(trace);
 
     for (const auto& node : trace) {
-        if (node[kNodeName] != kNodeBuffer )
+        if (node[kNodeType] != kNodeBuffer )
            continue;
 
        auto connections = node[kConnections].get<std::unordered_set<uint32_t>>();
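
For a rough picture of how trace consumers use the renamed key, here is a simplified Python sketch (not part of this commit) of the L1 buffer accounting done by extract_peak_L1_memory_usage() above. It assumes the trace has been serialized to JSON as a list of node dicts, that kNodeBufferAllocate maps to the string "buffer_allocate" (mirroring the "buffer_deallocate" string asserted in the updated test), and it ignores circular-buffer bookkeeping:

def peak_l1_buffer_usage(trace):
    # trace: list of node dicts, e.g. json.load() of a dumped capture.
    total = 0
    peak = 0
    for node in trace:
        params = node["params"]
        if node["node_type"] == "buffer_allocate" and params.get("type") == "L1":
            # Sizes are stored as strings in params, as in the C++ utilities.
            total += int(params["size"])
        elif node["node_type"] == "buffer_deallocate":
            # connections[0] points at the buffer node being freed.
            freed = trace[node["connections"][0]]["params"]
            if freed.get("type") == "L1":
                total -= int(freed["size"])
        peak = max(peak, total)
    return peak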
