[Improvement] Added graph name constants in a separate file (#11732)
* added graph tracker string constants
* updated tests
dmakoviichuk-tt authored Aug 22, 2024
1 parent 737ded5 commit 731ef35
Showing 9 changed files with 120 additions and 83 deletions.
8 changes: 4 additions & 4 deletions tests/ttnn/unit_tests/test_graph_capture.py
@@ -23,8 +23,8 @@ def test_graph_capture(device, scalar, size, mode):
output_tensor = ttnn.to_torch(output_tensor, torch_rank=1)
captured_graph = ttnn.graph.end_graph_capture()

assert captured_graph[0]["name"] == "capture_start"
assert captured_graph[1]["name"] == "function_start"
assert captured_graph[0]["node_type"] == "capture_start"
assert captured_graph[1]["node_type"] == "function_start"
assert captured_graph[1]["params"]["name"] == "tt::tt_metal::detail::convert_python_tensor_to_tt_tensor"
assert captured_graph[-2]["name"] == "buffer_deallocate"
assert captured_graph[-1]["name"] == "capture_end"
assert captured_graph[-2]["node_type"] == "buffer_deallocate"
assert captured_graph[-1]["node_type"] == "capture_end"
2 changes: 1 addition & 1 deletion ttnn/cpp/pybind11/__init__.cpp
@@ -26,7 +26,7 @@ PYBIND11_MODULE(_ttnn, module) {
module.doc() = "Python bindings for TTNN";

auto m_graph = module.def_submodule("graph", "Contains graph capture functions");
ttnn::py_graph_module(m_graph);
ttnn::graph::py_graph_module(m_graph);

auto m_deprecated = module.def_submodule("deprecated", "Contains deprecated tt_lib bindings for tensor, device, profiler");
tt::bind_deprecated(m_deprecated);
34 changes: 34 additions & 0 deletions ttnn/cpp/ttnn/graph/graph_consts.hpp
@@ -0,0 +1,34 @@
// SPDX-FileCopyrightText: © 2024 Tenstorrent Inc.
//
// SPDX-License-Identifier: Apache-2.0

#pragma once

namespace ttnn::graph {
// Vertex struct
constexpr auto kNodeType = "node_type";
constexpr auto kCounter = "counter";
constexpr auto kConnections = "connections";
constexpr auto kParams = "params";
// params keys
constexpr auto kName = "name";
constexpr auto kInputs = "inputs";
constexpr auto kTensorId = "tensor_id";
constexpr auto kType = "type";
constexpr auto kAddress = "address";
constexpr auto kSize = "size";
constexpr auto kLayout = "layout";
constexpr auto kShape = "shape";

// node names
constexpr auto kNodeBuffer = "buffer";
constexpr auto kNodeBufferAllocate = "buffer_allocate";
constexpr auto kNodeBufferDeallocate = "buffer_deallocate";
constexpr auto kNodeTensor = "tensor";
constexpr auto kNodeCBAllocate = "circular_buffer_allocate";
constexpr auto kNodeCBDeallocateAll = "circular_buffer_deallocate_all";
constexpr auto kNodeFunctionStart = "function_start";
constexpr auto kNodeFunctionEnd = "function_end";
constexpr auto kNodeCaptureStart = "capture_start";
constexpr auto kNodeCaptureEnd = "capture_end";
}
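These constants mirror the keys of the JSON trace that GraphProcessor emits (see the to_json changes in graph_processor.cpp below), so trace consumers can refer to them instead of raw string literals. A minimal consumer sketch, assuming a trace already returned by GraphProcessor::end_graph_capture(); the helper name sum_l1_allocations is hypothetical:

#include <cstdint>
#include <string>
#include <nlohmann/json.hpp>
#include "ttnn/graph/graph_consts.hpp"

// Sums the sizes of all L1 buffer allocations recorded in a captured trace.
// Param values are serialized via std::to_string, so they are parsed back from strings.
uint64_t sum_l1_allocations(const nlohmann::json& trace) {
    using namespace ttnn::graph;
    uint64_t total = 0;
    for (const auto& node : trace) {
        if (node.at(kNodeType) == kNodeBufferAllocate && node.at(kParams).at(kType) == "L1") {
            total += std::stoull(node.at(kParams).at(kSize).get<std::string>());
        }
    }
    return total;
}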
12 changes: 6 additions & 6 deletions ttnn/cpp/ttnn/graph/graph_operation_queries.hpp
@@ -14,31 +14,31 @@ namespace ttnn::graph {

template <class Callable>
auto query_trace(Callable&& callable) {
ttnn::GraphProcessor::begin_graph_capture(tt::tt_metal::IGraphProcessor::RunMode::NO_DISPATCH);
GraphProcessor::begin_graph_capture(tt::tt_metal::IGraphProcessor::RunMode::NO_DISPATCH);
{
auto output = callable();
}
auto json_trace = ttnn::GraphProcessor::end_graph_capture();
auto json_trace = GraphProcessor::end_graph_capture();
return json_trace;
}

template <class Callable>
auto query_peak_L1_memory_usage(Callable&& callable) {
ttnn::GraphProcessor::begin_graph_capture(tt::tt_metal::IGraphProcessor::RunMode::NO_DISPATCH);
GraphProcessor::begin_graph_capture(tt::tt_metal::IGraphProcessor::RunMode::NO_DISPATCH);
{
auto output = callable();
}
auto json_trace = ttnn::GraphProcessor::end_graph_capture();
auto json_trace = GraphProcessor::end_graph_capture();
return graph::extract_peak_L1_memory_usage(json_trace);
}

template <class Callable>
auto query_output_info(Callable&& callable) {
ttnn::GraphProcessor::begin_graph_capture(tt::tt_metal::IGraphProcessor::RunMode::NO_DISPATCH);
GraphProcessor::begin_graph_capture(tt::tt_metal::IGraphProcessor::RunMode::NO_DISPATCH);
{
auto output = callable();
}
auto json_trace = ttnn::GraphProcessor::end_graph_capture();
auto json_trace = GraphProcessor::end_graph_capture();
return graph::extract_output_info(json_trace);
}
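For context, a usage sketch of these query helpers. Only the query_* entry points come from this header (the include path mirrors the graph_consts.hpp include added below); the ttnn::add call, the input tensors, and the function name report_op_footprint are illustrative assumptions:

#include "ttnn/graph/graph_operation_queries.hpp"
// (the header providing ttnn::add is omitted here)

void report_op_footprint(const ttnn::Tensor& a, const ttnn::Tensor& b) {
    // Peak L1 memory reported for the traced call (captured in NO_DISPATCH run mode)
    auto peak_l1 = ttnn::graph::query_peak_L1_memory_usage([&] { return ttnn::add(a, b); });

    // Full JSON trace of the same call, for custom analysis
    auto json_trace = ttnn::graph::query_trace([&] { return ttnn::add(a, b); });
    (void)peak_l1;
    (void)json_trace;
}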

75 changes: 38 additions & 37 deletions ttnn/cpp/ttnn/graph/graph_processor.cpp
@@ -7,7 +7,7 @@
#include "ttnn/types.hpp"
#include "tt_metal/impl/buffers/circular_buffer.hpp"
#include "tt_metal/impl/program/program.hpp"

#include "ttnn/graph/graph_consts.hpp"
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include <cxxabi.h>
@@ -53,16 +53,16 @@ std::type_info const& get_type_in_var(const Variant& v){
return std::visit( [](auto&&x)->decltype(auto){ return typeid(x); }, v );
}

nlohmann::json to_json(const ttnn::GraphProcessor::Vertex& data) {
nlohmann::json to_json(const ttnn::graph::GraphProcessor::Vertex& data) {
nlohmann::json j;
j["counter"] = data.counter;
j["name"] = data.name;
j["params"] = data.params;
j["connections"] = data.connections;
j[ttnn::graph::kCounter] = data.counter;
j[ttnn::graph::kNodeType] = data.node_type;
j[ttnn::graph::kParams] = data.params;
j[ttnn::graph::kConnections] = data.connections;
return j;
}

nlohmann::json to_json(const std::vector<ttnn::GraphProcessor::Vertex>& data) {
nlohmann::json to_json(const std::vector<ttnn::graph::GraphProcessor::Vertex>& data) {
nlohmann::json j = nlohmann::json::array();
for (const auto& item : data) {
j.push_back(to_json(item));
@@ -72,7 +72,8 @@ nlohmann::json to_json(const std::vector<ttnn::GraphProcessor::Vertex>& data) {

}

namespace ttnn {
namespace ttnn::graph {

GraphProcessor::GraphProcessor(RunMode mode) : run_mode(mode) {
begin_capture(mode);
begin_function_any_map[typeid(std::reference_wrapper<std::vector<Tensor>>)] = [ptr = this] (const std::any& val) mutable {ptr->begin_function_process_ref_vector(val);};
@@ -98,15 +99,15 @@ void GraphProcessor::track_allocate(tt::tt_metal::Buffer* buffer, bool bottom_up
auto counter = graph.size();

std::unordered_map<std::string, std::string> params = {
{"size", std::to_string(buffer->size())},
{"address", std::to_string(buffer->address())},
{"type", buffer->is_dram() ? "DRAM" : "L1"},
{"layout", tensorMemoryLayoutToString(buffer->buffer_layout())}
{kSize, std::to_string(buffer->size())},
{kAddress, std::to_string(buffer->address())},
{kType, buffer->is_dram() ? "DRAM" : "L1"},
{kLayout, tensorMemoryLayoutToString(buffer->buffer_layout())}
};
{
graph.push_back(Vertex{
.counter = counter,
.name = "buffer_allocate",
.node_type = kNodeBufferAllocate,
.params = params,
.connections = {buf_id}
});
@@ -119,14 +120,14 @@ void GraphProcessor::track_deallocate(tt::tt_metal::Buffer* buffer) {
auto counter = graph.size();
auto buffer_idx = add_buffer(buffer);
std::unordered_map<std::string, std::string> params = {
{"size", std::to_string(buffer->size())},
{"type", buffer->is_dram() ? "DRAM" : "L1"},
{"layout", tensorMemoryLayoutToString(buffer->buffer_layout())}
{kSize, std::to_string(buffer->size())},
{kType, buffer->is_dram() ? "DRAM" : "L1"},
{kLayout, tensorMemoryLayoutToString(buffer->buffer_layout())}
};
{
graph.push_back(Vertex{
.counter = counter,
.name = "buffer_deallocate",
.node_type = kNodeBufferDeallocate,
.params = params,
.connections = {buffer_idx}
});
@@ -138,15 +139,15 @@ void GraphProcessor::track_deallocate(tt::tt_metal::Buffer* buffer) {
void GraphProcessor::track_allocate_cb(const CoreRangeSet &core_range_set, uint64_t addr, uint64_t size) {
const std::lock_guard<std::mutex> lock(mutex);
std::unordered_map<std::string, std::string> params = {
{"size", std::to_string(size)},
{"address", std::to_string(addr)},
{kSize, std::to_string(size)},
{kAddress, std::to_string(addr)},
{"core_range_set", core_range_set.str()}
};
auto counter = graph.size();
{
graph.push_back({
.counter = counter,
.name = "circular_buffer_allocate",
.node_type = kNodeCBAllocate,
.params = params,
.connections = {}
});
@@ -161,7 +162,7 @@ void GraphProcessor::track_deallocate_cb() {
{
graph.push_back(Vertex{
.counter = counter,
.name = "circular_buffer_deallocate_all",
.node_type = kNodeCBDeallocateAll,
.params = {},
.connections = {current_op_id.top()}
});
@@ -187,14 +188,14 @@ void GraphProcessor::track_function_start(std::string_view function_name, std::s
const std::lock_guard<std::mutex> lock(mutex);
tt::log_info("Begin op: {}", function_name);
std::unordered_map<std::string, std::string> params = {
{"inputs", std::to_string(input_parameters.size())},
{"name", std::string(function_name)},
{kInputs, std::to_string(input_parameters.size())},
{kName, std::string(function_name)},
};
auto counter = graph.size();
{
graph.push_back(Vertex{
.counter = counter,
.name = "function_start",
.node_type = kNodeFunctionStart,
.params = params,
.connections = {/*current_op_id.top()*/}
});
@@ -207,7 +208,7 @@ void GraphProcessor::track_function_start(std::string_view function_name, std::s

}

for (int i = 0; auto& any : input_parameters) {
for (auto& any : input_parameters) {
std::type_index any_type = any.type();
auto it = begin_function_any_map.find(any_type);

Expand All @@ -216,21 +217,20 @@ void GraphProcessor::track_function_start(std::string_view function_name, std::s
} else {
tt::log_info("input any type name ignored: {}", demangle(any.type().name()));
}
i++;
}

}

void GraphProcessor::track_function_end_impl() {
auto name = graph[current_op_id.top()].params["name"];
auto name = graph[current_op_id.top()].params[kName];
tt::log_info("End op: {}", name);

auto counter = graph.size();
{
graph.push_back(Vertex{
.counter = counter,
.name = fmt::format("function_end"),
.params = {{"name", name}},
.node_type = kNodeFunctionEnd,
.params = {{kName, name}},
.connections = {}
});
graph[current_op_id.top()].connections.push_back(counter);
@@ -281,12 +281,13 @@ int GraphProcessor::add_tensor(const Tensor& t) {
oss << shape;
std::string shape_str = oss.str();
std::unordered_map<std::string, std::string> params = {
{"shape", shape_str},
{kShape, shape_str},
{kTensorId, std::to_string(tensors_used)}
};
if (id_to_counter.count(alloc_id) == 0) {
graph.push_back(Vertex{
.counter = tensor_counter,
.name = fmt::format("tensor[{}]", tensors_used),
.node_type = kNodeTensor,
.params = params,
.connections = {}
});
@@ -308,14 +309,14 @@ int GraphProcessor::add_buffer(tt::tt_metal::Buffer* buffer) {
auto counter = id_to_counter.count(buffer_alloc_id) > 0 ? id_to_counter[buffer_alloc_id] : graph.size();
if (id_to_counter.count(buffer_alloc_id) == 0) {
std::unordered_map<std::string, std::string> params = {
{"size", std::to_string(buffer->size())},
{"type", buffer->is_dram() ? "DRAM" : "L1"},
{"layout", tensorMemoryLayoutToString(buffer->buffer_layout())}
{kSize, std::to_string(buffer->size())},
{kType, buffer->is_dram() ? "DRAM" : "L1"},
{kLayout, tensorMemoryLayoutToString(buffer->buffer_layout())}
};

graph.push_back(Vertex{
.counter = counter,
.name = "buffer",
.node_type = kNodeBuffer,
.params = params,
.connections = {}
});
@@ -427,7 +428,7 @@ void GraphProcessor::begin_capture(RunMode mode) {
id_to_counter.clear();
graph.push_back(Vertex{
.counter = 0,
.name = "capture_start",
.node_type = kNodeCaptureStart,
.params = {},
.connections = {}
});
@@ -444,7 +445,7 @@ nlohmann::json GraphProcessor::end_capture() {
int counter = graph.size();
graph.push_back(Vertex{
.counter = counter,
.name = "capture_end",
.node_type = kNodeCaptureEnd,
.params = {},
.connections = {}
});
4 changes: 2 additions & 2 deletions ttnn/cpp/ttnn/graph/graph_processor.hpp
@@ -12,7 +12,7 @@
#include <unordered_map>
#include <functional>
#include <any>
namespace ttnn {
namespace ttnn::graph {

class ProcessorHooks : public tt::tt_metal::IGraphHooks {
private:
@@ -59,7 +59,7 @@ namespace ttnn {

struct Vertex {
int counter = 0;
std::string name;
std::string node_type;
std::unordered_map<std::string, std::string> params;
std::vector<int> connections;
};
2 changes: 1 addition & 1 deletion ttnn/cpp/ttnn/graph/graph_pybind.cpp
@@ -7,7 +7,7 @@
#include "graph_processor.hpp"


namespace ttnn {
namespace ttnn::graph {

namespace py = pybind11;
using IGraphProcessor = tt::tt_metal::IGraphProcessor;
2 changes: 1 addition & 1 deletion ttnn/cpp/ttnn/graph/graph_pybind.hpp
@@ -5,7 +5,7 @@
#pragma once
#include "pybind11/pybind_fwd.hpp"

namespace ttnn {
namespace ttnn::graph {

void py_graph_module(pybind11::module& m);
