Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

reduce vertex metadata size and remove vertex locks #8

Merged
merged 2 commits into the base branch from the source branch
Mar 21, 2024
Merged
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
126 changes: 52 additions & 74 deletions libgalois/include/galois/graphs/LS_LC_CSR_Graph.h
Original file line number Diff line number Diff line change
Expand Up @@ -72,13 +72,8 @@ class LS_LC_CSR_Graph : private boost::noncopyable {
using EdgeRange = boost::iterator_range<EdgeIterator>;

std::vector<VertexMetadata> m_vertices;

LargeVector<EdgeMetadata> m_edges[2];

// To avoid deadlock between updates and compaction, at least one vertex lock
// must be held to acquire m_edges_lock.
SpinLock m_edges_lock;

SpinLock m_edges_lock; // guards resizing of edges vectors
std::atomic_uint64_t m_edges_tail = ATOMIC_VAR_INIT(0);

// returns a reference to the metadata for the pointed-to edge
Expand Down Expand Up @@ -116,46 +111,43 @@ class LS_LC_CSR_Graph : private boost::noncopyable {

int addEdgesTopologyOnly(VertexTopologyID src,
                         const std::vector<VertexTopologyID> dsts) {
  // Adds edges (src -> dst for each dst) to the graph, topology only (no
  // edge data). Implements a log-structured update: the vertex's entire
  // edge list is rewritten at the end of buffer 1, with the new edges
  // prepended and tombstoned edges dropped.
  //
  // Returns 0 on success (the only result currently produced).
  //
  // NOTE(review): with the per-vertex locks removed, concurrent calls for
  // the same src race on vertex_meta — confirm callers serialize updates
  // to any given vertex.

  auto& vertex_meta = m_vertices[src];

  // Reserve space in buffer 1 for the combined (old + new) edge list.
  // fetch_add hands out disjoint ranges, so concurrent reservations for
  // different vertices never overlap.
  uint64_t const new_degree = vertex_meta.degree + dsts.size();
  uint64_t const new_begin =
      m_edges_tail.fetch_add(new_degree, std::memory_order_relaxed);
  uint64_t const new_end = new_begin + new_degree;

  // Grow the backing vector if the reserved range extends past its end.
  // m_edges_lock guards resizing; the size is re-checked under the lock
  // because another thread may have already grown the vector.
  if (m_edges[1].size() < new_end) {
    m_edges_lock.lock();
    {
      if (m_edges[1].size() < new_end)
        m_edges[1].resize(std::max(m_edges[1].size() * 2, new_end));
    }
    m_edges_lock.unlock();
  }

  // Insert the new edges at the front of the reserved range.
  std::transform(dsts.begin(), dsts.end(), &getEdgeMetadata(1, new_begin),
                 [](VertexTopologyID dst) {
                   return EdgeMetadata{.flags = 0, .dst = dst};
                 });

  // Copy the old, non-tombstoned edges after the new ones.
  std::copy_if(&getEdgeMetadata(vertex_meta.buffer, vertex_meta.begin),
               &getEdgeMetadata(vertex_meta.buffer, vertex_meta.end),
               &getEdgeMetadata(1, new_begin + dsts.size()),
               [](EdgeMetadata& edge) { return !edge.is_tomb(); });

  // Publish the new location of this vertex's edge list.
  vertex_meta.buffer = 1;
  vertex_meta.begin  = new_begin;
  vertex_meta.end    = new_end;
  vertex_meta.degree += dsts.size();

  return 0;
}
Expand All @@ -165,32 +157,28 @@ class LS_LC_CSR_Graph : private boost::noncopyable {
std::unordered_set<VertexTopologyID> edges_set(edges.begin(), edges.end());

auto& vertex_meta = m_vertices[src];
vertex_meta.lock();
{
for (auto i = vertex_meta.begin; i < vertex_meta.end; ++i) {
EdgeMetadata& edge_meta =
getEdgeMetadata(EdgeHandle(vertex_meta.buffer, i));
if (!edge_meta.is_tomb() &&
edges_set.find(edge_meta.dst) != edges_set.end()) {
edge_meta.tomb();
--vertex_meta.degree;
// remove tombstoned edges from the start of the edge list
if (i == vertex_meta.begin)
++vertex_meta.begin;
}
for (auto i = vertex_meta.begin; i < vertex_meta.end; ++i) {
EdgeMetadata& edge_meta =
getEdgeMetadata(EdgeHandle(vertex_meta.buffer, i));
if (!edge_meta.is_tomb() &&
edges_set.find(edge_meta.dst) != edges_set.end()) {
edge_meta.tomb();
--vertex_meta.degree;
// remove tombstoned edges from the start of the edge list
if (i == vertex_meta.begin)
++vertex_meta.begin;
}
}

// remove tombstoned edges from the end of the edge list
for (auto i = vertex_meta.end; i > vertex_meta.begin; --i) {
if (getEdgeMetadata(EdgeHandle(vertex_meta.buffer, i - 1)).is_tomb()) {
--vertex_meta.end;
--vertex_meta.degree;
} else {
break;
}
// remove tombstoned edges from the end of the edge list
for (auto i = vertex_meta.end; i > vertex_meta.begin; --i) {
if (getEdgeMetadata(EdgeHandle(vertex_meta.buffer, i - 1)).is_tomb()) {
--vertex_meta.end;
--vertex_meta.degree;
} else {
break;
}
}
vertex_meta.unlock();

return 0;
}
Expand All @@ -202,15 +190,14 @@ class LS_LC_CSR_Graph : private boost::noncopyable {
// Performs the compaction algorithm by copying any vertices left in buffer 0
// to buffer 1, then swapping the buffers.
//
// Should not be called from within a Galois parallel kernel.
// Not safe to call in parallel with insertions/deletions.
void compact() {
using std::swap;

// move from buffer 0 to buffer 1
galois::do_all(galois::iterate(vertices().begin(), vertices().end()),
[&](VertexTopologyID vertex_id) {
VertexMetadata& vertex_meta = m_vertices[vertex_id];
vertex_meta.lock();

if (vertex_meta.buffer == 0) {
uint64_t new_begin;
Expand All @@ -237,29 +224,20 @@ class LS_LC_CSR_Graph : private boost::noncopyable {
// we are about to swap the buffers, so all vertices will
// be in buffer 0
vertex_meta.buffer = 0;

// don't release the vertex lock until after the edge
// arrays are swapped
});

// At this point, there are no more live edges in buffer 0.
// We also hold the lock for all vertices, so nobody else can hold
// m_edges_lock.
m_edges_lock.lock();
{
m_edges[0].resize(0);
swap(m_edges[0], m_edges[1]);
m_edges_tail.store(0, std::memory_order_relaxed); // fine because lock
}
m_edges_lock.unlock();

galois::do_all(
galois::iterate(vertices().begin(), vertices().end()),
[&](VertexTopologyID vertex_id) { m_vertices[vertex_id].unlock(); });
}

private:
struct VertexMetadata : public SpinLock {
struct VertexMetadata {
uint8_t buffer : 1;
uint64_t begin : 48; // inclusive
uint64_t end : 48; // exclusive
Expand Down