remove vertex locks
meyerzinn committed Mar 19, 2024
1 parent 0be38c7 commit 67a249c
Showing 1 changed file with 51 additions and 81 deletions.
132 changes: 51 additions & 81 deletions libgalois/include/galois/graphs/LS_LC_CSR_Graph.h
@@ -72,13 +72,8 @@ class LS_LC_CSR_Graph : private boost::noncopyable {
   using EdgeRange = boost::iterator_range<EdgeIterator>;
 
   std::vector<VertexMetadata> m_vertices;
-
   LargeVector<EdgeMetadata> m_edges[2];
-
-  // To avoid deadlock between updates and compaction, at least one vertex lock
-  // must be held to acquire m_edges_lock.
-  SpinLock m_edges_lock;
-
+  SpinLock m_edges_lock; // guards resizing of edges vectors
   std::atomic_uint64_t m_edges_tail = ATOMIC_VAR_INIT(0);
 
   // returns a reference to the metadata for the pointed-to edge
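Note: SpinLock itself does not appear in this hunk. For readers following along, a minimal test-and-set lock consistent with how m_edges_lock is used in this file (and with the per-vertex spinlock deleted further down) could look like the sketch below. This is an illustrative stand-in, not Galois's actual SpinLock type.

#include <atomic>
#include <cstdint>

// Illustrative stand-in for SpinLock (assumption: the real Galois type may
// differ). lock() spins on an atomic flag with acquire semantics; unlock()
// publishes the release.
class ToySpinLock {
  std::atomic<uint8_t> flag{0};

public:
  void lock() {
    while (flag.exchange(1, std::memory_order_acquire))
      ; // spin until the current holder stores 0
  }
  void unlock() { flag.store(0, std::memory_order_release); }
};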
@@ -116,46 +111,43 @@ class LS_LC_CSR_Graph : private boost::noncopyable {
 
   int addEdgesTopologyOnly(VertexTopologyID src,
                            const std::vector<VertexTopologyID> dsts) {
-    auto& vertex_meta = m_vertices[src];
 
     // Copies the edge list to the end of m_edges[1], prepending
     // the new edges.
 
-    vertex_meta.lock(); // prevents compaction
-    {
-      uint64_t const new_degree = vertex_meta.degree + dsts.size();
-      uint64_t const new_begin =
-          m_edges_tail.fetch_add(new_degree, std::memory_order_relaxed);
-      uint64_t const new_end = new_begin + new_degree;
-
-      if (m_edges[1].size() < new_end) {
-        m_edges_lock.lock();
-        {
-          if (m_edges[1].size() < new_end)
-            m_edges[1].resize(std::max(m_edges[1].size() * 2, new_end));
-        }
-        m_edges_lock.unlock();
-      }
+    auto& vertex_meta = m_vertices[src];
+
+    uint64_t const new_degree = vertex_meta.degree + dsts.size();
+    uint64_t const new_begin =
+        m_edges_tail.fetch_add(new_degree, std::memory_order_relaxed);
+    uint64_t const new_end = new_begin + new_degree;
+
+    if (m_edges[1].size() < new_end) {
+      m_edges_lock.lock();
+      {
+        if (m_edges[1].size() < new_end)
+          m_edges[1].resize(std::max(m_edges[1].size() * 2, new_end));
+      }
+      m_edges_lock.unlock();
+    }
 
-      // insert new edges
-      std::transform(dsts.begin(), dsts.end(), &getEdgeMetadata(1, new_begin),
-                     [](VertexTopologyID dst) {
-                       return EdgeMetadata{.flags = 0, .dst = dst};
-                     });
-
-      // copy old, non-tombstoned edges
-      std::copy_if(&getEdgeMetadata(vertex_meta.buffer, vertex_meta.begin),
-                   &getEdgeMetadata(vertex_meta.buffer, vertex_meta.end),
-                   &getEdgeMetadata(1, new_begin + dsts.size()),
-                   [](EdgeMetadata& edge) { return !edge.is_tomb(); });
-
-      // update vertex metadata
-      vertex_meta.buffer = 1;
-      vertex_meta.begin = new_begin;
-      vertex_meta.end = new_end;
-      vertex_meta.degree += dsts.size();
-    }
-    vertex_meta.unlock();
+    // insert new edges
+    std::transform(dsts.begin(), dsts.end(), &getEdgeMetadata(1, new_begin),
+                   [](VertexTopologyID dst) {
+                     return EdgeMetadata{.flags = 0, .dst = dst};
+                   });
+
+    // copy old, non-tombstoned edges
+    std::copy_if(&getEdgeMetadata(vertex_meta.buffer, vertex_meta.begin),
+                 &getEdgeMetadata(vertex_meta.buffer, vertex_meta.end),
+                 &getEdgeMetadata(1, new_begin + dsts.size()),
+                 [](EdgeMetadata& edge) { return !edge.is_tomb(); });
+
+    // update vertex metadata
+    vertex_meta.buffer = 1;
+    vertex_meta.begin = new_begin;
+    vertex_meta.end = new_end;
+    vertex_meta.degree += dsts.size();
 
     return 0;
   }
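Note: the new addEdgesTopologyOnly is a log-structured append. Space for the vertex's entire new adjacency list is reserved with a single fetch_add on m_edges_tail, the new edges are written first, and the surviving old edges are copied in behind them; per-vertex locks are no longer taken, and m_edges_lock only guards resizing. Below is a self-contained sketch of that reserve-then-copy pattern, with toy types standing in for the Galois ones (not part of the diff).

#include <algorithm>
#include <atomic>
#include <cstdint>
#include <vector>

// Toy stand-ins for EdgeMetadata / VertexMetadata (illustrative only).
struct ToyEdge {
  uint64_t dst;
  bool tomb = false; // tombstone flag
};
struct ToyVertex {
  uint64_t begin = 0, end = 0, degree = 0;
};

int main() {
  std::vector<ToyEdge> log(1024); // pre-sized stand-in for m_edges[1]
  std::atomic<uint64_t> tail{0};  // stand-in for m_edges_tail
  ToyVertex v;                    // vertex with no edges yet
  std::vector<uint64_t> dsts = {3, 5, 7};

  // Reserve room for the vertex's whole new adjacency list in one shot.
  uint64_t const new_degree = v.degree + dsts.size();
  uint64_t const new_begin =
      tail.fetch_add(new_degree, std::memory_order_relaxed);
  uint64_t const new_end = new_begin + new_degree;

  // New edges go first...
  std::transform(dsts.begin(), dsts.end(), log.begin() + new_begin,
                 [](uint64_t d) { return ToyEdge{d}; });
  // ...followed by the old edges that are not tombstoned.
  std::copy_if(log.begin() + v.begin, log.begin() + v.end,
               log.begin() + new_begin + dsts.size(),
               [](const ToyEdge& e) { return !e.tomb; });

  // Point the vertex at its new window in the log.
  v.begin  = new_begin;
  v.end    = new_end;
  v.degree = new_degree;
}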
@@ -165,32 +157,28 @@ class LS_LC_CSR_Graph : private boost::noncopyable {
     std::unordered_set<VertexTopologyID> edges_set(edges.begin(), edges.end());
 
     auto& vertex_meta = m_vertices[src];
-    vertex_meta.lock();
-    {
-      for (auto i = vertex_meta.begin; i < vertex_meta.end; ++i) {
-        EdgeMetadata& edge_meta =
-            getEdgeMetadata(EdgeHandle(vertex_meta.buffer, i));
-        if (!edge_meta.is_tomb() &&
-            edges_set.find(edge_meta.dst) != edges_set.end()) {
-          edge_meta.tomb();
-          --vertex_meta.degree;
-          // remove tombstoned edges from the start of the edge list
-          if (i == vertex_meta.begin)
-            ++vertex_meta.begin;
-        }
-      }
-
-      // remove tombstoned edges from the end of the edge list
-      for (auto i = vertex_meta.end; i > vertex_meta.begin; --i) {
-        if (getEdgeMetadata(EdgeHandle(vertex_meta.buffer, i - 1)).is_tomb()) {
-          --vertex_meta.end;
-          --vertex_meta.degree;
-        } else {
-          break;
-        }
-      }
-    }
-    vertex_meta.unlock();
+    for (auto i = vertex_meta.begin; i < vertex_meta.end; ++i) {
+      EdgeMetadata& edge_meta =
+          getEdgeMetadata(EdgeHandle(vertex_meta.buffer, i));
+      if (!edge_meta.is_tomb() &&
+          edges_set.find(edge_meta.dst) != edges_set.end()) {
+        edge_meta.tomb();
+        --vertex_meta.degree;
+        // remove tombstoned edges from the start of the edge list
+        if (i == vertex_meta.begin)
+          ++vertex_meta.begin;
+      }
+    }
+
+    // remove tombstoned edges from the end of the edge list
+    for (auto i = vertex_meta.end; i > vertex_meta.begin; --i) {
+      if (getEdgeMetadata(EdgeHandle(vertex_meta.buffer, i - 1)).is_tomb()) {
+        --vertex_meta.end;
+        --vertex_meta.degree;
+      } else {
+        break;
+      }
+    }
 
     return 0;
   }
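Note: deleteEdges never moves data. Matching edges are tombstoned in place, the live count is decremented, and the [begin, end) window is trimmed when dead edges sit at either end. The same trick in isolation, again with toy types rather than the Galois API:

#include <cstdint>
#include <unordered_set>
#include <vector>

struct ToyEdge {
  uint64_t dst;
  bool tomb = false;
};

// Tombstone every edge whose destination is in `kill`, then shrink the live
// window [begin, end) past dead edges at either end (illustrative sketch).
void toyDeleteEdges(std::vector<ToyEdge>& log, uint64_t& begin, uint64_t& end,
                    uint64_t& degree,
                    const std::unordered_set<uint64_t>& kill) {
  for (uint64_t i = begin; i < end; ++i) {
    ToyEdge& e = log[i];
    if (!e.tomb && kill.count(e.dst) != 0) {
      e.tomb = true;
      --degree;
      if (i == begin) // dead edge at the front: advance the window
        ++begin;
    }
  }
  // Dead edges at the back: pull the window's end inward.
  while (end > begin && log[end - 1].tomb)
    --end;
}

int main() {
  std::vector<ToyEdge> log = {{1}, {2}, {3}, {4}};
  uint64_t begin = 0, end = 4, degree = 4;
  toyDeleteEdges(log, begin, end, degree, {1, 4});
  // Now begin == 1, end == 3, degree == 2: only edges 2 and 3 remain live.
}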
@@ -202,15 +190,14 @@ class LS_LC_CSR_Graph : private boost::noncopyable {
   // Performs the compaction algorithm by copying any vertices left in buffer 0
   // to buffer 1, then swapping the buffers.
   //
-  // Should not be called from within a Galois parallel kernel.
+  // Not safe to call in parallel with insertions/deletions.
   void compact() {
     using std::swap;
 
     // move from buffer 0 to buffer 1
     galois::do_all(galois::iterate(vertices().begin(), vertices().end()),
                    [&](VertexTopologyID vertex_id) {
                      VertexMetadata& vertex_meta = m_vertices[vertex_id];
-                     vertex_meta.lock();
 
                      if (vertex_meta.buffer == 0) {
                        uint64_t new_begin;
@@ -237,30 +224,20 @@ class LS_LC_CSR_Graph : private boost::noncopyable {
                      // we are about to swap the buffers, so all vertices will
                      // be in buffer 0
                      vertex_meta.buffer = 0;
-
-                     // don't release the vertex lock until after the edge
-                     // arrays are swapped
                    });
 
     // At this point, there are no more live edges in buffer 0.
-    // We also hold the lock for all vertices, so nobody else can hold
-    // m_edges_lock.
     m_edges_lock.lock();
     {
       m_edges[0].resize(0);
       swap(m_edges[0], m_edges[1]);
       m_edges_tail.store(0, std::memory_order_relaxed); // fine because lock
     }
     m_edges_lock.unlock();
-
-    galois::do_all(
-        galois::iterate(vertices().begin(), vertices().end()),
-        [&](VertexTopologyID vertex_id) { m_vertices[vertex_id].unlock(); });
   }
 
 private:
   struct VertexMetadata {
-    std::atomic_uint8_t spinlock = ATOMIC_VAR_INIT(0);
     uint8_t buffer : 1;
     uint64_t begin : 48; // inclusive
     uint64_t end : 48; // exclusive
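Note: compaction now relies on exclusive access (per the updated comment) rather than holding every vertex lock. Live edges still in buffer 0 are copied into buffer 1, each vertex is pointed at its new window, and then the two buffers are swapped and the append tail reset so subsequent inserts start into an empty buffer 1. A single-vertex, single-threaded sketch of that swap, with toy types in place of LargeVector and the metadata structs:

#include <algorithm>
#include <cstdint>
#include <utility>
#include <vector>

struct ToyEdge {
  uint64_t dst;
  bool tomb = false;
};

int main() {
  std::vector<ToyEdge> buf[2]; // stand-in for m_edges[0] / m_edges[1]
  uint64_t tail = 0;           // stand-in for m_edges_tail

  // One vertex still lives in buffer 0, with a tombstoned edge in the middle.
  buf[0] = {{1}, {2, /*tomb=*/true}, {3}};
  uint64_t begin = 0, end = 3, degree = 2, buffer = 0;

  // Copy the live edges into buffer 1 and repoint the vertex.
  uint64_t const new_begin = tail;
  tail += degree;
  buf[1].resize(tail);
  std::copy_if(buf[0].begin() + begin, buf[0].begin() + end,
               buf[1].begin() + new_begin,
               [](const ToyEdge& e) { return !e.tomb; });
  begin = new_begin;
  end   = new_begin + degree;
  // We are about to swap the buffers, so record the data as living in buffer 0.
  buffer = 0;

  // Nothing live is left in buffer 0: drop it, swap the buffers, reset the tail.
  buf[0].clear();
  std::swap(buf[0], buf[1]);
  tail = 0;

  return static_cast<int>(buffer); // 0: compaction complete
}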
@@ -275,13 +252,6 @@ class LS_LC_CSR_Graph : private boost::noncopyable {
     VertexMetadata(VertexMetadata&& other)
         : buffer(std::move(other.buffer)), begin(std::move(other.begin)),
           end(std::move(other.end)), degree(std::move(other.degree)) {}
-
-    inline void lock() {
-      while (spinlock.exchange(1, std::memory_order_acquire))
-        ;
-    }
-
-    inline void unlock() { spinlock.store(0, std::memory_order_release); }
   };
 
   struct EdgeMetadata {
