Skip to content

Commit

Permalink
add info for compaction policy (#7)
Browse files Browse the repository at this point in the history
* add info for compaction policy
* align atomics to cache line
  • Loading branch information
meyerzinn authored Mar 21, 2024
1 parent 700cc9b commit 0db6557
Showing 1 changed file with 39 additions and 3 deletions.
42 changes: 39 additions & 3 deletions libgalois/include/galois/graphs/LS_LC_CSR_Graph.h
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@
#include <atomic>
#include <cstddef>
#include <iterator>
#include <mutex>
#include <new>

#include <boost/range/iterator_range_core.hpp>
#include <boost/range/counting_range.hpp>
Expand All @@ -32,6 +33,12 @@
#include "galois/config.h"
#include "galois/LargeVector.h"

// Cache-line size used to pad hot atomics against false sharing.
// <new> declares std::hardware_destructive_interference_size (C++17), but
// library support is uneven across toolchains, so when the feature-test
// macro is absent we fall back to 64 bytes — the common line size on
// x86-64 and most ARM parts (assumption; confirm for exotic targets).
#ifdef __cpp_lib_hardware_interference_size
using std::hardware_destructive_interference_size;
#else
constexpr std::size_t hardware_destructive_interference_size = 64;
#endif

namespace galois::graphs {

/**
Expand Down Expand Up @@ -74,7 +81,11 @@ class LS_LC_CSR_Graph : private boost::noncopyable {
std::vector<VertexMetadata> m_vertices;
LargeVector<EdgeMetadata> m_edges[2];
SpinLock m_edges_lock; // guards resizing of edges vectors
std::atomic_uint64_t m_edges_tail = ATOMIC_VAR_INIT(0);

alignas(hardware_destructive_interference_size) std::atomic_uint64_t
m_edges_tail = ATOMIC_VAR_INIT(0);
alignas(hardware_destructive_interference_size) std::atomic_uint64_t m_holes =
ATOMIC_VAR_INIT(0);

// returns a reference to the metadata for the pointed-to edge
inline EdgeMetadata& getEdgeMetadata(EdgeHandle const& handle) {
Expand All @@ -91,6 +102,23 @@ class LS_LC_CSR_Graph : private boost::noncopyable {

inline uint64_t size() const noexcept { return m_vertices.size(); }

// returns an estimated memory footprint
inline uint64_t getFootprint() {
uint64_t estimate;
m_edges_lock.lock();
{
estimate =
(m_edges[0].size() + m_edges_tail.load(std::memory_order_relaxed)) *
sizeof(EdgeMetadata);
}
m_edges_lock.unlock();
return estimate;
}

inline uint64_t numHoles() const noexcept {
return m_holes.load(std::memory_order_relaxed);
}

// Vertex topology IDs are dense and zero-based, so iteration starts at 0.
inline VertexTopologyID begin() const noexcept {
  return VertexTopologyID(0);
}
Expand Down Expand Up @@ -147,6 +175,8 @@ class LS_LC_CSR_Graph : private boost::noncopyable {
vertex_meta.buffer = 1;
vertex_meta.begin = new_begin;
vertex_meta.end = new_end;

m_holes.fetch_add(vertex_meta.degree, std::memory_order_relaxed);
vertex_meta.degree += dsts.size();

return 0;
Expand All @@ -156,14 +186,16 @@ class LS_LC_CSR_Graph : private boost::noncopyable {
const std::vector<VertexTopologyID>& edges) {
std::unordered_set<VertexTopologyID> edges_set(edges.begin(), edges.end());

auto& vertex_meta = m_vertices[src];
auto& vertex_meta = m_vertices[src];
uint64_t holes_added = 0;
for (auto i = vertex_meta.begin; i < vertex_meta.end; ++i) {
EdgeMetadata& edge_meta =
getEdgeMetadata(EdgeHandle(vertex_meta.buffer, i));
if (!edge_meta.is_tomb() &&
edges_set.find(edge_meta.dst) != edges_set.end()) {
edge_meta.tomb();
--vertex_meta.degree;
++holes_added;
// remove tombstoned edges from the start of the edge list
if (i == vertex_meta.begin)
++vertex_meta.begin;
Expand All @@ -180,6 +212,8 @@ class LS_LC_CSR_Graph : private boost::noncopyable {
}
}

m_holes.fetch_add(holes_added, std::memory_order_relaxed);

return 0;
}

Expand Down Expand Up @@ -231,7 +265,9 @@ class LS_LC_CSR_Graph : private boost::noncopyable {
{
m_edges[0].resize(0);
swap(m_edges[0], m_edges[1]);
m_edges_tail.store(0, std::memory_order_relaxed); // fine because lock
// relaxed is fine because of locks held:
m_edges_tail.store(0, std::memory_order_relaxed);
m_holes.store(0, std::memory_order_relaxed);
}
m_edges_lock.unlock();
}
Expand Down

0 comments on commit 0db6557

Please sign in to comment.