Skip to content

Commit

Permalink
KaMinPar: speed up gain cache reset on coarse levels
Browse files · Browse the repository at this point in the history
  • Loading branch information
DanielSeemaier committed Sep 4, 2023
1 parent 5a26395 commit d7f41ae
Show file tree
Hide file tree
Showing 3 changed files with 21 additions and 17 deletions.
2 changes: 1 addition & 1 deletion kaminpar/refinement/fm_refiner.h
Original file line number Diff line number Diff line change
Expand Up @@ -167,7 +167,7 @@ class BorderNodes {
struct SharedData {
SharedData(const NodeID max_n, const BlockID max_k)
: node_tracker(max_n),
gain_cache(max_k, max_n),
gain_cache(max_n, max_k),
border_nodes(gain_cache, node_tracker),
shared_pq_handles(max_n),
target_blocks(max_n) {
Expand Down
34 changes: 19 additions & 15 deletions kaminpar/refinement/gain_cache.h
Original file line number Diff line number Diff line change
Expand Up @@ -16,8 +16,8 @@
#include "kaminpar/datastructures/partitioned_graph.h"

#include "common/datastructures/dynamic_map.h"
#include "common/logger.h"
#include "common/datastructures/noinit_vector.h"
#include "common/logger.h"

namespace kaminpar::shm {
template <typename GainCache, bool use_sparsehash = false> class DeltaGainCache;
Expand All @@ -26,15 +26,18 @@ class DenseGainCache {
friend class DeltaGainCache<DenseGainCache>;

public:
DenseGainCache(const BlockID k, const NodeID n)
: _k(k),
_n(n),
_gain_cache(static_cast<std::size_t>(_n) * static_cast<std::size_t>(_k)),
_weighted_degrees(_n) {}
DenseGainCache(const NodeID max_n, const BlockID max_k)
: _max_n(max_n),
_max_k(max_k),
_gain_cache(static_cast<std::size_t>(_max_n) * static_cast<std::size_t>(_max_k)),
_weighted_degrees(_max_n) {}

void initialize(const PartitionedGraph &p_graph) {
KASSERT(p_graph.k() <= _k, "gain cache is too small");
KASSERT(p_graph.n() <= _n, "gain cache is too small");
KASSERT(p_graph.n() <= _max_n, "gain cache is too small");
KASSERT(p_graph.k() <= _max_k, "gain cache is too small");

_n = p_graph.n();
_k = p_graph.k();

reset();
recompute_all(p_graph);
Expand Down Expand Up @@ -83,17 +86,15 @@ class DenseGainCache {
return __atomic_load_n(&_gain_cache[index(node, block)], __ATOMIC_RELAXED);
}

std::size_t index(const NodeID node, const BlockID b) const {
const std::size_t idx =
static_cast<std::size_t>(node) * static_cast<std::size_t>(_k) + static_cast<std::size_t>(b);
std::size_t index(const NodeID node, const BlockID block) const {
const std::size_t idx = static_cast<std::size_t>(node) * static_cast<std::size_t>(_k) +
static_cast<std::size_t>(block);
KASSERT(idx < _gain_cache.size());
return idx;
}

void reset() {
tbb::parallel_for<std::size_t>(0, _gain_cache.size(), [&](const std::size_t i) {
_gain_cache[i] = 0;
});
tbb::parallel_for<std::size_t>(0, _n * _k, [&](const std::size_t i) { _gain_cache[i] = 0; });
}

void recompute_all(const PartitionedGraph &p_graph) {
Expand Down Expand Up @@ -146,8 +147,11 @@ class DenseGainCache {
return true;
}

BlockID _k;
NodeID _max_n;
BlockID _max_k;

NodeID _n;
BlockID _k;

NoinitVector<EdgeWeight> _gain_cache;
NoinitVector<EdgeWeight> _weighted_degrees;
Expand Down
2 changes: 1 addition & 1 deletion kaminpar/refinement/jet_refiner.cc
Original file line number Diff line number Diff line change
Expand Up @@ -104,7 +104,7 @@ bool JetRefiner::refine(PartitionedGraph &p_graph, const PartitionContext &p_ctx
DBG << "Setting c=" << c;

START_TIMER("Allocation");
DenseGainCache gain_cache(p_graph.k(), p_graph.n());
DenseGainCache gain_cache(p_graph.n(), p_graph.k());
gain_cache.initialize(p_graph);

NoinitVector<BlockID> next_partition(p_graph.n());
Expand Down

0 comments on commit d7f41ae

Please sign in to comment.