add fixed vertex support in multilevel coarsener

kittobi1992 committed Jul 26, 2023
1 parent f7710e2 · commit 34b3544

Showing 9 changed files with 283 additions and 98 deletions.
6 changes: 5 additions & 1 deletion mt-kahypar/datastructures/dynamic_graph.h
@@ -659,10 +659,14 @@ class DynamicGraph {
return _fixed_vertices.isFixed(hn);
}

PartitionID fixedVertexBlock(const HypernodeID hn) {
PartitionID fixedVertexBlock(const HypernodeID hn) const {
return _fixed_vertices.fixedVertexBlock(hn);
}

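// Returns an independent copy of the fixed vertex support, presumably so that
// callers such as the multilevel coarsener can modify it without affecting
// the hypergraph's own fixed vertex state.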
FixedVertexSupport<DynamicGraph> copyOfFixedVertexSupport() const {
return _fixed_vertices.copy();
}

// ####################### Contract / Uncontract #######################

DynamicGraph contract(parallel::scalable_vector<HypernodeID>&) {
6 changes: 5 additions & 1 deletion mt-kahypar/datastructures/dynamic_hypergraph.h
@@ -754,10 +754,14 @@ class DynamicHypergraph {
return _fixed_vertices.isFixed(hn);
}

PartitionID fixedVertexBlock(const HypernodeID hn) {
PartitionID fixedVertexBlock(const HypernodeID hn) const {
return _fixed_vertices.fixedVertexBlock(hn);
}

FixedVertexSupport<DynamicHypergraph> copyOfFixedVertexSupport() const {
return _fixed_vertices.copy();
}

// ####################### Contract / Uncontract #######################

DynamicHypergraph contract(parallel::scalable_vector<HypernodeID>&) {
2 changes: 1 addition & 1 deletion mt-kahypar/datastructures/fixed_vertex_support.h
@@ -80,7 +80,7 @@ class FixedVertexSupport {
}

void setMaxBlockWeight(const std::vector<HypernodeWeight> max_block_weights) {
ASSERT(max_block_weights.size() == static_cast<size_t>(_k));
ASSERT(max_block_weights.size() >= static_cast<size_t>(_k));
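// Presumably relaxed from == to >= because callers may pass a vector with more
// than _k entries (the multilevel coarsener passes
// _context.partition.max_part_weights); only the first _k entries are relevant.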
_max_block_weights = max_block_weights;
}

6 changes: 5 additions & 1 deletion mt-kahypar/datastructures/static_graph.h
@@ -738,10 +738,14 @@ class StaticGraph {
return _fixed_vertices.isFixed(hn);
}

PartitionID fixedVertexBlock(const HypernodeID hn) {
PartitionID fixedVertexBlock(const HypernodeID hn) const {
return _fixed_vertices.fixedVertexBlock(hn);
}

FixedVertexSupport<StaticGraph> copyOfFixedVertexSupport() const {
return _fixed_vertices.copy();
}

// ####################### Contract / Uncontract #######################

/*!
6 changes: 5 additions & 1 deletion mt-kahypar/datastructures/static_hypergraph.h
@@ -710,10 +710,14 @@ class StaticHypergraph {
return _fixed_vertices.isFixed(hn);
}

PartitionID fixedVertexBlock(const HypernodeID hn) {
PartitionID fixedVertexBlock(const HypernodeID hn) const {
return _fixed_vertices.fixedVertexBlock(hn);
}

FixedVertexSupport<StaticHypergraph> copyOfFixedVertexSupport() const {
return _fixed_vertices.copy();
}

// ####################### Contract / Uncontract #######################

/*!
219 changes: 139 additions & 80 deletions mt-kahypar/partition/coarsening/multilevel_coarsener.h
@@ -166,6 +166,63 @@ class MultilevelCoarsener : public ICoarsener,
utils::Randomize::instance().parallelShuffleVector( _current_vertices, UL(0), _current_vertices.size());
}

const HypernodeID num_hns_before_pass =
current_hg.initialNumNodes() - current_hg.numRemovedHypernodes();
HypernodeID current_num_nodes = 0;
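// The fixed-vertex case is dispatched via a compile-time flag: the clustering
// loop is instantiated twice, so the common case without fixed vertices pays
// no per-vertex branching overhead for fixed vertex checks.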
if ( current_hg.hasFixedVertices() ) {
current_num_nodes = performClustering<true>(current_hg, cluster_ids);
} else {
current_num_nodes = performClustering<false>(current_hg, cluster_ids);
}
DBG << V(current_num_nodes);

HEAVY_COARSENING_ASSERT([&] {
parallel::scalable_vector<HypernodeWeight> expected_weights(current_hg.initialNumNodes());
// Verify that clustering is correct
for ( const HypernodeID& hn : current_hg.nodes() ) {
const HypernodeID u = hn;
const HypernodeID root_u = cluster_ids[u];
if ( root_u != cluster_ids[root_u] ) {
LOG << "Hypernode" << u << "is part of cluster" << root_u << ", but cluster"
<< root_u << "is also part of cluster" << cluster_ids[root_u];
return false;
}
expected_weights[root_u] += current_hg.nodeWeight(hn);
}

// Verify that cluster weights are aggregated correctly
for ( const HypernodeID& hn : current_hg.nodes() ) {
const HypernodeID u = hn;
const HypernodeID root_u = cluster_ids[u];
if ( root_u == u && expected_weights[u] != _cluster_weight[u] ) {
LOG << "The expected weight of cluster" << u << "is" << expected_weights[u]
<< ", but currently it is" << _cluster_weight[u];
return false;
}
}
return true;
}(), "Parallel clustering computed invalid cluster ids and weights");

const double reduction_vertices_percentage =
static_cast<double>(num_hns_before_pass) /
static_cast<double>(current_num_nodes);
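// Despite its name, this value is the shrink factor n_before / n_after of the
// pass; the pass is considered ineffective and coarsening ends if it does not
// exceed minimum_shrink_factor.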
if ( reduction_vertices_percentage <= _context.coarsening.minimum_shrink_factor ) {
return false;
}
_progress_bar += (num_hns_before_pass - current_num_nodes);

_timer.start_timer("contraction", "Contraction");
// Perform parallel contraction
_uncoarseningData.performMultilevelContraction(std::move(cluster_ids), round_start);
_timer.stop_timer("contraction");

++_pass_nr;
return true;
}

template<bool has_fixed_vertices>
HypernodeID performClustering(const Hypergraph& current_hg,
vec<HypernodeID>& cluster_ids) {
// We iterate in parallel over all vertices of the hypergraph and compute a contraction partner for each.
// Matched vertices are linked in a concurrent union-find data structure that also aggregates
// the weights of the resulting clusters and keeps track of the number of nodes left, if we would
@@ -182,6 +239,8 @@
HypernodeID current_num_nodes = num_hns_before_pass;
tbb::enumerable_thread_specific<HypernodeID> contracted_nodes(0);
tbb::enumerable_thread_specific<HypernodeID> num_nodes_update_threshold(0);
ds::FixedVertexSupport<Hypergraph> fixed_vertices = current_hg.copyOfFixedVertexSupport();
fixed_vertices.setMaxBlockWeight(_context.partition.max_part_weights);
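// The pass works on a local copy of the fixed vertex support: tentative
// contractions are applied to this copy (see joinCluster below), so the
// hypergraph's own fixed vertex state is not modified during clustering.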
tbb::parallel_for(0U, current_hg.initialNumNodes(), [&](const HypernodeID id) {
ASSERT(id < _current_vertices.size());
const HypernodeID hn = _current_vertices[id];
Expand All @@ -193,12 +252,13 @@ class MultilevelCoarsener : public ICoarsener,
if (_matching_state[u] == STATE(MatchingState::UNMATCHED)) {
if (current_num_nodes > hierarchy_contraction_limit) {
ASSERT(current_hg.nodeIsEnabled(hn));
const Rating rating = _rater.rate(current_hg, hn,
cluster_ids, _cluster_weight, _context.coarsening.max_allowed_node_weight);
const Rating rating = _rater.template rate<has_fixed_vertices>(current_hg, hn,
cluster_ids, _cluster_weight, fixed_vertices, _context.coarsening.max_allowed_node_weight);
if (rating.target != kInvalidHypernode) {
const HypernodeID v = rating.target;
HypernodeID& local_contracted_nodes = contracted_nodes.local();
matchVertices(current_hg, u, v, cluster_ids, local_contracted_nodes);
matchVertices<has_fixed_vertices>(current_hg, u, v,
cluster_ids, local_contracted_nodes, fixed_vertices);

// To maintain the current number of nodes of the hypergraph each PE sums up
// its number of contracted nodes locally. To compute the current number of
@@ -231,51 +291,46 @@
_timer.stop_timer("clustering_level_" + std::to_string(_pass_nr));
}
_timer.stop_timer("clustering");
current_num_nodes = num_hns_before_pass - contracted_nodes.combine(std::plus<>());
DBG << V(current_num_nodes);

HEAVY_COARSENING_ASSERT([&] {
parallel::scalable_vector<HypernodeWeight> expected_weights(current_hg.initialNumNodes());
// Verify that clustering is correct
for ( const HypernodeID& hn : current_hg.nodes() ) {
const HypernodeID u = hn;
const HypernodeID root_u = cluster_ids[u];
if ( root_u != cluster_ids[root_u] ) {
LOG << "Hypernode" << u << "is part of cluster" << root_u << ", but cluster"
<< root_u << "is also part of cluster" << cluster_ids[root_u];
return false;
if constexpr ( has_fixed_vertices ) {
// Verify fixed vertices
ASSERT([&] {
vec<PartitionID> fixed_vertex_blocks(current_hg.initialNumNodes(), kInvalidPartition);
for ( const HypernodeID& hn : current_hg.nodes() ) {
if ( current_hg.isFixed(hn) ) {
if ( fixed_vertex_blocks[cluster_ids[hn]] != kInvalidPartition &&
fixed_vertex_blocks[cluster_ids[hn]] != current_hg.fixedVertexBlock(hn)) {
LOG << "There are two nodes assigned to same cluster that belong to different fixed vertex blocks";
return false;
}
fixed_vertex_blocks[cluster_ids[hn]] = current_hg.fixedVertexBlock(hn);
}
}
expected_weights[root_u] += current_hg.nodeWeight(hn);
}

// Verify that cluster weights are aggregated correctly
for ( const HypernodeID& hn : current_hg.nodes() ) {
const HypernodeID u = hn;
const HypernodeID root_u = cluster_ids[u];
if ( root_u == u && expected_weights[u] != _cluster_weight[u] ) {
LOG << "The expected weight of cluster" << u << "is" << expected_weights[u]
<< ", but currently it is" << _cluster_weight[u];
return false;
vec<HypernodeWeight> expected_block_weights(_context.partition.k, 0);
for ( const HypernodeID& hn : current_hg.nodes() ) {
if ( fixed_vertex_blocks[cluster_ids[hn]] != kInvalidPartition ) {
if ( !fixed_vertices.isFixed(cluster_ids[hn]) ) {
LOG << "Cluster" << cluster_ids[hn] << "should be fixed to block"
<< fixed_vertex_blocks[cluster_ids[hn]];
return false;
}
expected_block_weights[fixed_vertex_blocks[cluster_ids[hn]]] += current_hg.nodeWeight(hn);
}
}
}
return true;
}(), "Parallel clustering computed invalid cluster ids and weights");

const double reduction_vertices_percentage =
static_cast<double>(num_hns_before_pass) /
static_cast<double>(current_num_nodes);
if ( reduction_vertices_percentage <= _context.coarsening.minimum_shrink_factor ) {
return false;
for ( PartitionID block = 0; block < _context.partition.k; ++block ) {
if ( fixed_vertices.fixedVertexBlockWeight(block) != expected_block_weights[block] ) {
LOG << "Fixed vertex block" << block << "should have weight" << expected_block_weights[block]
<< ", but it is" << fixed_vertices.fixedVertexBlockWeight(block);
return false;
}
}
return true;
}(), "Fixed vertex support is corrupted");
}
_progress_bar += (num_hns_before_pass - current_num_nodes);

_timer.start_timer("contraction", "Contraction");
// Perform parallel contraction
_uncoarseningData.performMultilevelContraction(std::move(cluster_ids), round_start);
_timer.stop_timer("contraction");

++_pass_nr;
return true;
return num_hns_before_pass - contracted_nodes.combine(std::plus<>());
}

void terminateImpl() override {
Expand All @@ -284,7 +339,6 @@ class MultilevelCoarsener : public ICoarsener,
_uncoarseningData.finalizeCoarsening();
}


/*!
* We maintain the invariant during clustering that each cluster has a unique
* representative and all vertices also part of that cluster point to that
@@ -301,11 +355,13 @@
* The following functions guarantee that our invariant is fulfilled if
* vertices are matched concurrently.
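* For example, if vertex 5 joins the cluster of vertex 7 whose representative
* is vertex 3, then cluster_ids[5] must be set to 3 directly; a chain such as
* cluster_ids[5] = 7 with cluster_ids[7] = 3 would violate the invariant.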
*/
template<bool has_fixed_vertices>
MT_KAHYPAR_ATTRIBUTE_ALWAYS_INLINE bool matchVertices(const Hypergraph& hypergraph,
const HypernodeID u,
const HypernodeID v,
parallel::scalable_vector<HypernodeID>& cluster_ids,
HypernodeID& contracted_nodes) {
HypernodeID& contracted_nodes,
ds::FixedVertexSupport<Hypergraph>& fixed_vertices) {
ASSERT(u < hypergraph.initialNumNodes());
ASSERT(v < hypergraph.initialNumNodes());
uint8_t unmatched = STATE(MatchingState::UNMATCHED);
@@ -327,34 +383,16 @@
if ( matching_state_v == STATE(MatchingState::MATCHED) ) {
// Vertex v is already matched and will not change its cluster id anymore.
// In that case, it is safe to set the cluster id of u to the cluster id of v.
if ( v == cluster_ids[v] ) {
// In case v is also the representative of the cluster,
// we change the cluster id of u to v, ...
cluster_ids[u] = v;
_cluster_weight[v] += weight_u;
++contracted_nodes;
success = true;
} else {
// ... otherwise, we try again to match u with the
// representative of the cluster.
const HypernodeID cluster_v = cluster_ids[v];
weight_v = _cluster_weight[cluster_v];
if ( weight_u + weight_v <= _context.coarsening.max_allowed_node_weight ) {
ASSERT(_matching_state[cluster_v] == STATE(MatchingState::MATCHED));
cluster_ids[u] = cluster_v;
_cluster_weight[cluster_v] += weight_u;
++contracted_nodes;
success = true;
}
}
const HypernodeID rep = cluster_ids[v];
ASSERT(_matching_state[rep] == STATE(MatchingState::MATCHED));
success = joinCluster<has_fixed_vertices>(hypergraph,
u, rep, cluster_ids, contracted_nodes, fixed_vertices);
} else if ( _matching_state[v].compare_exchange_strong(unmatched, match_in_progress) ) {
// The current thread has "ownership" of u and v and can change the cluster id
// of both vertices in a thread-safe manner.
cluster_ids[u] = v;
_cluster_weight[v] += weight_u;
++contracted_nodes;
success = joinCluster<has_fixed_vertices>(hypergraph,
u, v, cluster_ids, contracted_nodes, fixed_vertices);
_matching_state[v] = STATE(MatchingState::MATCHED);
success = true;
} else {
// The state of v must be either MATCHING_IN_PROGRESS, or another thread changed the state
// in the meantime to MATCHED. We have to wait until the state of v changed to
@@ -375,12 +413,9 @@
// The vertex with the smallest id in the cycle starts to resolve the conflict
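// Example: one thread tries to match u = 4 with v = 9 while another thread
// tries to match u = 9 with v = 4; both vertices are in state
// MATCHING_IN_PROGRESS and would wait on each other forever. The smallest id
// in the cycle (here 4) resolves the conflict and joins its partner's cluster.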
const bool is_in_cyclic_dependency = _matching_partner[cur_u] == u;
if ( is_in_cyclic_dependency && u == smallest_node_id_in_cycle) {
cluster_ids[u] = v;
_cluster_weight[v] += weight_u;
++contracted_nodes;
success = joinCluster<has_fixed_vertices>(hypergraph,
u, v, cluster_ids, contracted_nodes, fixed_vertices);
_matching_state[v] = STATE(MatchingState::MATCHED);
_matching_state[u] = STATE(MatchingState::MATCHED);
success = true;
}
}

@@ -389,14 +424,9 @@
// we try to match u with the representative of v's cluster.
if ( _matching_state[u] == STATE(MatchingState::MATCHING_IN_PROGRESS) ) {
ASSERT( _matching_state[v] == STATE(MatchingState::MATCHED) );
const HypernodeID cluster_v = cluster_ids[v];
const HypernodeWeight weight_v = _cluster_weight[cluster_v];
if ( weight_u + weight_v <= _context.coarsening.max_allowed_node_weight ){
cluster_ids[u] = cluster_v;
_cluster_weight[cluster_v] += weight_u;
++contracted_nodes;
success = true;
}
const HypernodeID rep = cluster_ids[v];
success = joinCluster<has_fixed_vertices>(hypergraph,
u, rep, cluster_ids, contracted_nodes, fixed_vertices);
}
}
_rater.markAsMatched(u);
@@ -408,6 +438,35 @@
return success;
}

template<bool has_fixed_vertices>
bool joinCluster(const Hypergraph& hypergraph,
const HypernodeID u,
const HypernodeID rep,
vec<HypernodeID>& cluster_ids,
HypernodeID& contracted_nodes,
ds::FixedVertexSupport<Hypergraph>& fixed_vertices) {
ASSERT(rep == cluster_ids[rep]);
bool success = false;
const HypernodeWeight weight_of_u = hypergraph.nodeWeight(u);
const HypernodeWeight weight_of_rep = _cluster_weight[rep];
bool cluster_join_operation_allowed =
weight_of_u + weight_of_rep <= _context.coarsening.max_allowed_node_weight;
if constexpr ( has_fixed_vertices ) {
if ( cluster_join_operation_allowed ) {
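// fixed_vertices.contract(rep, u) is expected to reject the join (i.e., return
// false) if rep and u are fixed to different blocks, or if the resulting fixed
// vertex block would exceed its maximum allowed weight.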
cluster_join_operation_allowed = fixed_vertices.contract(rep, u);
}
}
if ( cluster_join_operation_allowed ) {
cluster_ids[u] = rep;
_cluster_weight[rep] += weight_of_u;
++contracted_nodes;
success = true;
}
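// Regardless of success, u's matching attempt ends here: reset its partner and
// mark it MATCHED so that threads waiting on u can proceed.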
_matching_partner[u] = u;
_matching_state[u] = STATE(MatchingState::MATCHED);
return success;
}

HypernodeID currentNumberOfNodesImpl() const override {
return Base::currentNumNodes();
}
(The diffs of the remaining three changed files are not shown.)
