fix fixed vertex support in flat initial partitioning algorithms
kittobi1992 committed Jul 27, 2023
1 parent 6c2a2d1 commit 0f0700a
Showing 11 changed files with 372 additions and 116 deletions.
@@ -43,7 +43,8 @@ void BFSInitialPartitioner<TypeTraits>::partitionImpl() {
_ip_data.local_hyperedge_fast_reset_flag_array();

_ip_data.reset_unassigned_hypernodes(_rng);
- parallel::scalable_vector<HypernodeID> start_nodes =
+ _ip_data.preassignFixedVertices(hypergraph);
+ vec<vec<HypernodeID>> start_nodes =
PseudoPeripheralStartNodes<TypeTraits>::computeStartNodes(_ip_data, _context, kInvalidPartition, _rng);

// Insert each start node for each block into its corresponding queue
@@ -52,23 +53,27 @@ void BFSInitialPartitioner<TypeTraits>::partitionImpl() {
parallel::scalable_vector<Queue> queues(_context.partition.k);

for (PartitionID block = 0; block < _context.partition.k; ++block) {
- queues[block].push(start_nodes[block]);
- markHypernodeAsInQueue(hypergraph, hypernodes_in_queue, start_nodes[block], block);
+ for ( const HypernodeID& hn : start_nodes[block] ) {
+ queues[block].push(hn);
+ markHypernodeAsInQueue(hypergraph, hypernodes_in_queue, hn, block);
+ }
}

- HypernodeID num_assigned_hypernodes = 0;
// We grow the k blocks of the partition starting from each start node in
// a BFS fashion. The BFS queues for each block are visited in round-robin fashion.
// When it is a block's turn, it pops its first hypernode and pushes
// all adjacent vertices into its queue.
+ HypernodeID num_assigned_hypernodes = _ip_data.numFixedVertices();
const HypernodeID current_num_nodes =
hypergraph.initialNumNodes() - hypergraph.numRemovedHypernodes();
while (num_assigned_hypernodes < current_num_nodes) {
for (PartitionID block = 0; block < _context.partition.k; ++block) {
HypernodeID hn = kInvalidHypernode;

+ bool fits_into_block = false;
while (!queues[block].empty()) {
const HypernodeID next_hn = queues[block].front();
+ ASSERT(!hypergraph.isFixed(next_hn));
queues[block].pop();

if (hypergraph.partID(next_hn) == kInvalidPartition) {
@@ -80,6 +85,7 @@ void BFSInitialPartitioner<TypeTraits>::partitionImpl() {
// Note, in that case the balanced constraint will be violated.
hn = next_hn;
if (fitsIntoBlock(hypergraph, hn, block)) {
+ fits_into_block = true;
break;
}
}
Expand All @@ -90,6 +96,22 @@ void BFSInitialPartitioner<TypeTraits>::partitionImpl() {
// assigned to another block or the hypergraph is unconnected, we
// choose a new unassigned hypernode (if one exists)
hn = _ip_data.get_unassigned_hypernode();
+ if ( hn != kInvalidHypernode && fitsIntoBlock(hypergraph, hn, block) ) {
+ fits_into_block = true;
+ }
}

+ if ( hn != kInvalidHypernode && !fits_into_block ) {
+ // The node does not fit into the block. Thus, we quickly
+ // check if there is another block to which we can assign the node
+ for ( PartitionID other_block = 0; other_block < _context.partition.k; ++other_block ) {
+ if ( other_block != block && fitsIntoBlock(hypergraph, hn, other_block) ) {
+ // There is another block to which we can assign the node
+ // => ignore the node for now
+ hn = kInvalidHypernode;
+ break;
+ }
+ }
+ }

if (hn != kInvalidHypernode) {
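For orientation, the overall scheme this file now follows can be summarized in a short, self-contained sketch. This is not the Mt-KaHyPar code: Graph, bfsGrowBlocks, kNoPart and all other names are simplified stand-ins, and the real algorithm additionally pulls a fresh unassigned vertex whenever a queue runs dry. Fixed vertices are placed first and counted as assigned; each block then grows from a set of seed nodes in round-robin BFS order and never moves a fixed vertex.

#include <cstddef>
#include <cstdint>
#include <queue>
#include <vector>

using NodeID = uint32_t;
using PartID = int32_t;
constexpr PartID kNoPart = -1;

struct Graph {
  std::vector<std::vector<NodeID>> adj;   // adjacency list
  std::vector<PartID> fixed;              // prescribed block, or kNoPart
  std::vector<PartID> part;               // current assignment, kNoPart = free
};

void bfsGrowBlocks(Graph& g, const std::vector<std::vector<NodeID>>& seeds, PartID k) {
  // Fixed vertices are placed first and count as already assigned,
  // analogous to num_assigned_hypernodes = _ip_data.numFixedVertices().
  std::size_t num_assigned = 0;
  for (NodeID u = 0; u < g.adj.size(); ++u)
    if (g.fixed[u] != kNoPart) { g.part[u] = g.fixed[u]; ++num_assigned; }

  // One queue per block, seeded with *several* start nodes each.
  std::vector<std::queue<NodeID>> queues(k);
  for (PartID b = 0; b < k; ++b)
    for (NodeID s : seeds[b]) queues[b].push(s);

  while (num_assigned < g.adj.size()) {
    bool progress = false;
    for (PartID b = 0; b < k; ++b) {        // blocks take turns (round robin)
      while (!queues[b].empty()) {
        const NodeID u = queues[b].front(); queues[b].pop();
        if (g.part[u] != kNoPart) continue; // taken already (or fixed)
        g.part[u] = b; ++num_assigned; progress = true;
        for (NodeID v : g.adj[u])           // enqueue the new frontier
          if (g.part[v] == kNoPart) queues[b].push(v);
        break;                              // one vertex per turn
      }
    }
    // The real algorithm pulls a fresh unassigned vertex when a queue runs
    // dry; this sketch simply stops on a disconnected remainder.
    if (!progress) break;
  }
}

Counting the fixed vertices up front is what keeps the termination condition num_assigned < n correct once some vertices can no longer be assigned by the BFS itself.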
@@ -72,23 +72,28 @@ class GreedyInitialPartitioner : public IInitialPartitioner {
// initial partitioner. E.g. the round-robin variant leaves the hypernode
// unassigned, but the global and sequential strategies both preassign
// all vertices to block 1 before initial partitioning.
+ _ip_data.preassignFixedVertices(hg);
if ( _default_block != kInvalidPartition ) {
ASSERT(_default_block < _context.partition.k);
kway_pq.disablePart(_default_block);
for ( const HypernodeID& hn : hg.nodes() ) {
- hg.setNodePart(hn, _default_block);
+ if ( !hg.isFixed(hn) ) {
+ hg.setNodePart(hn, _default_block);
+ }
}
}

// Insert start vertices into their corresponding PQs
_ip_data.reset_unassigned_hypernodes(_rng);
- parallel::scalable_vector<HypernodeID> start_nodes =
+ vec<vec<HypernodeID>> start_nodes =
PseudoPeripheralStartNodes<TypeTraits>::computeStartNodes(_ip_data, _context, _default_block, _rng);
ASSERT(static_cast<size_t>(_context.partition.k) == start_nodes.size());
kway_pq.clear();
for ( PartitionID block = 0; block < _context.partition.k; ++block ) {
if ( block != _default_block ) {
- insertVertexIntoPQ(hg, kway_pq, start_nodes[block], block);
+ for ( const HypernodeID& hn : start_nodes[block] ) {
+ insertVertexIntoPQ(hg, kway_pq, hn, block);
+ }
}
}
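
The hunk above establishes an ordering that the sketch below isolates (illustrative names, not the real interface): preassignFixedVertices runs first, and only free vertices are parked in the default block, so setNodePart is never called twice for a fixed vertex.

#include <cstdint>
#include <vector>

using NodeID = uint32_t;
using PartID = int32_t;
constexpr PartID kNoPart = -1;

struct HG {
  std::vector<PartID> fixed;  // prescribed block per vertex, kNoPart if free
  std::vector<PartID> part;   // current assignment
};

void setupDefaultBlock(HG& hg, PartID default_block) {
  // Step 1: fixed vertices go to their prescribed blocks
  // (the role of preassignFixedVertices).
  for (NodeID u = 0; u < hg.part.size(); ++u)
    if (hg.fixed[u] != kNoPart) hg.part[u] = hg.fixed[u];
  // Step 2: only *free* vertices are parked in the default block; the
  // greedy strategies later pull them out again via the gain PQs.
  if (default_block != kNoPart)
    for (NodeID u = 0; u < hg.part.size(); ++u)
      if (hg.fixed[u] == kNoPart) hg.part[u] = default_block;
}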

@@ -221,7 +226,8 @@ class GreedyInitialPartitioner : public IInitialPartitioner {
for ( const HyperedgeID& he : hypergraph.incidentEdges(hn)) {
if ( !hyperedges_in_queue[to * hypergraph.initialNumEdges() + he] ) {
for ( const HypernodeID& pin : hypergraph.pins(he) ) {
- if ( hypergraph.partID(pin) == _default_block && !pq.contains(pin, to) ) {
+ if ( hypergraph.partID(pin) == _default_block &&
+ !pq.contains(pin, to) && !hypergraph.isFixed(pin) ) {
insertVertexIntoPQ(hypergraph, pq, pin, to);
}
}
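The guard added here keeps fixed pins out of the gain PQs entirely, since a fixed pin may never leave its prescribed block. Below is a hedged sketch of such an insertion routine using standard-library containers in place of the partitioner's own PQ; gainOf is a stub standing in for the real FM-style gain computation, and all names are illustrative.

#include <cstddef>
#include <cstdint>
#include <queue>
#include <unordered_set>
#include <vector>

using NodeID = uint32_t;
using PartID = int32_t;
using Gain = int64_t;

struct PinEntry { Gain gain; NodeID pin; };
struct ByGain {
  bool operator()(const PinEntry& a, const PinEntry& b) const { return a.gain < b.gain; }
};
using BlockPQ = std::priority_queue<PinEntry, std::vector<PinEntry>, ByGain>;

struct HG {
  std::vector<std::vector<NodeID>> pins;  // pin list per hyperedge
  std::vector<PartID> part;
  std::vector<bool> fixed;
};

Gain gainOf(const HG&, NodeID, PartID) { return 0; }  // stand-in for the FM gain

void insertPinsOfEdge(const HG& hg, std::size_t he, PartID default_block,
                      PartID to, BlockPQ& pq, std::unordered_set<NodeID>& queued) {
  for (NodeID pin : hg.pins[he]) {
    // The decisive new condition: fixed pins never enter the PQ, because
    // they may never be moved out of their prescribed block.
    if (hg.part[pin] == default_block && !queued.count(pin) && !hg.fixed[pin]) {
      pq.push({gainOf(hg, pin, to), pin});
      queued.insert(pin);
    }
  }
}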
@@ -39,6 +39,7 @@
#include "mt-kahypar/parallel/stl/scalable_vector.h"
#include "mt-kahypar/utils/cast.h"
#include "mt-kahypar/utils/utilities.h"
#include "mt-kahypar/utils/range.h"
#include "mt-kahypar/partition/refinement/fm/sequential_twoway_fm_refiner.h"
#include "mt-kahypar/partition/refinement/gains/gain_cache_ptr.h"

@@ -350,6 +351,7 @@ class InitialPartitioningDataContainer {

using ThreadLocalHypergraph = tbb::enumerable_thread_specific<LocalInitialPartitioningHypergraph>;
using ThreadLocalUnassignedHypernodes = tbb::enumerable_thread_specific<parallel::scalable_vector<HypernodeID>>;
+ using FixedVertexIterator = typename vec<HypernodeID>::const_iterator;

public:
InitialPartitioningDataContainer(PartitionedHypergraph& hypergraph,
@@ -368,6 +370,7 @@
_local_he_visited(_context.partition.k * hypergraph.initialNumEdges()),
_local_unassigned_hypernodes(),
_local_unassigned_hypernode_pointer(std::numeric_limits<size_t>::max()),
+ _fixed_vertices(),
_max_pop_size(_context.initial_partitioning.population_size) {
// Setup Label Propagation IRefiner Config for Initial Partitioning
_context.refinement = _context.initial_partitioning.refinement;
@@ -379,6 +382,14 @@
_best_partitions[i].second.resize(hypergraph.initialNumNodes(), kInvalidPartition);
}
}

+ if ( _partitioned_hg.hasFixedVertices() ) {
+ for ( const HypernodeID& hn : _partitioned_hg.nodes() ) {
+ if ( _partitioned_hg.isFixed(hn) ) {
+ _fixed_vertices.push_back(hn);
+ }
+ }
+ }
}

InitialPartitioningDataContainer(const InitialPartitioningDataContainer&) = delete;
@@ -434,7 +445,9 @@
// we initialize it here
const PartitionedHypergraph& hypergraph = local_partitioned_hypergraph();
for ( const HypernodeID& hn : hypergraph.nodes() ) {
- unassigned_hypernodes.push_back(hn);
+ if ( !hypergraph.isFixed(hn) ) {
+ unassigned_hypernodes.push_back(hn);
+ }
}
std::shuffle(unassigned_hypernodes.begin(), unassigned_hypernodes.end(), prng);
}
@@ -452,7 +465,8 @@
while ( unassigned_hypernode_pointer > 0 ) {
const HypernodeID current_hn = unassigned_hypernodes[0];
// In case the current hypernode is unassigned we return it
- if ( hypergraph.partID(current_hn) == unassigned_block ) {
+ if ( hypergraph.partID(current_hn) == unassigned_block &&
+ !hypergraph.isFixed(current_hn) ) {
return current_hn;
}
// In case the hypernode on the first position is already assigned,
@@ -651,6 +665,24 @@
_context.utility_id).add_initial_partitioning_result(best_flat_algo, number_of_threads, stats);
}

+ IteratorRange<FixedVertexIterator> fixedVertices() const {
+ return IteratorRange<FixedVertexIterator>(
+ _fixed_vertices.cbegin(), _fixed_vertices.cend());
+ }
+
+ HypernodeID numFixedVertices() const {
+ return _fixed_vertices.size();
+ }
+
+ void preassignFixedVertices(PartitionedHypergraph& hypergraph) {
+ if ( hypergraph.hasFixedVertices() ) {
+ for ( const HypernodeID& hn : fixedVertices() ) {
+ ASSERT(hypergraph.isFixed(hn));
+ hypergraph.setNodePart(hn, hypergraph.fixedVertexBlock(hn));
+ }
+ }
+ }

private:
LocalInitialPartitioningHypergraph construct_local_partitioned_hypergraph() {
return LocalInitialPartitioningHypergraph(
@@ -671,6 +703,7 @@
ThreadLocalFastResetFlagArray _local_he_visited;
ThreadLocalUnassignedHypernodes _local_unassigned_hypernodes;
tbb::enumerable_thread_specific<size_t> _local_unassigned_hypernode_pointer;
+ vec<HypernodeID> _fixed_vertices;

size_t _max_pop_size;
SpinLock _pop_lock;
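Taken together, the changes to this file give the shared data container a single place where fixed vertices are tracked: they are collected once at construction, exposed as a range plus a count, pre-assigned on demand, and filtered out of the unassigned-vertex pool. The sketch below condenses that bookkeeping under assumed, simplified interfaces (the real container operates on a PartitionedHypergraph with thread-local pools).

#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

using NodeID = uint32_t;
using PartID = int32_t;
constexpr PartID kNoPart = -1;
constexpr NodeID kNoNode = static_cast<NodeID>(-1);

class FixedVertexBook {
 public:
  // Collect the fixed vertices once; every flat algorithm reuses the list.
  explicit FixedVertexBook(std::vector<PartID> fixed_block)
      : _fixed_block(std::move(fixed_block)) {
    for (NodeID u = 0; u < _fixed_block.size(); ++u)
      if (_fixed_block[u] != kNoPart) _fixed.push_back(u);
  }

  const std::vector<NodeID>& fixedVertices() const { return _fixed; }
  std::size_t numFixedVertices() const { return _fixed.size(); }

  // Place every fixed vertex in its prescribed block; called once per
  // partitioning attempt before any free vertex is assigned.
  void preassignFixedVertices(std::vector<PartID>& part) const {
    for (NodeID u : _fixed) part[u] = _fixed_block[u];
  }

  // Hand out free vertices only, mirroring the guards added to
  // reset_unassigned_hypernodes() and get_unassigned_hypernode().
  NodeID popUnassigned(const std::vector<PartID>& part) const {
    for (NodeID u = 0; u < part.size(); ++u)
      if (part[u] == kNoPart && _fixed_block[u] == kNoPart) return u;
    return kNoNode;
  }

 private:
  std::vector<PartID> _fixed_block;
  std::vector<NodeID> _fixed;
};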
@@ -40,20 +40,46 @@ void LabelPropagationInitialPartitioner<TypeTraits>::partitionImpl() {
HighResClockTimepoint start = std::chrono::high_resolution_clock::now();
PartitionedHypergraph& hg = _ip_data.local_partitioned_hypergraph();

- _ip_data.reset_unassigned_hypernodes(_rng);
-
- parallel::scalable_vector<HypernodeID> start_nodes =
+ _ip_data.reset_unassigned_hypernodes(_rng);
+ _ip_data.preassignFixedVertices(hg);
+ vec<vec<HypernodeID>> start_nodes =
PseudoPeripheralStartNodes<TypeTraits>::computeStartNodes(_ip_data, _context, kInvalidPartition, _rng);
for ( PartitionID block = 0; block < _context.partition.k; ++block ) {
- if ( hg.partID(start_nodes[block]) == kInvalidPartition ) {
- hg.setNodePart(start_nodes[block], block);
+ size_t i = 0;
+ for ( ; i < std::min(start_nodes[block].size(),
+ _context.initial_partitioning.lp_initial_block_size); ++i ) {
+ const HypernodeID hn = start_nodes[block][i];
+ if ( hg.partID(hn) == kInvalidPartition && fitsIntoBlock(hg, hn, block) ) {
+ hg.setNodePart(hn, block);
+ } else {
+ std::swap(start_nodes[block][i--], start_nodes[block][start_nodes[block].size() - 1]);
+ start_nodes[block].pop_back();
+ }
+ }
+
+ // Remove remaining unassigned seed nodes
+ for ( ; i < start_nodes[block].size(); ++i ) {
+ start_nodes[block].pop_back();
+ }
+
+ if ( start_nodes[block].size() == 0 ) {
+ // There has been no seed node assigned to the block
+ // => find an unassigned node and assign it to the block
+ const HypernodeID hn = _ip_data.get_unassigned_hypernode();
+ if ( hn != kInvalidHypernode ) {
+ hg.setNodePart(hn, block);
+ start_nodes[block].push_back(hn);
+ }
+ }
}

// Each block is extended with 5 additional vertices which are adjacent
// to its seed vertices. This should prevent the block from becoming
// empty after several label propagation rounds.
for ( PartitionID block = 0; block < _context.partition.k; ++block ) {
- if ( hg.partID(start_nodes[block]) == block ) {
+ if ( !start_nodes[block].empty() && start_nodes[block].size() <
+ _context.initial_partitioning.lp_initial_block_size ) {
extendBlockToInitialBlockSize(hg, start_nodes[block], block);
}
}
@@ -63,8 +89,7 @@ void LabelPropagationInitialPartitioner<TypeTraits>::partitionImpl() {
converged = true;

for ( const HypernodeID& hn : hg.nodes() ) {

- if (hg.nodeDegree(hn) > 0) {
+ if (hg.nodeDegree(hn) > 0 && !hg.isFixed(hn)) {
// Assign vertex to the block where FM gain is maximized
MaxGainMove max_gain_move = computeMaxGainMove(hg, hn);

@@ -238,42 +263,39 @@ MaxGainMove LabelPropagationInitialPartitioner<TypeTraits>::findMaxGainMove(Part

template<typename TypeTraits>
void LabelPropagationInitialPartitioner<TypeTraits>::extendBlockToInitialBlockSize(PartitionedHypergraph& hypergraph,
- HypernodeID seed_vertex,
+ const vec<HypernodeID>& seed_vertices,
const PartitionID block) {
- ASSERT(hypergraph.partID(seed_vertex) == block);
- size_t block_size = 1;
+ ASSERT(seed_vertices.size() > 0);
+ size_t block_size = seed_vertices.size();

- while (block_size < _context.initial_partitioning.lp_initial_block_size) {
-
- // We search for _context.initial_partitioning.lp_initial_block_size vertices
- // around the seed vertex to extend the corresponding block
+ // We search for _context.initial_partitioning.lp_initial_block_size vertices
+ // around the seed vertex to extend the corresponding block
+ for ( const HypernodeID& seed_vertex : seed_vertices ) {
for ( const HyperedgeID& he : hypergraph.incidentEdges(seed_vertex) ) {
for ( const HypernodeID& pin : hypergraph.pins(he) ) {
- if ( hypergraph.partID(pin) == kInvalidPartition ) {
+ if ( hypergraph.partID(pin) == kInvalidPartition &&
+ fitsIntoBlock(hypergraph, pin, block) ) {
hypergraph.setNodePart(pin, block);
block_size++;
- if ( block_size >= _context.initial_partitioning.lp_initial_block_size ) {
- break;
- }
+ if ( block_size >= _context.initial_partitioning.lp_initial_block_size ) break;
}
}
- if ( block_size >= _context.initial_partitioning.lp_initial_block_size ) {
- break;
- }
+ if ( block_size >= _context.initial_partitioning.lp_initial_block_size ) break;
}
+ if ( block_size >= _context.initial_partitioning.lp_initial_block_size ) break;
+ }


- // If there are less than _context.initial_partitioning.lp_initial_block_size
- // adjacent vertices to the seed vertex, we find a new seed vertex and call
- // this function recursive
- if ( block_size < _context.initial_partitioning.lp_initial_block_size ) {
- seed_vertex = _ip_data.get_unassigned_hypernode();
- if ( seed_vertex != kInvalidHypernode ) {
- hypergraph.setNodePart(seed_vertex, block);
- block_size++;
- } else {
- break;
- }
+ // If there are fewer than _context.initial_partitioning.lp_initial_block_size
+ // vertices adjacent to the seed vertices, we repeatedly pull new seed
+ // vertices from the pool of unassigned vertices
+ while ( block_size < _context.initial_partitioning.lp_initial_block_size ) {
+ const HypernodeID seed_vertex = _ip_data.get_unassigned_hypernode();
+ if ( seed_vertex != kInvalidHypernode && fitsIntoBlock(hypergraph, seed_vertex, block) ) {
+ hypergraph.setNodePart(seed_vertex, block);
+ block_size++;
+ } else {
+ break;
+ }
+ }
}
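The two rewritten routines above, the seeding loop in partitionImpl and extendBlockToInitialBlockSize, now operate on vectors of seed vertices and respect the balance constraint. The sketch below mirrors both under a simplified, assumed State type: fits stands in for fitsIntoBlock, popUnassigned for get_unassigned_hypernode, and, unlike the patch's swap-and-pop, unusable seeds are compacted in place. All names are illustrative, not the real interface.

#include <cstddef>
#include <cstdint>
#include <vector>

using NodeID = uint32_t;
using PartID = int32_t;
constexpr PartID kNoPart = -1;
constexpr NodeID kNoNode = static_cast<NodeID>(-1);

struct State {
  std::vector<std::vector<NodeID>> adj;      // neighbors per vertex
  std::vector<PartID> part;                  // kNoPart = unassigned
  std::vector<int> weight, block_weight, max_block_weight;

  bool fits(NodeID u, PartID b) const {      // balance-constraint stand-in
    return block_weight[b] + weight[u] <= max_block_weight[b];
  }
  NodeID popUnassigned() const {             // first still-free vertex
    for (NodeID u = 0; u < part.size(); ++u)
      if (part[u] == kNoPart) return u;
    return kNoNode;
  }
  void assign(NodeID u, PartID b) { part[u] = b; block_weight[b] += weight[u]; }
};

// Keep only seeds that are still unassigned and fit into the block; a block
// that loses all of its seeds receives one arbitrary unassigned vertex.
void seedBlock(State& s, std::vector<NodeID>& seeds, PartID b, std::size_t max_seeds) {
  std::size_t kept = 0;
  for (NodeID u : seeds)
    if (kept < max_seeds && s.part[u] == kNoPart && s.fits(u, b)) {
      s.assign(u, b);
      seeds[kept++] = u;                     // compact usable seeds in place
    }
  seeds.resize(kept);                        // drop the unusable ones
  if (seeds.empty()) {
    const NodeID u = s.popUnassigned();
    if (u != kNoNode) { s.assign(u, b); seeds.push_back(u); }
  }
}

// Grow the block around *all* seed vertices until it reaches `target`,
// adopting only free neighbors that satisfy the balance constraint; fall
// back to arbitrary unassigned vertices once the neighborhoods are exhausted.
void extendBlock(State& s, const std::vector<NodeID>& seeds, PartID b, std::size_t target) {
  std::size_t size = seeds.size();
  for (NodeID seed : seeds)
    for (NodeID v : s.adj[seed]) {
      if (size >= target) return;
      if (s.part[v] == kNoPart && s.fits(v, b)) { s.assign(v, b); ++size; }
    }
  while (size < target) {                    // as in the patch: stop at the
    const NodeID u = s.popUnassigned();      // first vertex that cannot fit
    if (u == kNoNode || !s.fits(u, b)) break;
    s.assign(u, b); ++size;
  }
}

Falling back to arbitrary unassigned vertices only after every seed neighborhood is exhausted matches the patch's intent of keeping the grown blocks connected where possible.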
@@ -92,7 +92,7 @@ class LabelPropagationInitialPartitioner : public IInitialPartitioner {
const HypernodeWeight internal_weight);

void extendBlockToInitialBlockSize(PartitionedHypergraph& hypergraph,
- HypernodeID seed_vertex,
+ const vec<HypernodeID>& seed_vertices,
const PartitionID block);

void assignVertexToBlockWithMinimumWeight(PartitionedHypergraph& hypergraph,