diff --git a/mt-kahypar/partition/deep_multilevel.cpp b/mt-kahypar/partition/deep_multilevel.cpp
index d5c684277..d200a496b 100644
--- a/mt-kahypar/partition/deep_multilevel.cpp
+++ b/mt-kahypar/partition/deep_multilevel.cpp
@@ -31,18 +31,17 @@
 #include
 #include
 
-#include "tbb/parallel_invoke.h"
+#include "tbb/parallel_for.h"
 
 #include "mt-kahypar/definitions.h"
 #include "mt-kahypar/macros.h"
+#include "mt-kahypar/partition/metrics.h"
 #include "mt-kahypar/partition/multilevel.h"
+#include "mt-kahypar/partition/coarsening/coarsening_commons.h"
 #include "mt-kahypar/partition/coarsening/multilevel_uncoarsener.h"
 #include "mt-kahypar/partition/coarsening/nlevel_uncoarsener.h"
-#include "mt-kahypar/partition/initial_partitioning/pool_initial_partitioner.h"
-#include "mt-kahypar/partition/preprocessing/sparsification/degree_zero_hn_remover.h"
 #include "mt-kahypar/partition/refinement/gains/gain_cache_ptr.h"
 #include "mt-kahypar/partition/refinement/gains/bipartitioning_policy.h"
-#include "mt-kahypar/utils/randomize.h"
 #include "mt-kahypar/utils/utilities.h"
 #include "mt-kahypar/utils/timer.h"
 #include "mt-kahypar/utils/progress_bar.h"
@@ -279,19 +278,18 @@ void enableTimerAndStats(const Context& context, const bool was_enabled_before)
   }
 }
 
-template<typename Hypergraph>
-Context setupBipartitioningContext(const Hypergraph& hypergraph,
-                                   const Context& context,
+Context setupBipartitioningContext(const Context& context,
                                    const OriginalHypergraphInfo& info,
                                    const PartitionID start_k,
-                                   const PartitionID end_k) {
+                                   const PartitionID end_k,
+                                   const HypernodeWeight total_weight,
+                                   const bool is_graph) {
   ASSERT(end_k - start_k >= 2);
   Context b_context(context);
 
   b_context.partition.k = 2;
   b_context.partition.objective = Objective::cut;
-  b_context.partition.gain_policy = Hypergraph::is_graph ?
-    GainPolicy::cut_for_graphs : GainPolicy::cut;
+  b_context.partition.gain_policy = is_graph ? GainPolicy::cut_for_graphs : GainPolicy::cut;
   b_context.partition.verbose_output = false;
   b_context.initial_partitioning.mode = Mode::direct;
   b_context.type = ContextType::initial_partitioning;
@@ -306,7 +304,6 @@
   b_context.refinement = b_context.initial_partitioning.refinement;
 
   // Setup Part Weights
-  const HypernodeWeight total_weight = hypergraph.totalWeight();
   const PartitionID k = end_k - start_k;
   const PartitionID k0 = k / 2 + (k % 2 != 0 ? 1 : 0);
   const PartitionID k1 = k / 2;
@@ -472,7 +469,7 @@ DeepPartitioningResult<TypeTraits> bipartition_block(typename TypeTraits::Hyperg
   if ( bipartition.hypergraph.initialNumNodes() > 0 ) {
     // Bipartition block
     Context b_context = setupBipartitioningContext(
-      bipartition.hypergraph, context, info, start_k, end_k);
+      context, info, start_k, end_k, bipartition.hypergraph.totalWeight(), PartitionedHypergraph::is_graph);
     bipartition.partitioned_hg = Multilevel<TypeTraits>::partition(
       bipartition.hypergraph, b_context);
   } else {
@@ -483,9 +480,61 @@
 }
 
 template<typename TypeTraits, typename GainCache>
+void apply_bipartitions_to_hypergraph(typename TypeTraits::PartitionedHypergraph& partitioned_hg,
+                                      GainCache& gain_cache,
+                                      const vec<HypernodeID>& mapping,
+                                      const vec<DeepPartitioningResult<TypeTraits>>& bipartitions,
+                                      const vec<PartitionID>& block_ranges) {
+  partitioned_hg.doParallelForAllNodes([&](const HypernodeID& hn) {
+    const PartitionID from = partitioned_hg.partID(hn);
+    ASSERT(static_cast<size_t>(from) < bipartitions.size());
+    PartitionID to = kInvalidPartition;
+    const DeepPartitioningResult<TypeTraits>& bipartition = bipartitions[from];
+    if ( bipartition.valid ) {
+      ASSERT(static_cast<size_t>(hn) < mapping.size());
+      const HypernodeID mapped_hn = mapping[hn];
+      to = bipartition.partitioned_hg.partID(mapped_hn) == 0 ?
+        block_ranges[from] : block_ranges[from] + 1;
+    } else {
+      to = block_ranges[from];
+    }
+
+    ASSERT(to > kInvalidPartition && to < block_ranges.back());
+    if ( from != to ) {
+      if ( gain_cache.isInitialized() ) {
+        partitioned_hg.changeNodePart(gain_cache, hn, from, to);
+      } else {
+        partitioned_hg.changeNodePart(hn, from, to);
+      }
+    }
+  });
+
+  if ( GainCache::invalidates_entries && gain_cache.isInitialized() ) {
+    partitioned_hg.doParallelForAllNodes([&](const HypernodeID& hn) {
+      gain_cache.recomputeInvalidTerms(partitioned_hg, hn);
+    });
+  }
+
+  HEAVY_REFINEMENT_ASSERT(partitioned_hg.checkTrackedPartitionInformation(gain_cache));
+}
+
+template<typename TypeTraits>
+void apply_bipartitions_to_hypergraph(typename TypeTraits::PartitionedHypergraph& partitioned_hg,
+                                      gain_cache_t gain_cache,
+                                      const vec<HypernodeID>& mapping,
+                                      const vec<DeepPartitioningResult<TypeTraits>>& bipartitions,
+                                      const vec<PartitionID>& block_ranges) {
+  using PartitionedHypergraph = typename TypeTraits::PartitionedHypergraph;
+
+  GainCachePtr::applyWithConcreteGainCacheForHG<PartitionedHypergraph>([&](auto& gain_cache) {
+    apply_bipartitions_to_hypergraph(partitioned_hg, gain_cache, mapping, bipartitions, block_ranges);
+  }, gain_cache);
+}
+
+template<typename TypeTraits>
 void bipartition_each_block(typename TypeTraits::PartitionedHypergraph& partitioned_hg,
                             const Context& context,
-                            GainCache& gain_cache,
+                            gain_cache_t gain_cache,
                             const OriginalHypergraphInfo& info,
                             const RBTree& rb_tree,
                             vec<uint8_t>& already_cut,
@@ -551,30 +600,8 @@ void bipartition_each_block(typename TypeTraits::PartitionedHypergraph& partitio
   timer.stop_timer("bipartition_blocks");
 
   timer.start_timer("apply_bipartitions", "Apply Bipartition");
-  // Apply all bipartitions to current hypergraph
-  partitioned_hg.doParallelForAllNodes([&](const HypernodeID& hn) {
-    const PartitionID from = partitioned_hg.partID(hn);
-    ASSERT(static_cast<size_t>(from) < bipartitions.size());
-    PartitionID to = kInvalidPartition;
-    const DeepPartitioningResult<TypeTraits>& bipartition = bipartitions[from];
-    if ( bipartition.valid ) {
-      ASSERT(static_cast<size_t>(hn) < mapping.size());
-      const HypernodeID mapped_hn = mapping[hn];
-      to = bipartition.partitioned_hg.partID(mapped_hn) == 0 ?
-        block_ranges[from] : block_ranges[from] + 1;
-    } else {
-      to = block_ranges[from];
-    }
-
-    ASSERT(to > kInvalidPartition && to < block_ranges.back());
-    if ( from != to ) {
-      if ( gain_cache.isInitialized() ) {
-        partitioned_hg.changeNodePart(gain_cache, hn, from, to);
-      } else {
-        partitioned_hg.changeNodePart(hn, from, to);
-      }
-    }
-  });
+  apply_bipartitions_to_hypergraph(partitioned_hg, gain_cache, mapping, bipartitions, block_ranges);
+  timer.stop_timer("apply_bipartitions");
 
   ASSERT([&] {
     HyperedgeWeight expected_objective = current_objective;
@@ -592,69 +619,12 @@ void bipartition_each_block(typename TypeTraits::PartitionedHypergraph& partitio
     return true;
   }(), "Cut of extracted blocks does not sum up to current objective");
 
-  if ( GainCache::invalidates_entries && gain_cache.isInitialized() ) {
-    partitioned_hg.doParallelForAllNodes([&](const HypernodeID& hn) {
-      gain_cache.recomputeInvalidTerms(partitioned_hg, hn);
-    });
-  }
-  timer.stop_timer("apply_bipartitions");
-
   timer.start_timer("free_hypergraphs", "Free Hypergraphs");
   tbb::parallel_for(UL(0), bipartitions.size(), [&](const size_t i) {
     DeepPartitioningResult<TypeTraits> tmp_res;
     tmp_res = std::move(bipartitions[i]);
   });
   timer.stop_timer("free_hypergraphs");
-
-  HEAVY_REFINEMENT_ASSERT(partitioned_hg.checkTrackedPartitionInformation(gain_cache));
-}
-
-template<typename TypeTraits>
-void bipartition_each_block(typename TypeTraits::PartitionedHypergraph& partitioned_hg,
-                            const Context& context,
-                            gain_cache_t gain_cache,
-                            const OriginalHypergraphInfo& info,
-                            const RBTree& rb_tree,
-                            vec<uint8_t>& already_cut,
-                            const PartitionID current_k,
-                            const HyperedgeWeight current_objective,
-                            const bool progress_bar_enabled) {
-  switch(gain_cache.type) {
-    case GainPolicy::cut:
-      bipartition_each_block<TypeTraits>(partitioned_hg, context,
-        GainCachePtr::cast<CutGainCache>(gain_cache), info, rb_tree,
-        already_cut, current_k, current_objective, progress_bar_enabled); break;
-    case GainPolicy::km1:
-      bipartition_each_block<TypeTraits>(partitioned_hg, context,
-        GainCachePtr::cast<Km1GainCache>(gain_cache), info, rb_tree,
-        already_cut, current_k, current_objective, progress_bar_enabled); break;
-    #ifdef KAHYPAR_ENABLE_SOED_METRIC
-    case GainPolicy::soed:
-      bipartition_each_block<TypeTraits>(partitioned_hg, context,
-        GainCachePtr::cast<SoedGainCache>(gain_cache), info, rb_tree,
-        already_cut, current_k, current_objective, progress_bar_enabled); break;
-    #endif
-    #ifdef KAHYPAR_ENABLE_STEINER_TREE_METRIC
-    case GainPolicy::steiner_tree:
-      bipartition_each_block<TypeTraits>(partitioned_hg, context,
-        GainCachePtr::cast<SteinerTreeGainCache>(gain_cache), info, rb_tree,
-        already_cut, current_k, current_objective, progress_bar_enabled); break;
-    #endif
-    #ifdef KAHYPAR_ENABLE_GRAPH_PARTITIONING_FEATURES
-    case GainPolicy::cut_for_graphs:
-      bipartition_each_block<TypeTraits>(partitioned_hg, context,
-        GainCachePtr::cast<GraphCutGainCache>(gain_cache), info, rb_tree,
-        already_cut, current_k, current_objective, progress_bar_enabled); break;
-    #ifdef KAHYPAR_ENABLE_STEINER_TREE_METRIC
-    case GainPolicy::steiner_tree_for_graphs:
-      bipartition_each_block<TypeTraits>(partitioned_hg, context,
-        GainCachePtr::cast<GraphSteinerTreeGainCache>(gain_cache), info, rb_tree,
-        already_cut, current_k, current_objective, progress_bar_enabled); break;
-    #endif
-    #endif
-    case GainPolicy::none: break;
-    default: break;
-  }
 }
 
 template<typename TypeTraits>
@@ -768,7 +738,8 @@ PartitionID deep_multilevel_partitioning(typename TypeTraits::PartitionedHypergr
     // and continue with uncoarsening.
     const auto target_blocks = rb_tree.targetBlocksInFinalPartition(1, 0);
     Context b_context = setupBipartitioningContext(
-      hypergraph, context, info, target_blocks.first, target_blocks.second);
+      context, info, target_blocks.first, target_blocks.second,
+      hypergraph.totalWeight(), Hypergraph::is_graph);
     Multilevel<TypeTraits>::partition(coarsest_phg, b_context);
     current_k = 2;
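
Reviewer note on the setupBipartitioningContext change: passing total_weight and is_graph explicitly removes the function's last dependency on the hypergraph type, which is why the template<typename Hypergraph> line can go. The k0/k1 context lines kept by the hunk at line 306 split the target block range [start_k, end_k) into ceil(k/2) and floor(k/2) final blocks. The stand-alone sketch below shows how the two side weights of the bipartition can then be derived from total_weight. The proportional split and the flat eps are simplifying assumptions of this sketch (the actual code derives an adaptive imbalance, which is why info remains a parameter), and BipartitionWeights/splitPartWeights are invented names, not mt-kahypar API.

// Stand-alone sketch (not mt-kahypar code) of the k0/k1 split retained in
// setupBipartitioningContext: side 0 takes the extra block when k is odd,
// and each side's weight budget is assumed proportional to its block count.
#include <cstdint>
#include <iostream>

using HypernodeWeight = std::int64_t;
using PartitionID = std::int32_t;

struct BipartitionWeights {
  HypernodeWeight perfect[2];  // perfectly balanced side weights
  HypernodeWeight max[2];      // side weights after applying the imbalance eps
};

BipartitionWeights splitPartWeights(const HypernodeWeight total_weight,
                                    const PartitionID start_k,
                                    const PartitionID end_k,
                                    const double eps) {
  const PartitionID k = end_k - start_k;                 // blocks in this range
  const PartitionID k0 = k / 2 + (k % 2 != 0 ? 1 : 0);   // ceil(k / 2)
  const PartitionID k1 = k / 2;                          // floor(k / 2)
  BipartitionWeights w;
  w.perfect[0] = total_weight * k0 / k;
  w.perfect[1] = total_weight * k1 / k;
  w.max[0] = static_cast<HypernodeWeight>((1.0 + eps) * w.perfect[0]);
  w.max[1] = static_cast<HypernodeWeight>((1.0 + eps) * w.perfect[1]);
  return w;
}

int main() {
  // Splitting a range of 5 final blocks: side 0 covers 3 of them, side 1 covers 2.
  const BipartitionWeights w = splitPartWeights(1000, 0, 5, 0.03);
  std::cout << w.max[0] << " " << w.max[1] << '\n';  // prints: 618 412
}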
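Reviewer note on the gain cache handling: the deleted switch(gain_cache.type) overload of bipartition_each_block is subsumed by GainCachePtr::applyWithConcreteGainCacheForHG, which invokes a generic lambda with the concrete gain cache type, so the per-policy GainCachePtr::cast<...> calls no longer need to be spelled out here. The self-contained sketch below illustrates that dispatch pattern with std::variant; the Toy* names and apply_with_concrete_cache are invented for illustration, and mt-kahypar itself uses a type-erased gain_cache_t handle tagged with a GainPolicy rather than a variant.

// Stand-alone illustration (not mt-kahypar code) of dispatching a generic
// lambda to the concrete gain cache type, replacing a hand-written switch.
// Compile with -std=c++17.
#include <iostream>
#include <type_traits>
#include <utility>
#include <variant>

struct ToyCutCache {                    // hypothetical concrete cache #1
  static constexpr bool invalidates_entries = false;
  int gain(int node) const { return node; }
};

struct ToyKm1Cache {                    // hypothetical concrete cache #2
  static constexpr bool invalidates_entries = true;
  int gain(int node) const { return 2 * node; }
};

// Type-erased handle, playing the role of gain_cache_t.
using ToyGainCache = std::variant<ToyCutCache, ToyKm1Cache>;

// One generic entry point replaces the switch over the cache type: the lambda
// is instantiated once per concrete cache, so compile-time traits still work.
template <typename F>
void apply_with_concrete_cache(ToyGainCache& cache, F&& f) {
  std::visit(std::forward<F>(f), cache);
}

int main() {
  ToyGainCache cache = ToyKm1Cache{};
  apply_with_concrete_cache(cache, [](auto& concrete) {
    using Cache = std::decay_t<decltype(concrete)>;
    std::cout << "gain(21) = " << concrete.gain(21) << '\n';
    if constexpr (Cache::invalidates_entries) {
      std::cout << "recompute invalidated cache entries here\n";
    }
  });
}

The payoff visible in the diff: code that depends on compile-time traits of the concrete cache, such as the GainCache::invalidates_entries check, moves into the shared apply_bipartitions_to_hypergraph and is instantiated per cache type by the generic lambda, leaving only thin gain_cache_t dispatch shims behind.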