refactor: move to the same interface as in the Shm code
DanielSeemaier committed Apr 30, 2024
1 parent 78a8f37 commit ba85d5d
Showing 7 changed files with 177 additions and 266 deletions.
kaminpar-dist/coarsening/coarsener.h (94 changes: 51 additions & 43 deletions)
@@ -1,61 +1,69 @@
 /*******************************************************************************
- * Builds and manages a hierarchy of coarse graphs.
+ * Interface for graph coarseners.
  *
  * @file:   coarsener.h
  * @author: Daniel Seemaier
  * @date:   28.04.2022
  ******************************************************************************/
 #pragma once
 
-#include <vector>
-
-#include "kaminpar-dist/coarsening/clustering/clusterer.h"
-#include "kaminpar-dist/coarsening/contraction/cluster_contraction.h"
-#include "kaminpar-dist/context.h"
 #include "kaminpar-dist/datastructures/distributed_graph.h"
 #include "kaminpar-dist/datastructures/distributed_partitioned_graph.h"
-#include "kaminpar-dist/dkaminpar.h"
-
-#include "kaminpar-common/datastructures/scalable_vector.h"
 
 namespace kaminpar::dist {
 class Coarsener {
 public:
-  Coarsener(const DistributedGraph &input_graph, const Context &input_ctx);
-
-  const DistributedGraph *coarsen_once();
-
-  const DistributedGraph *coarsen_once(GlobalNodeWeight max_cluster_weight);
-
-  DistributedPartitionedGraph uncoarsen_once(DistributedPartitionedGraph &&p_graph);
-
-  GlobalNodeWeight max_cluster_weight() const;
-  const DistributedGraph *coarsest() const;
-  std::size_t level() const;
-
-private:
-  const DistributedGraph *coarsen_once_local(GlobalNodeWeight max_cluster_weight);
-  const DistributedGraph *coarsen_once_global(GlobalNodeWeight max_cluster_weight);
-
-  DistributedPartitionedGraph uncoarsen_once_local(DistributedPartitionedGraph &&p_graph);
-  DistributedPartitionedGraph uncoarsen_once_global(DistributedPartitionedGraph &&p_graph);
-
-  const DistributedGraph *nth_coarsest(std::size_t n) const;
-
-  bool has_converged(const DistributedGraph &before, const DistributedGraph &after) const;
-
-  const DistributedGraph &_input_graph;
-  const Context &_input_ctx;
-
-  std::unique_ptr<GlobalClusterer> _global_clusterer;
-  std::unique_ptr<LocalClusterer> _local_clusterer;
-
-  std::vector<DistributedGraph> _graph_hierarchy;
-  std::vector<GlobalMapping> _global_mapping_hierarchy; //< produced by global clustering algorithm
-  std::vector<MigratedNodes> _node_migration_history;
-  std::vector<ScalableVector<NodeID>>
-      _local_mapping_hierarchy; //< produced by local clustering algorithm
-
-  bool _local_clustering_converged = false;
+  Coarsener() = default;
+
+  Coarsener(const Coarsener &) = delete;
+  Coarsener &operator=(const Coarsener &) = delete;
+
+  Coarsener(Coarsener &&) noexcept = default;
+  Coarsener &operator=(Coarsener &&) noexcept = default;
+
+  virtual ~Coarsener() = default;
+
+  /**
+   * Initializes the coarsener with a new toplevel graph.
+   */
+  virtual void initialize(const DistributedGraph *graph) = 0;
+
+  /**
+   * Computes the next level of the graph hierarchy.
+   *
+   * @return whether coarsening has *not* yet converged.
+   */
+  virtual bool coarsen() = 0;
+
+  /**
+   * @return the coarsest graph in the hierarchy.
+   */
+  [[nodiscard]] virtual const DistributedGraph &current() const = 0;
+
+  /**
+   * @return number of coarse graphs in the hierarchy.
+   */
+  [[nodiscard]] virtual std::size_t level() const = 0;
+
+  /**
+   * @return whether we have *not* yet computed any coarse graphs.
+   */
+  [[nodiscard]] bool empty() const {
+    return level() == 0;
+  }
+
+  /**
+   * Projects a partition of the currently coarsest graph onto the next finer
+   * graph and frees the currently coarsest graph, i.e., unrolls one level of
+   * the coarse graph hierarchy.
+   *
+   * @param p_graph Partition of the currently coarsest graph.
+   *                Precondition: `p_graph.graph() == current()`.
+   *
+   * @return partition of the *new* coarsest graph.
+   */
+  virtual DistributedPartitionedGraph uncoarsen(DistributedPartitionedGraph &&p_graph) = 0;
 };
 } // namespace kaminpar::dist
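
For context, a minimal sketch of how a multilevel driver could use the new interface. This code is not part of the commit: compute_initial_partition() and refine() are hypothetical stand-ins for KaMinPar's actual initial partitioning and refinement routines, and a concrete Coarsener subclass is assumed to be supplied by the caller.

// Usage sketch (not from this commit). Assumes a concrete Coarsener
// implementation; compute_initial_partition() and refine() are hypothetical
// stand-ins for the real initial partitioning and refinement steps.
#include <utility>

#include "kaminpar-dist/coarsening/coarsener.h"

namespace kaminpar::dist {

// Hypothetical helpers, assumed to be implemented elsewhere.
DistributedPartitionedGraph compute_initial_partition(const DistributedGraph &graph);
void refine(DistributedPartitionedGraph &p_graph);

DistributedPartitionedGraph partition_multilevel(
    const DistributedGraph &graph, Coarsener &coarsener
) {
  // Bind the coarsener to the toplevel graph.
  coarsener.initialize(&graph);

  // Coarsening phase: coarsen() returns false once it has converged,
  // i.e., once it can no longer shrink the graph.
  while (coarsener.coarsen()) {
  }

  // Partition the coarsest graph in the hierarchy.
  DistributedPartitionedGraph p_graph = compute_initial_partition(coarsener.current());

  // Uncoarsening phase: project the partition onto the next finer graph,
  // one level at a time, until we are back on the toplevel graph. The
  // precondition p_graph.graph() == current() holds on every iteration.
  while (!coarsener.empty()) {
    p_graph = coarsener.uncoarsen(std::move(p_graph));
    refine(p_graph);
  }

  return p_graph;
}

} // namespace kaminpar::dist

Note the design shift visible in the diff: where the old coarsen_once() returned a pointer to the coarse graph, the new coarsen() returns only a convergence flag, so the driver loop stays agnostic of how the implementation stores its hierarchy; this is what lets the distributed code share the interface shape of the shared-memory (Shm) code.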
