Fix warnings #193

Merged: 20 commits, Oct 16, 2024

Changes from all commits
2 changes: 1 addition & 1 deletion external_tools/googletest
Submodule googletest updated 242 files
1 change: 0 additions & 1 deletion mt-kahypar/datastructures/array.h
@@ -57,7 +57,6 @@ class Array {

ArrayIterator() : _ptr(nullptr) { }
ArrayIterator(T* ptr) : _ptr(ptr) { }
- ArrayIterator(const ArrayIterator& other) : _ptr(other._ptr) { }

reference operator*() const {
return *_ptr;
18 changes: 9 additions & 9 deletions mt-kahypar/datastructures/connectivity_set.h
@@ -69,8 +69,8 @@ class ConnectivitySets {
using Iterator = typename StaticBitset::const_iterator;

ConnectivitySets() :
- _k(0),
- _num_hyperedges(0),
+ ENABLE_ASSERTIONS(_k(0) COMMA)
+ ENABLE_ASSERTIONS(_num_hyperedges(0) COMMA)
_num_blocks_per_hyperedge(0),
_bits(),
_deep_copy_bitset(),
@@ -79,8 +79,8 @@ class ConnectivitySets {
ConnectivitySets(const HyperedgeID num_hyperedges,
const PartitionID k,
const bool assign_parallel = true) :
- _k(k),
- _num_hyperedges(num_hyperedges),
+ ENABLE_ASSERTIONS(_k(k) COMMA)
+ ENABLE_ASSERTIONS(_num_hyperedges(num_hyperedges) COMMA)
_num_blocks_per_hyperedge(k / BITS_PER_BLOCK + (k % BITS_PER_BLOCK != 0)),
_bits(),
_deep_copy_bitset(),
@@ -178,16 +178,16 @@ class ConnectivitySets {

private:
void toggle(const HyperedgeID he, const PartitionID p) {
- assert(p < _k);
- assert(he < _num_hyperedges);
+ ASSERT(p < _k);
+ ASSERT(he < _num_hyperedges);
const size_t div = p / BITS_PER_BLOCK, rem = p % BITS_PER_BLOCK;
const size_t idx = static_cast<size_t>(he) * _num_blocks_per_hyperedge + div;
- assert(idx < _bits.size());
+ ASSERT(idx < _bits.size());
__atomic_xor_fetch(&_bits[idx], UnsafeBlock(1) << rem, __ATOMIC_RELAXED);
}

- PartitionID _k;
- HyperedgeID _num_hyperedges;
+ ENABLE_ASSERTIONS(PartitionID _k;)
+ ENABLE_ASSERTIONS(HyperedgeID _num_hyperedges;)
PartitionID _num_blocks_per_hyperedge;
Array<UnsafeBlock> _bits;

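For reference, toggle() flips a single connectivity bit with the GCC/clang builtin __atomic_xor_fetch. A standalone sketch of the same block/bit arithmetic, assuming 64-bit blocks (the helper name toggle_bit is illustrative, not from the codebase):

    #include <cstddef>
    #include <cstdint>

    using UnsafeBlock = uint64_t;
    constexpr size_t BITS_PER_BLOCK = 64;  // assumed block width

    // Atomically flip bit p of a bitset stored as an array of blocks.
    void toggle_bit(UnsafeBlock* bits, size_t p) {
      const size_t div = p / BITS_PER_BLOCK;  // index of the containing block
      const size_t rem = p % BITS_PER_BLOCK;  // bit position within that block
      __atomic_xor_fetch(&bits[div], UnsafeBlock(1) << rem, __ATOMIC_RELAXED);
    }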
1 change: 1 addition & 0 deletions mt-kahypar/datastructures/delta_partitioned_hypergraph.h
@@ -340,6 +340,7 @@ class DeltaPartitionedHypergraph {
MT_KAHYPAR_ATTRIBUTE_ALWAYS_INLINE
void updateConnectivitySet(const HyperedgeID e,
const SynchronizedEdgeUpdate& sync_update) {
+ unused(e);
if ( sync_update.pin_count_in_from_part_after == 0 ) {
_connectivity_set_delta.remove(sync_update.he, sync_update.from);
}
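For context: unused(e) consumes a parameter that is only needed in some build configurations, silencing -Wunused-parameter without dropping the parameter name. A minimal sketch of the usual shape of such a helper (assumed; the actual definition in macros.h may differ):

    // No-op variadic helper: passing arguments here makes the compiler
    // consider them used. Hypothetical definition for illustration only.
    template <typename... Args>
    void unused(Args&&...) { }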
1 change: 1 addition & 0 deletions mt-kahypar/datastructures/dynamic_graph.h
@@ -681,6 +681,7 @@ class DynamicGraph {
// ####################### Contract / Uncontract #######################

DynamicGraph contract(parallel::scalable_vector<HypernodeID>&, bool deterministic = false) {
+ unused(deterministic);
throw NonSupportedOperationException(
"contract(c, id) is not supported in dynamic graph");
return DynamicGraph();
1 change: 1 addition & 0 deletions mt-kahypar/datastructures/dynamic_hypergraph.h
@@ -775,6 +775,7 @@ class DynamicHypergraph {
// ####################### Contract / Uncontract #######################

DynamicHypergraph contract(parallel::scalable_vector<HypernodeID>&, bool deterministic = false) {
+ unused(deterministic);
throw NonSupportedOperationException(
"contract(c, id) is not supported in dynamic hypergraph");
return DynamicHypergraph();
3 changes: 3 additions & 0 deletions mt-kahypar/datastructures/sparse_pin_counts.h
@@ -97,6 +97,8 @@ class SparsePinCounts {
_k(k),
_pin_count_list(data),
_ext_pin_count_list(nullptr) {
+ // this assert needs to be active in release mode to silence a null-pointer-related compiler warning
+ ALWAYS_ASSERT(data != nullptr);
next_valid_entry();
}

@@ -108,6 +110,7 @@
_k(k),
_pin_count_list(nullptr),
_ext_pin_count_list(data) {
+ ASSERT(data != nullptr);
next_valid_entry();
}

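The comment above is the key point of this hunk: ASSERT vanishes unless KAHYPAR_USE_ASSERTIONS is defined, whereas ALWAYS_ASSERT stays active in release builds, so the compiler can assume data is non-null after the check. A hedged sketch of the distinction (the real macros add error messages and more machinery):

    #include <cstdlib>

    // Illustrative only -- the actual mt-kahypar macros are more elaborate.
    #define ALWAYS_ASSERT(cond) do { if (!(cond)) std::abort(); } while (0)

    #ifdef KAHYPAR_USE_ASSERTIONS
      #define ASSERT(cond) ALWAYS_ASSERT(cond)
    #else
      #define ASSERT(cond) ((void)0)  // compiled out in release mode
    #endif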
4 changes: 2 additions & 2 deletions mt-kahypar/datastructures/static_bitset.h
@@ -59,7 +59,7 @@ class StaticBitset {
OneBitIterator(const size_t num_blocks,
const Block* bitset,
const PartitionID start_block) :
- _num_blocks(num_blocks),
+ ENABLE_ASSERTIONS(_num_blocks(num_blocks) COMMA)
_bitset(bitset),
_max_block_id(num_blocks * BITS_PER_BLOCK),
_current_block_id(start_block) {
@@ -113,7 +113,7 @@
return __atomic_load_n(_bitset + ( _current_block_id >> DIV_SHIFT ), __ATOMIC_RELAXED);
}

- const size_t _num_blocks;
+ ENABLE_ASSERTIONS(const size_t _num_blocks;)
const Block* _bitset;
const PartitionID _max_block_id;
PartitionID _current_block_id;
6 changes: 6 additions & 0 deletions mt-kahypar/macros.h
@@ -164,6 +164,12 @@
#define ENABLE_EXPERIMENTAL_FEATURES(X)
#endif

+ #ifdef KAHYPAR_USE_ASSERTIONS
+ #define ENABLE_ASSERTIONS(X) X
+ #else
+ #define ENABLE_ASSERTIONS(X)
+ #endif

#ifdef KAHYPAR_ENABLE_LARGE_K_PARTITIONING_FEATURES
#define ENABLE_LARGE_K(X) X
#else
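This macro is what the data-structure changes above rely on: fields that exist only for assertions are compiled out in release mode, which removes the unused-field warnings. Because a bare comma inside the macro argument would be parsed as an argument separator, initializer lists spell the trailing comma with the COMMA helper (visible in the test diffs below; presumably defined as `#define COMMA ,`), which expands to `,` only after the macro call is parsed. A self-contained sketch of the pattern:

    #define COMMA ,  // assumed definition, as used elsewhere in mt-kahypar

    #ifdef KAHYPAR_USE_ASSERTIONS
    #define ENABLE_ASSERTIONS(X) X
    #else
    #define ENABLE_ASSERTIONS(X)
    #endif

    class Example {
     public:
      Example() :
        ENABLE_ASSERTIONS(_debug_only(0) COMMA)  // initializer disappears in release builds
        _always_present(0) { }
     private:
      ENABLE_ASSERTIONS(int _debug_only;)        // declaration disappears too
      int _always_present;
    };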
79 changes: 49 additions & 30 deletions mt-kahypar/partition/coarsening/multilevel_coarsener.h
@@ -77,6 +77,7 @@ class MultilevelCoarsener : public ICoarsener,
#define STATE(X) static_cast<uint8_t>(X)
using AtomicMatchingState = parallel::IntegralAtomicWrapper<uint8_t>;
using AtomicWeight = parallel::IntegralAtomicWrapper<HypernodeWeight>;
+ using AtomicID = parallel::IntegralAtomicWrapper<HypernodeID>;

static constexpr bool debug = false;
static constexpr bool enable_heavy_assert = false;
@@ -154,8 +155,8 @@
ASSERT(hn < _current_vertices.size());
// Reset clustering
_current_vertices[hn] = hn;
- _matching_state[hn] = STATE(MatchingState::UNMATCHED);
- _matching_partner[hn] = hn;
+ _matching_state[hn].store(STATE(MatchingState::UNMATCHED), std::memory_order_relaxed);
+ _matching_partner[hn].store(hn, std::memory_order_relaxed);
cluster_ids[hn] = hn;
if ( current_hg.nodeIsEnabled(hn) ) {
_cluster_weight[hn] = current_hg.nodeWeight(hn);
@@ -362,77 +363,97 @@
parallel::scalable_vector<HypernodeID>& cluster_ids,
HypernodeID& contracted_nodes,
ds::FixedVertexSupport<Hypergraph>& fixed_vertices) {
+ // MEMORY ORDERING AND SYNCHRONIZATION
+ // During the clustering, there are concurrent memory accesses to the following 4 locations:
+ // _matching_state, cluster_ids, _matching_partner and _cluster_weight.
+ // We use _matching_state to synchronize accesses to cluster_ids with acquire/release semantics, while
+ // _matching_partner and _cluster_weight don't require synchronization. In more detail (see also PR #193):
+ // 1. We read cluster_ids[v] to determine the representative for joinCluster if v is already matched.
+ //    This is synchronized by using release ordering when writing the MATCHED state to _matching_state
+ //    and always checking _matching_state[v] with acquire ordering before accessing cluster_ids[v].
+ // 2. _matching_partner is used for conflict resolution. Since the conflict resolution loops until
+ //    a stable state is detected, no explicit synchronization is necessary and relaxed ordering suffices.
+ // 3. Updating _cluster_weight might cause a race condition in joinCluster. However, this just causes the
+ //    cluster to exceed the allowed weight. This is acceptable since it rarely happens in practice.

ASSERT(u < hypergraph.initialNumNodes());
ASSERT(v < hypergraph.initialNumNodes());
- uint8_t unmatched = STATE(MatchingState::UNMATCHED);
- uint8_t match_in_progress = STATE(MatchingState::MATCHING_IN_PROGRESS);
+ const uint8_t matched = STATE(MatchingState::MATCHED);
+ const uint8_t match_in_progress = STATE(MatchingState::MATCHING_IN_PROGRESS);

// Indicates that u wants to join the cluster of v.
// Will be important later for conflict resolution.
bool success = false;
const HypernodeWeight weight_u = hypergraph.nodeWeight(u);
- HypernodeWeight weight_v = _cluster_weight[v];
+ HypernodeWeight weight_v = _cluster_weight[v].load(std::memory_order_relaxed);
if ( weight_u + weight_v <= _context.coarsening.max_allowed_node_weight ) {

- if ( _matching_state[u].compare_exchange_strong(unmatched, match_in_progress) ) {
- _matching_partner[u] = v;
+ uint8_t expect_unmatched_u = STATE(MatchingState::UNMATCHED);
+ if ( _matching_state[u].compare_exchange_strong(expect_unmatched_u, match_in_progress, std::memory_order_relaxed) ) {
[Inline review thread]

Member: In a compare-exchange, the memory ordering should be acquire.

Author: Uhm, for what reason? This is absolutely not generally true. You can't just map operations to orderings; that is not how memory ordering semantics work.

Member: Wouldn't some of the loads and stores in the locked state be allowed to be moved before the CAS? Is that why the load on line 389 has acquire ordering? Does that prevent this?

Author: So, the accesses we need to be careful about are loads and stores of cluster_ids. New cluster IDs are written by the "owning" thread of the corresponding node in the joinCluster operation. Cluster IDs are read either during conflict resolution (line 440) or when the state of v is already MATCHED (line 393). So in these two cases, we need to establish a happens-before relationship between the reading thread (let's say t_u) and the writing thread (t_v).

Line 393: Since u is owned by t_u, no thread other than t_u has performed a write on matching_state[u]. Thus, using acquire semantics for the compare_exchange wouldn't do anything here, since there is nothing to establish a happens-before relationship with. However, t_v has updated matching_state[v] to MATCHED after updating the cluster ID. What we need is therefore a happens-before relationship with regard to matching_state[v]. This is achieved by the release semantics in line 448 (as well as 403/430, though I think the release semantics aren't even strictly necessary there, since the cluster ID does not change) and the load on line 389 with acquire semantics.

Line 440: Same principle; in this case the acquire semantics on matching_state[v] are provided by the load in line 432.

Now, of course this does not provide any ordering guarantees for the accesses to _matching_partner (e.g. line 384). However, as discussed before, acquire semantics on the compare_exchange wouldn't help with this either, since there is nothing to synchronize against (I guess if we wanted that ordering we could use release semantics for the write in line 384). Luckily, the conflict resolution code does not need any ordering guarantees, since it just loops until a consistent state is detected. Only when the conflict is resolved and writes happen do we need the acquire/release semantics.

So the summarized argument for correctness is that (a) all accesses to cluster_ids are synchronized via acquire/release semantics on the read/write when _matching_state[v] changes to MATCHED, and (b) we don't need ordering guarantees for accesses to _matching_partner.

Of course there is also _cluster_weight, but here we just accept that a cluster might get too heavy if a race condition happens. To fix this we would need a compare_exchange loop in joinCluster, which we probably don't want.
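To make the argument above concrete, here is a minimal, self-contained sketch of the publish pattern being described (illustrative names and values, not the actual coarsener code): t_v writes the cluster id and then publishes MATCHED with release ordering, and t_u's acquire load of the state guarantees it sees that cluster id.

    #include <atomic>
    #include <cstdint>

    // Illustrative state values; the real code uses the MatchingState enum.
    constexpr uint8_t UNMATCHED = 0;
    constexpr uint8_t MATCHED = 2;

    std::atomic<uint8_t> matching_state_v{UNMATCHED};
    int cluster_id_v = 0;  // plain, non-atomic data

    void writer_t_v() {
      cluster_id_v = 42;  // write the cluster id first...
      matching_state_v.store(MATCHED, std::memory_order_release);  // ...then publish
    }

    void reader_t_u() {
      if (matching_state_v.load(std::memory_order_acquire) == MATCHED) {
        // The acquire load synchronizes-with the release store, so reading the
        // non-atomic cluster_id_v here is data-race-free and sees 42.
        int rep = cluster_id_v;
        (void)rep;
      }
    }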

+ _matching_partner[u].store(v, std::memory_order_relaxed);
// Current thread gets "ownership" for vertex u. Only threads with "ownership"
// can change the cluster id of a vertex.

- uint8_t matching_state_v = _matching_state[v].load();
- if ( matching_state_v == STATE(MatchingState::MATCHED) ) {
+ uint8_t expect_unmatched_v = STATE(MatchingState::UNMATCHED);
+ uint8_t matching_state_v = _matching_state[v].load(std::memory_order_acquire);
+ if ( matching_state_v == matched ) {
// Vertex v is already matched and will not change its cluster id anymore.
// In that case, it is safe to set the cluster id of u to the cluster id of v.
const HypernodeID rep = cluster_ids[v];
- ASSERT(_matching_state[rep] == STATE(MatchingState::MATCHED));
+ ASSERT(_matching_state[rep] == matched);
success = joinCluster<has_fixed_vertices>(hypergraph,
u, rep, cluster_ids, contracted_nodes, fixed_vertices);
- } else if ( _matching_state[v].compare_exchange_strong(unmatched, match_in_progress) ) {
+ } else if ( matching_state_v == expect_unmatched_v &&
+             _matching_state[v].compare_exchange_strong(expect_unmatched_v, match_in_progress, std::memory_order_relaxed) ) {
// Current thread has "ownership" of u and v and can change the cluster ids
// of both vertices in a thread-safe manner.
success = joinCluster<has_fixed_vertices>(hypergraph,
u, v, cluster_ids, contracted_nodes, fixed_vertices);
- _matching_state[v] = STATE(MatchingState::MATCHED);
+ _matching_state[v].store(matched, std::memory_order_release);
} else {
// State of v must be either MATCHING_IN_PROGRESS, or another thread changed the state
// in the meantime to MATCHED. We have to wait until the state of v changes to
// MATCHED, or resolve the conflict if u is matched within a cyclic matching dependency.

// Conflict Resolution
- while ( _matching_state[v] == STATE(MatchingState::MATCHING_IN_PROGRESS) ) {

+ do {
// Check if current vertex is in a cyclic matching dependency
HypernodeID cur_u = u;
HypernodeID smallest_node_id_in_cycle = cur_u;
- while ( _matching_partner[cur_u] != u && _matching_partner[cur_u] != cur_u ) {
- cur_u = _matching_partner[cur_u];
- smallest_node_id_in_cycle = std::min(smallest_node_id_in_cycle, cur_u);
+ while (true) {
+ HypernodeID next_u = _matching_partner[cur_u].load(std::memory_order_relaxed);
+ if (next_u != u && next_u != cur_u) {
+ cur_u = next_u;
+ smallest_node_id_in_cycle = std::min(smallest_node_id_in_cycle, cur_u);
+ } else {
+ break;
+ }
}

// Resolve cyclic matching dependency
// Vertex with smallest id starts to resolve conflict
- const bool is_in_cyclic_dependency = _matching_partner[cur_u] == u;
+ const bool is_in_cyclic_dependency = _matching_partner[cur_u].load(std::memory_order_relaxed) == u;
if ( is_in_cyclic_dependency && u == smallest_node_id_in_cycle) {
success = joinCluster<has_fixed_vertices>(hypergraph,
u, v, cluster_ids, contracted_nodes, fixed_vertices);
- _matching_state[v] = STATE(MatchingState::MATCHED);
+ _matching_state[v].store(matched, std::memory_order_release);
}
- }
+ } while ( _matching_state[v].load(std::memory_order_acquire) == match_in_progress );
+ // note: the loop provides acquire semantics for the block below

// If u is still in state MATCHING_IN_PROGRESS, its matching partner v
// must have been matched with another vertex in the meantime. Therefore,
// we try to match u with the representative of v's cluster.
- if ( _matching_state[u] == STATE(MatchingState::MATCHING_IN_PROGRESS) ) {
- ASSERT( _matching_state[v] == STATE(MatchingState::MATCHED) );
+ if ( _matching_state[u].load(std::memory_order_relaxed) == match_in_progress ) {
+ ASSERT( _matching_state[v] == matched );
const HypernodeID rep = cluster_ids[v];
success = joinCluster<has_fixed_vertices>(hypergraph,
u, rep, cluster_ids, contracted_nodes, fixed_vertices);
}
}
_rater.markAsMatched(u);
_rater.markAsMatched(v);
- _matching_partner[u] = u;
- _matching_state[u] = STATE(MatchingState::MATCHED);
+ _matching_partner[u].store(u, std::memory_order_relaxed);
+ _matching_state[u].store(matched, std::memory_order_release);
}
}
return success;
@@ -448,7 +469,7 @@ class MultilevelCoarsener : public ICoarsener,
ASSERT(rep == cluster_ids[rep]);
bool success = false;
const HypernodeWeight weight_of_u = hypergraph.nodeWeight(u);
- const HypernodeWeight weight_of_rep = _cluster_weight[rep];
+ const HypernodeWeight weight_of_rep = _cluster_weight[rep].load(std::memory_order_relaxed);
bool cluster_join_operation_allowed =
weight_of_u + weight_of_rep <= _context.coarsening.max_allowed_node_weight;
if constexpr ( has_fixed_vertices ) {
@@ -458,12 +479,10 @@ class MultilevelCoarsener : public ICoarsener,
}
if ( cluster_join_operation_allowed ) {
cluster_ids[u] = rep;
- _cluster_weight[rep] += weight_of_u;
+ _cluster_weight[rep].fetch_add(weight_of_u, std::memory_order_relaxed);
++contracted_nodes;
success = true;
}
- _matching_partner[u] = u;
- _matching_state[u] = STATE(MatchingState::MATCHED);
return success;
}

@@ -498,7 +517,7 @@ class MultilevelCoarsener : public ICoarsener,
parallel::scalable_vector<HypernodeID> _current_vertices;
parallel::scalable_vector<AtomicMatchingState> _matching_state;
parallel::scalable_vector<AtomicWeight> _cluster_weight;
- parallel::scalable_vector<HypernodeID> _matching_partner;
+ parallel::scalable_vector<AtomicID> _matching_partner;
int _pass_nr;
utils::ProgressBar _progress_bar;
bool _enable_randomization;
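A side note on the type change of _matching_partner from scalable_vector<HypernodeID> to scalable_vector<AtomicID>: concurrent reads and writes to a partner entry now go through explicit atomics. parallel::IntegralAtomicWrapper is presumably a copyable wrapper around std::atomic, which is needed because std::atomic itself is not copyable and thus cannot be stored in resizable containers. A sketch of that common pattern (assumed shape, not the actual mt-kahypar implementation):

    #include <atomic>

    // Copyable atomic: the copy operations are racy by design and use
    // relaxed ordering; callers pick the ordering for load/store explicitly.
    template <typename T>
    struct CopyableAtomic {
      std::atomic<T> value{};

      CopyableAtomic() = default;
      explicit CopyableAtomic(T v) : value(v) {}
      CopyableAtomic(const CopyableAtomic& other)
          : value(other.value.load(std::memory_order_relaxed)) {}
      CopyableAtomic& operator=(const CopyableAtomic& other) {
        value.store(other.value.load(std::memory_order_relaxed),
                    std::memory_order_relaxed);
        return *this;
      }

      T load(std::memory_order o) const { return value.load(o); }
      void store(T v, std::memory_order o) { value.store(v, o); }
    };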
11 changes: 11 additions & 0 deletions mt-kahypar/partition/mapping/target_graph.h
@@ -33,12 +33,23 @@
#include <tbb/enumerable_thread_specific.h>

#ifdef __linux__
+ #ifdef __clang__
+ #pragma clang diagnostic push
+ #pragma clang diagnostic ignored "-Wunused-parameter"
[Inline review thread]

Member: Can we fix the unused parameter warning?

Author: Well, if anyone wants to open a PR at growt, go ahead 😄

Member: Ah :) No thanks. I'm actually in favor of removing growt and just using tbb_unordered_map.

Author: Yeah, removing might make sense, but perhaps in another PR.

Member: The tbb::unordered_map was 2-3 orders of magnitude slower for caching the Steiner tree calculations -> do not remove :D
+ #endif
+ #ifdef __GNUC__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wpedantic"
+ #endif
#include "allocator/alignedallocator.hpp"
#include "data-structures/hash_table_mods.hpp"
#include "data-structures/table_config.hpp"
+ #ifdef __GNUC__
#pragma GCC diagnostic pop
+ #endif
+ #ifdef __clang__
+ #pragma clang diagnostic pop
+ #endif
#elif defined(_WIN32) or defined(__APPLE__)
#include <tbb/concurrent_unordered_map.h>
#endif
6 changes: 2 additions & 4 deletions mt-kahypar/partition/refinement/fm/fm_commons.h
@@ -155,15 +155,13 @@ class UnconstrainedFMData {
using BucketID = uint32_t;
using AtomicBucketID = parallel::IntegralAtomicWrapper<BucketID>;

- #pragma GCC diagnostic push
- #pragma GCC diagnostic ignored "-Wmismatched-tags"
template<typename GraphAndGainTypes>
- struct InitializationHelper {
+ class InitializationHelper {
+ public:
static void initialize(UnconstrainedFMData& data, const Context& context,
const typename GraphAndGainTypes::PartitionedHypergraph& phg,
const typename GraphAndGainTypes::GainCache& gain_cache);
};
- #pragma GCC diagnostic pop

static constexpr BucketID NUM_BUCKETS = 16;
static constexpr double BUCKET_FACTOR = 1.5;
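The struct-to-class change removes the need for the -Wmismatched-tags suppression: clang emits that warning when a type is declared with one class-key and defined with the other. A minimal reproduction of the situation this hunk appears to fix, assuming the helper is forward-declared as a class elsewhere:

    template <typename T>
    class InitializationHelper;   // forward declaration says 'class'

    template <typename T>
    struct InitializationHelper { // definition said 'struct' -> -Wmismatched-tags on clang
      static void initialize();
    };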
2 changes: 1 addition & 1 deletion python/pybind11
Submodule pybind11 updated 201 files
3 changes: 3 additions & 0 deletions tests/CMakeLists.txt
@@ -4,6 +4,9 @@ set_property(TARGET mt_kahypar_tests PROPERTY CXX_STANDARD 17)
set_property(TARGET mt_kahypar_tests PROPERTY CXX_STANDARD_REQUIRED ON)
target_link_libraries(mt_kahypar_tests ${Boost_LIBRARIES} TBB::tbb TBB::tbbmalloc_proxy)

+ # suppress warnings generated by TYPED_TEST_SUITE macro from gtest
+ target_compile_options(mt_kahypar_tests PRIVATE "-Wno-gnu-zero-variadic-macro-arguments")

add_subdirectory(datastructures)
add_subdirectory(interface)
add_subdirectory(io)
2 changes: 1 addition & 1 deletion tests/datastructures/gain_updates_test.cc
@@ -60,7 +60,7 @@ class AGainUpdate : public Test {
Km1GainCache gain_cache;
};

- TYPED_TEST_CASE(AGainUpdate, tests::HypergraphTestTypeTraits);
+ TYPED_TEST_SUITE(AGainUpdate, tests::HypergraphTestTypeTraits);

TYPED_TEST(AGainUpdate, Example1) {
this->phg.setNodePart(0, 0);
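For background on this rename (which recurs in the test diffs below): googletest replaced its "test case" terminology with "test suite" around release 1.10 and deprecated the old macros, so TYPED_TEST_CASE now emits deprecation warnings. The rename is a drop-in replacement; a minimal sketch with a hypothetical fixture:

    #include <gtest/gtest.h>

    template <typename T>
    class AnExampleFixture : public ::testing::Test { };  // hypothetical fixture

    using ExampleTypes = ::testing::Types<int, double>;
    TYPED_TEST_SUITE(AnExampleFixture, ExampleTypes);     // formerly TYPED_TEST_CASE

    TYPED_TEST(AnExampleFixture, DefaultConstructs) {
      TypeParam value{};
      (void)value;
    }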
2 changes: 2 additions & 0 deletions tests/datastructures/nlevel_smoke_test.cc
@@ -517,6 +517,7 @@ TEST(ANlevelHypergraph, SimulatesParallelContractionsAndAccessToHypergraph) {
}
}
}
+ unused(rating);
});
}
}, [&] {
@@ -690,6 +691,7 @@ TEST(ANlevelGraph, SimulatesParallelContractionsAndAccessToHypergraph) {
}
}
}
+ unused(rating);
});
}
}, [&] {
2 changes: 1 addition & 1 deletion tests/datastructures/partitioned_graph_test.cc
@@ -154,7 +154,7 @@ void executeConcurrent(const F1& f1, const F2& f2) {
f2();
});
}
- TYPED_TEST_CASE(APartitionedGraph, tests::GraphTestTypeTraits);
+ TYPED_TEST_SUITE(APartitionedGraph, tests::GraphTestTypeTraits);

TYPED_TEST(APartitionedGraph, HasCorrectPartWeightAndSizes) {
ASSERT_EQ(3, this->partitioned_hypergraph.partWeight(0));
2 changes: 1 addition & 1 deletion tests/datastructures/partitioned_hypergraph_smoke_test.cc
@@ -130,7 +130,7 @@ typedef ::testing::Types<TestConfig<StaticHypergraphTypeTraits, 2, Objective::cu
ENABLE_HIGHEST_QUALITY(COMMA TestConfig<DynamicHypergraphTypeTraits COMMA 128 COMMA Objective::km1>)
ENABLE_LARGE_K(COMMA TestConfig<LargeKHypergraphTypeTraits COMMA 128 COMMA Objective::km1>)> TestConfigs;

- TYPED_TEST_CASE(AConcurrentHypergraph, TestConfigs);
+ TYPED_TEST_SUITE(AConcurrentHypergraph, TestConfigs);

template<typename HyperGraph>
void moveAllNodesOfHypergraphRandom(HyperGraph& hypergraph,