diff --git a/include/workaround.h b/include/workaround.h index 47e8b3736..5ef8fd861 100644 --- a/include/workaround.h +++ b/include/workaround.h @@ -11,15 +11,6 @@ #define CELERITY_WORKAROUND_DPCPP 0 #endif -#if defined(CELERITY_SIMSYCL) -#define CELERITY_WORKAROUND_SIMSYCL 1 -#define CELERITY_WORKAROUND_VERSION_MAJOR SIMSYCL_VERSION_MAJOR -#define CELERITY_WORKAROUND_VERSION_MINOR SIMSYCL_VERSION_MINOR -#define CELERITY_WORKAROUND_VERSION_PATCH SIMSYCL_VERSION_PATCH -#else -#define CELERITY_WORKAROUND_SIMSYCL 0 -#endif - #if defined(__HIPSYCL__) #define CELERITY_WORKAROUND_HIPSYCL 1 #define CELERITY_WORKAROUND_VERSION_MAJOR HIPSYCL_VERSION_MAJOR diff --git a/test/buffer_manager_tests.cc b/test/buffer_manager_tests.cc index 20544d77a..2c61a1f9e 100644 --- a/test/buffer_manager_tests.cc +++ b/test/buffer_manager_tests.cc @@ -811,7 +811,6 @@ namespace detail { SECTION("when using device buffers") { const auto range = celerity::range<2>(32, 32); const auto offset = id<2>(32, 0); - auto sr = subrange<3>(id_cast<3>(offset), range_cast<3>(range)); get_device_accessor(bid, {48, 32}, {16, 0}); auto acc = get_device_accessor(bid, {32, 32}, {32, 0}); @@ -856,7 +855,6 @@ namespace detail { SECTION("when using device buffers") { auto range = celerity::range<1>(32); - auto sr = subrange<3>({}, range_cast<3>(range)); // For device accessors we test this both on host and device diff --git a/test/graph_generation_tests.cc b/test/graph_generation_tests.cc index a05fa24cf..c345f4293 100644 --- a/test/graph_generation_tests.cc +++ b/test/graph_generation_tests.cc @@ -203,7 +203,7 @@ TEST_CASE("distributed_graph_generator generates anti-dependencies for execution const auto run_test = [&](const node_id writing_node, const node_id other_node) { const auto only_one_writes = [=](chunk<1> chnk) -> subrange<1> { - if(chnk.range[0] == test_range) return subrange<1>{writing_node == 0 ? 0 : 64, 64}; + if(chnk.range[0] == test_range) return subrange<1>{writing_node == 0 ? 
0u : 64u, 64}; switch(chnk.offset[0]) { case 0: return writing_node == 0 ? chnk : subrange<1>{0, 0}; case 64: return writing_node == 1 ? chnk : subrange<1>{0, 0}; @@ -213,7 +213,7 @@ TEST_CASE("distributed_graph_generator generates anti-dependencies for execution }; // Both nodes write parts of the buffer. - const auto tid_a = dctx.device_compute(test_range).discard_write(buf0, acc::one_to_one{}).submit(); + [[maybe_unused]] const auto tid_a = dctx.device_compute(test_range).discard_write(buf0, acc::one_to_one{}).submit(); // Both nodes read the full buffer, but writing_node also writes to it. const auto tid_b = dctx.device_compute(test_range).read(buf0, acc::all{}).discard_write(buf0, only_one_writes).submit(); diff --git a/test/runtime_tests.cc b/test/runtime_tests.cc index e893e9367..98753416f 100644 --- a/test/runtime_tests.cc +++ b/test/runtime_tests.cc @@ -726,8 +726,8 @@ namespace detail { q.submit([&](handler& cgh) { accessor ga{out, cgh, celerity::access::all{}, read_only_host_task}; cgh.host_task(on_master_node, [=] { - for(size_t i = 0; i < 64; ++i) { - CHECK(ga[i] == i / 32 * 32 + (32 - 1 - i % 32)); + for(int i = 0; i < 64; ++i) { + CHECK(ga[static_cast<size_t>(i)] == i / 32 * 32 + (32 - 1 - i % 32)); } }); }); diff --git a/test/system/distr_tests.cc b/test/system/distr_tests.cc index e8d10738b..8c274a6c2 100644 --- a/test/system/distr_tests.cc +++ b/test/system/distr_tests.cc @@ -201,13 +201,12 @@ namespace detail { const auto global_range = test_utils::truncate_range({n * 4 * 3, 3 * 5, 2 * 11}); const auto local_range = test_utils::truncate_range({3, 5, 11}); const auto group_range = global_range / local_range; - const auto global_offset = test_utils::truncate_id({47, 53, 59}); buffer geo(global_range); q.submit([&](handler& cgh) { accessor g{geo, cgh, celerity::access::one_to_one{}, write_only, no_init}; - cgh.parallel_for>(celerity::nd_range{global_range, local_range}, /* global_offset,*/ [=](nd_item item) { + 
cgh.parallel_for>(celerity::nd_range{global_range, local_range}, [=](nd_item item) { auto group = item.get_group(); g[item.get_global_id()] = geometry{// {item.get_group_linear_id(), range_cast<3>(item.get_group_range()), id_cast<3>(item.get_local_id()), item.get_local_linear_id(), diff --git a/test/task_graph_tests.cc b/test/task_graph_tests.cc index b561dcc42..b3ae8e243 100644 --- a/test/task_graph_tests.cc +++ b/test/task_graph_tests.cc @@ -369,15 +369,15 @@ namespace detail { const auto max_para = 3; tt.tm.set_horizon_max_parallelism(max_para); - const auto buff_size = 128; - const auto num_tasks = 9; - const auto buff_elem_per_task = buff_size / num_tasks; + const size_t buff_size = 128; + const size_t num_tasks = 9; + const size_t buff_elem_per_task = buff_size / num_tasks; auto buf_a = tt.mbf.create_buffer(range<1>(buff_size), true /* mark_as_host_initialized */); auto current_horizon = task_manager_testspy::get_current_horizon(tt.tm); CHECK_FALSE(current_horizon.has_value()); - for(int i = 0; i < num_tasks; ++i) { + for(size_t i = 0; i < num_tasks; ++i) { const auto offset = buff_elem_per_task * i; test_utils::add_host_task(tt.tm, on_master_node, [&](handler& cgh) { buf_a.get_access(cgh, fixed<1>({offset, buff_elem_per_task})); @@ -411,7 +411,7 @@ namespace detail { task_id tid_2 = test_utils::add_host_task(tt.tm, on_master_node, [&](handler& cgh) { buf_a.get_access(cgh, fixed<1>({64, 64})); }); - task_id tid_3 = test_utils::add_host_task(tt.tm, on_master_node, [&](handler& cgh) { + [[maybe_unused]] task_id tid_3 = test_utils::add_host_task(tt.tm, on_master_node, [&](handler& cgh) { buf_a.get_access(cgh, fixed<1>({32, 64})); }); task_id tid_4 = test_utils::add_host_task(tt.tm, on_master_node, [&](handler& cgh) { @@ -422,10 +422,10 @@ namespace detail { CHECK(task_manager_testspy::get_num_horizons(tt.tm) == 1); CHECK(horizon.has_value()); - task_id tid_6 = test_utils::add_host_task(tt.tm, on_master_node, [&](handler& cgh) { + [[maybe_unused]] task_id tid_6 = 
test_utils::add_host_task(tt.tm, on_master_node, [&](handler& cgh) { buf_b.get_access(cgh, fixed<1>({0, 128})); }); - task_id tid_7 = test_utils::add_host_task(tt.tm, on_master_node, [&](handler& cgh) { + [[maybe_unused]] task_id tid_7 = test_utils::add_host_task(tt.tm, on_master_node, [&](handler& cgh) { buf_b.get_access(cgh, fixed<1>({0, 128})); }); @@ -437,7 +437,7 @@ namespace detail { CHECK(region_map_a.get_region_values(make_region(32, 96)).front().second.value() == tid_4); } - task_id tid_8 = test_utils::add_host_task(tt.tm, on_master_node, [&](handler& cgh) { + [[maybe_unused]] task_id tid_8 = test_utils::add_host_task(tt.tm, on_master_node, [&](handler& cgh) { buf_b.get_access(cgh, fixed<1>({0, 128})); }); @@ -482,8 +482,7 @@ namespace detail { // We need 7 tasks to generate a pseudo-critical path length of 6 (3x2 horizon step size), // and another one that triggers the actual deferred deletion. for(int i = 0; i < 8; ++i) { - const auto tid = - test_utils::add_host_task(tt.tm, on_master_node, [&](handler& cgh) { buf.get_access(cgh, all{}); }); + test_utils::add_host_task(tt.tm, on_master_node, [&](handler& cgh) { buf.get_access(cgh, all{}); }); const auto current_horizon = task_manager_testspy::get_current_horizon(tt.tm); if(current_horizon && *current_horizon > last_executed_horizon) { last_executed_horizon = *current_horizon; @@ -516,7 +515,7 @@ namespace detail { tt.tm.set_horizon_step(2); auto buf = tt.mbf.create_buffer(range<1>(1)); - const auto first_collective = test_utils::add_host_task(tt.tm, experimental::collective, [&](handler& cgh) {}); + [[maybe_unused]] const auto first_collective = test_utils::add_host_task(tt.tm, experimental::collective, [&](handler& cgh) {}); // generate exactly two horizons for(int i = 0; i < 4; ++i) { @@ -605,7 +604,7 @@ namespace detail { tt.tm.set_horizon_step(2); auto ho = tt.mhof.create_host_object(); - const auto first_task = + [[maybe_unused]] const auto first_task = test_utils::add_host_task(tt.tm, 
on_master_node, [&](handler& cgh) { ho.add_side_effect(cgh, experimental::side_effect_order::sequential); }); // generate exactly two horizons