Add dump tests for computed mappings
bernhardmgruber committed Sep 5, 2022
1 parent c839fb6 commit 5fd2056
Showing 5 changed files with 177 additions and 157 deletions.
25 changes: 19 additions & 6 deletions include/llama/DumpMapping.hpp
@@ -236,11 +236,11 @@

std::string svg;

const auto hasAnyComputedField = internal::hasAnyComputedField<Mapping>();
constexpr auto hasAnyComputedField = internal::hasAnyComputedField<Mapping>();
std::array<int, Mapping::blobCount + hasAnyComputedField + 1> blobYOffset{};
for(std::size_t i = 0; i < Mapping::blobCount + hasAnyComputedField; i++)
auto writeBlobHeader = [&](std::size_t i, std::size_t size, std::string_view name)
{
const auto blobRows = (mapping.blobSize(i) + wrapByteCount - 1) / wrapByteCount;
const auto blobRows = (size + wrapByteCount - 1) / wrapByteCount;
blobYOffset[i + 1] = blobYOffset[i] + (blobRows + 1) * byteSizeInPixel; // one row gap between blobs
const auto height = blobRows * byteSizeInPixel;
svg += fmt::format(
@@ -252,8 +252,10 @@
height,
blobBlockWidth / 2,
blobYOffset[i] + height / 2,
i < Mapping::blobCount ? "Blob: " + std::to_string(i) : "Comp.");
}
name);
};
for(std::size_t i = 0; i < Mapping::blobCount; i++)
writeBlobHeader(i, mapping.blobSize(i), "Blob: " + std::to_string(i));

svg = fmt::format(
R"(<?xml version="1.0" encoding="UTF-8" standalone="no"?>
@@ -263,7 +265,7 @@
</style>
)",
blobBlockWidth + wrapByteCount * byteSizeInPixel,
blobYOffset.back() - byteSizeInPixel,
blobYOffset.back() == 0 ? 987654321 : blobYOffset.back() - byteSizeInPixel,
byteSizeInPixel / 2)
+ svg;

@@ -349,6 +351,17 @@
svg += R"(</svg>
)";
}

if(hasAnyComputedField)
{
writeBlobHeader(Mapping::blobCount, computedSizeSoFar, "Comp.");

// fix total SVG size
const auto i = svg.find("987654321");
assert(i != std::string::npos);
svg.replace(i, 9, std::to_string(blobYOffset.back() - byteSizeInPixel));
}

svg += "</svg>";
return svg;
}
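
With computed fields, the final SVG height is only known after the optional "Comp." pseudo-blob header has been appended, so the header written up front carries the sentinel 987654321, which is patched once the layout is complete. A standalone sketch of that pattern (patchSvgHeight is an illustrative name, not LLAMA API):

#include <cassert>
#include <string>

// Emit a header before the final height is known, then patch the sentinel.
auto patchSvgHeight(std::string svg, int finalHeight) -> std::string
{
    const auto pos = svg.find("987654321"); // sentinel placed in the <svg> header
    assert(pos != std::string::npos);
    svg.replace(pos, 9, std::to_string(finalHeight));
    return svg;
}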
142 changes: 142 additions & 0 deletions tests/common.hpp
@@ -163,3 +163,145 @@ void iotaCheckView(View& view)
++value;
});
}

// Maps each element of the record dimension into a separate blob. Each blob stores Modulus elements. If the array
// dimensions are larger than Modulus, elements are overwritten.
template<typename TArrayExtents, typename TRecordDim, std::size_t Modulus>
struct ModulusMapping : TArrayExtents
{
using ArrayExtents = TArrayExtents;
using ArrayIndex = typename ArrayExtents::Index;
using RecordDim = TRecordDim;
static constexpr std::size_t blobCount = boost::mp11::mp_size<llama::FlatRecordDim<RecordDim>>::value;

LLAMA_FN_HOST_ACC_INLINE
constexpr explicit ModulusMapping(ArrayExtents extents, RecordDim = {}) : ArrayExtents(extents)
{
}

LLAMA_FN_HOST_ACC_INLINE constexpr auto extents() const -> const ArrayExtents&
{
return *this;
}

constexpr auto blobSize(std::size_t) const -> std::size_t
{
return Modulus * llama::sizeOf<RecordDim>;
}

template<std::size_t... RecordCoords>
constexpr auto blobNrAndOffset(ArrayIndex ai, llama::RecordCoord<RecordCoords...> = {}) const
-> llama::NrAndOffset<std::size_t>
{
const auto blob = llama::flatRecordCoord<RecordDim, llama::RecordCoord<RecordCoords...>>;
const auto offset = (llama::mapping::LinearizeArrayDimsCpp{}(ai, extents()) % Modulus)
* sizeof(llama::GetType<RecordDim, llama::RecordCoord<RecordCoords...>>);
return {blob, offset};
}
};

// Maps everything to blob 0, offset 0
template<typename TArrayExtents, typename TRecordDim>
struct MapEverythingToZero : TArrayExtents
{
using ArrayExtents = TArrayExtents;
using ArrayIndex = typename ArrayExtents::Index;
using RecordDim = TRecordDim;
static constexpr std::size_t blobCount = 1;

LLAMA_FN_HOST_ACC_INLINE
constexpr explicit MapEverythingToZero(ArrayExtents extents, RecordDim = {}) : ArrayExtents(extents)
{
}

LLAMA_FN_HOST_ACC_INLINE constexpr auto extents() const -> const ArrayExtents&
{
return *this;
}

constexpr auto blobSize(std::size_t) const -> std::size_t
{
return llama::product(extents()) * llama::sizeOf<RecordDim>;
}

template<std::size_t... RecordCoords>
constexpr auto blobNrAndOffset(ArrayIndex, llama::RecordCoord<RecordCoords...> = {}) const
-> llama::NrAndOffset<std::size_t>
{
return {0, 0};
}
};

using Triangle = llama::Record<
llama::Field<tag::A, Vec3D>,
llama::Field<tag::B, Vec3D>,
llama::Field<tag::C, Vec3D>,
llama::Field<tag::Normal, Vec3D>>;

template<typename ArrayExtents, typename RecordDim>
struct TriangleAoSWithComputedNormal : llama::mapping::PackedAoS<ArrayExtents, RecordDim>
{
using Base = llama::mapping::PackedAoS<ArrayExtents, RecordDim>;
using typename Base::ArrayIndex;

using Base::Base;

template<std::size_t... RecordCoords>
static constexpr auto isComputed(llama::RecordCoord<RecordCoords...>)
{
return llama::recordCoordCommonPrefixIsSame<llama::RecordCoord<RecordCoords...>, llama::RecordCoord<3>>;
}

template<std::size_t... RecordCoords, typename Blob>
constexpr auto compute(
ArrayIndex ai,
llama::RecordCoord<RecordCoords...>,
llama::Array<Blob, Base::blobCount>& storageBlobs) const
{
auto fetch = [&](llama::NrAndOffset<std::size_t> nrAndOffset) -> double
{ return *reinterpret_cast<double*>(&storageBlobs[nrAndOffset.nr][nrAndOffset.offset]); };

const auto ax = fetch(Base::template blobNrAndOffset<0, 0>(ai));
const auto ay = fetch(Base::template blobNrAndOffset<0, 1>(ai));
const auto az = fetch(Base::template blobNrAndOffset<0, 2>(ai));
const auto bx = fetch(Base::template blobNrAndOffset<1, 0>(ai));
const auto by = fetch(Base::template blobNrAndOffset<1, 1>(ai));
const auto bz = fetch(Base::template blobNrAndOffset<1, 2>(ai));
const auto cx = fetch(Base::template blobNrAndOffset<2, 0>(ai));
const auto cy = fetch(Base::template blobNrAndOffset<2, 1>(ai));
const auto cz = fetch(Base::template blobNrAndOffset<2, 2>(ai));

const auto e1x = bx - ax;
const auto e1y = by - ay;
const auto e1z = bz - az;
const auto e2x = cx - ax;
const auto e2y = cy - ay;
const auto e2z = cz - az;

const auto crossx = e1y * e2z - e1z * e2y;
const auto crossy = -(e1x * e2z - e1z * e2x);
const auto crossz = e1x * e2y - e1y * e2x;

const auto length = std::sqrt(crossx * crossx + crossy * crossy + crossz * crossz);

[[maybe_unused]] const auto normalx = crossx / length;
[[maybe_unused]] const auto normaly = crossy / length;
[[maybe_unused]] const auto normalz = crossz / length;

using DC = llama::RecordCoord<RecordCoords...>;
if constexpr(std::is_same_v<DC, llama::RecordCoord<3, 0>>)
return normalx;
if constexpr(std::is_same_v<DC, llama::RecordCoord<3, 1>>)
return normaly;
if constexpr(std::is_same_v<DC, llama::RecordCoord<3, 2>>)
return normalz;
// if constexpr (std::is_same_v<DC, llama::RecordCoord<3>>)
//{
// llama::One<llama::GetType<RecordDim, llama::RecordCoord<3>>> normal;
// normal(llama::RecordCoord<0>{}) = normalx;
// normal(llama::RecordCoord<1>{}) = normaly;
// normal(llama::RecordCoord<2>{}) = normalz;
// return normal;
//}
}
};
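
These helper mappings now live in tests/common.hpp so that both proofs.cpp and dump.cpp can reuse them. A hedged sketch of how the computed-normal mapping is exercised (adapted from computedprop.cpp; assumes Vec3D exposes tag::X/Y/Z as in common.hpp and that llama::allocView routes computed record coords through compute()):

auto extents = llama::ArrayExtentsDynamic<std::size_t, 1>{10};
auto mapping = TriangleAoSWithComputedNormal<decltype(extents), Triangle>{extents};
auto view = llama::allocView(mapping);
view(0)(tag::A{}, tag::X{}) = 1.0; // ... fill A, B and C of triangle 0 ...
// Reading the normal loads nothing from the blobs; it invokes compute():
const double nx = view(0)(tag::Normal{}, tag::X{});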
79 changes: 1 addition & 78 deletions tests/computedprop.cpp
@@ -3,87 +3,10 @@
#include <catch2/catch_approx.hpp>
#include <numeric>

namespace
{
using Triangle = llama::Record<
llama::Field<tag::A, Vec3D>,
llama::Field<tag::B, Vec3D>,
llama::Field<tag::C, Vec3D>,
llama::Field<tag::Normal, Vec3D>>;

template<typename ArrayExtents, typename RecordDim>
struct AoSWithComputedNormal : llama::mapping::PackedAoS<ArrayExtents, RecordDim>
{
using Base = llama::mapping::PackedAoS<ArrayExtents, RecordDim>;
using typename Base::ArrayIndex;

using Base::Base;

template<std::size_t... RecordCoords>
static constexpr auto isComputed(llama::RecordCoord<RecordCoords...>)
{
return llama::recordCoordCommonPrefixIsSame<llama::RecordCoord<RecordCoords...>, llama::RecordCoord<3>>;
}

template<std::size_t... RecordCoords, typename Blob>
constexpr auto compute(
ArrayIndex ai,
llama::RecordCoord<RecordCoords...>,
llama::Array<Blob, Base::blobCount>& storageBlobs) const
{
auto fetch = [&](llama::NrAndOffset<std::size_t> nrAndOffset) -> double
{ return *reinterpret_cast<double*>(&storageBlobs[nrAndOffset.nr][nrAndOffset.offset]); };

const auto ax = fetch(Base::template blobNrAndOffset<0, 0>(ai));
const auto ay = fetch(Base::template blobNrAndOffset<0, 1>(ai));
const auto az = fetch(Base::template blobNrAndOffset<0, 2>(ai));
const auto bx = fetch(Base::template blobNrAndOffset<1, 0>(ai));
const auto by = fetch(Base::template blobNrAndOffset<1, 1>(ai));
const auto bz = fetch(Base::template blobNrAndOffset<1, 2>(ai));
const auto cx = fetch(Base::template blobNrAndOffset<2, 0>(ai));
const auto cy = fetch(Base::template blobNrAndOffset<2, 1>(ai));
const auto cz = fetch(Base::template blobNrAndOffset<2, 2>(ai));

const auto e1x = bx - ax;
const auto e1y = by - ay;
const auto e1z = bz - az;
const auto e2x = cx - ax;
const auto e2y = cy - ay;
const auto e2z = cz - az;

const auto crossx = e1y * e2z - e1z * e2y;
const auto crossy = -(e1x * e2z - e1z * e2x);
const auto crossz = e1x * e2y - e1y * e2x;

const auto length = std::sqrt(crossx * crossx + crossy * crossy + crossz * crossz);

[[maybe_unused]] const auto normalx = crossx / length;
[[maybe_unused]] const auto normaly = crossy / length;
[[maybe_unused]] const auto normalz = crossz / length;

using DC = llama::RecordCoord<RecordCoords...>;
if constexpr(std::is_same_v<DC, llama::RecordCoord<3, 0>>)
return normalx;
if constexpr(std::is_same_v<DC, llama::RecordCoord<3, 1>>)
return normaly;
if constexpr(std::is_same_v<DC, llama::RecordCoord<3, 2>>)
return normalz;
// if constexpr (std::is_same_v<DC, llama::RecordCoord<3>>)
//{
// llama::One<llama::GetType<RecordDim, llama::RecordCoord<3>>> normal;
// normal(llama::RecordCoord<0>{}) = normalx;
// normal(llama::RecordCoord<1>{}) = normaly;
// normal(llama::RecordCoord<2>{}) = normalz;
// return normal;
//}
}
};
} // namespace

TEST_CASE("computedprop")
{
auto extents = llama::ArrayExtentsDynamic<std::size_t, 1>{10};
auto mapping = AoSWithComputedNormal<decltype(extents), Triangle>{extents};
auto mapping = TriangleAoSWithComputedNormal<decltype(extents), Triangle>{extents};

STATIC_REQUIRE(mapping.blobCount == 1);
CHECK(mapping.blobSize(0) == sizeof(double) * 10 * 12);
15 changes: 15 additions & 0 deletions tests/dump.cpp
@@ -340,4 +340,19 @@ TEST_CASE("dump.ParticleAligned.PackedAoS")
{
dump(llama::mapping::PackedAoS<ArrayExtents, ParticleAligned>{extents});
}

TEST_CASE("dump.Particle.ModulusMapping.8")
{
dump(ModulusMapping<ArrayExtents, Particle, 8>{extents});
}

TEST_CASE("dump.Particle.MapEverythingToZero")
{
dump(MapEverythingToZero<ArrayExtents, Particle>{extents});
}

TEST_CASE("dump.Triangle.TriangleAoSWithComputedNormal")
{
dump(TriangleAoSWithComputedNormal<ArrayExtents, Triangle>{extents});
}
#endif
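
The dump helper used by these new cases is defined earlier in tests/dump.cpp and is not part of this diff. A rough equivalent, assuming llama::toSvg(mapping) from DumpMapping.hpp returns the SVG as a std::string (dumpSvg is an illustrative name):

#include <fstream>
#include <string>

// Render the byte layout of a mapping and write it next to the test binary.
template<typename Mapping>
void dumpSvg(const Mapping& mapping, const std::string& baseName)
{
    std::ofstream{baseName + ".svg"} << llama::toSvg(mapping);
}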
73 changes: 0 additions & 73 deletions tests/proofs.cpp
@@ -25,40 +25,6 @@ TEST_CASE("mapsNonOverlappingly.AlignedAoS")
}
#endif

namespace
{
template<typename TArrayExtents, typename TRecordDim>
struct MapEverythingToZero : TArrayExtents
{
using ArrayExtents = TArrayExtents;
using ArrayIndex = typename ArrayExtents::Index;
using RecordDim = TRecordDim;
static constexpr std::size_t blobCount = 1;

LLAMA_FN_HOST_ACC_INLINE
constexpr explicit MapEverythingToZero(ArrayExtents extents, RecordDim = {}) : ArrayExtents(extents)
{
}

LLAMA_FN_HOST_ACC_INLINE constexpr auto extents() const -> const ArrayExtents&
{
return *this;
}

constexpr auto blobSize(std::size_t) const -> std::size_t
{
return llama::product(extents()) * llama::sizeOf<RecordDim>;
}

template<std::size_t... RecordCoords>
constexpr auto blobNrAndOffset(ArrayIndex, llama::RecordCoord<RecordCoords...> = {}) const
-> llama::NrAndOffset<std::size_t>
{
return {0, 0};
}
};
} // namespace

TEST_CASE("mapsNonOverlappingly.MapEverythingToZero")
{
#ifdef __cpp_constexpr_dynamic_alloc
@@ -73,45 +39,6 @@ TEST_CASE("mapsNonOverlappingly.MapEverythingToZero")
#endif
}

namespace
{
// maps each element of the record dimension into a separate blobs. Each blob stores Modulus elements. If the array
// dimensions are larger than Modulus, elements are overwritten.
template<typename TArrayExtents, typename TRecordDim, std::size_t Modulus>
struct ModulusMapping : TArrayExtents
{
using ArrayExtents = TArrayExtents;
using ArrayIndex = typename ArrayExtents::Index;
using RecordDim = TRecordDim;
static constexpr std::size_t blobCount = boost::mp11::mp_size<llama::FlatRecordDim<RecordDim>>::value;

LLAMA_FN_HOST_ACC_INLINE
constexpr explicit ModulusMapping(ArrayExtents extents, RecordDim = {}) : ArrayExtents(extents)
{
}

LLAMA_FN_HOST_ACC_INLINE constexpr auto extents() const -> const ArrayExtents&
{
return *this;
}

constexpr auto blobSize(std::size_t) const -> std::size_t
{
return Modulus * llama::sizeOf<RecordDim>;
}

template<std::size_t... RecordCoords>
constexpr auto blobNrAndOffset(ArrayIndex ai, llama::RecordCoord<RecordCoords...> = {}) const
-> llama::NrAndOffset<std::size_t>
{
const auto blob = llama::flatRecordCoord<RecordDim, llama::RecordCoord<RecordCoords...>>;
const auto offset = (llama::mapping::LinearizeArrayDimsCpp{}(ai, extents()) % Modulus)
* sizeof(llama::GetType<RecordDim, llama::RecordCoord<RecordCoords...>>);
return {blob, offset};
}
};
} // namespace

TEST_CASE("mapsNonOverlappingly.ModulusMapping")
{
#ifdef __cpp_constexpr_dynamic_alloc
