Skip to content

Commit

Permalink
Merge pull request #38 from axsaucedo/migrating_to_gtest
Browse files Browse the repository at this point in the history
Migrating to gtest
  • Loading branch information
axsaucedo authored Sep 5, 2020
2 parents e666c3f + c2d1465 commit 6e3c6a1
Show file tree
Hide file tree
Showing 17 changed files with 161 additions and 152 deletions.
2 changes: 0 additions & 2 deletions CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -35,8 +35,6 @@ if(KOMPUTE_OPT_BUILD_DOCS)
endif()

if(KOMPUTE_OPT_BUILD_TESTS)
include(CTest)
enable_testing()
add_subdirectory(test)
endif()

38 changes: 21 additions & 17 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -64,14 +64,12 @@ int main() {
auto tensorRhs = std::make_shared<kp::Tensor>(kp::Tensor({ 2., 4., 6. }));
auto tensorOut = std::make_shared<kp::Tensor>(kp::Tensor({ 0., 0., 0. }));

// Create tensor data in GPU
// Create the tensors' data in GPU memory
mgr.evalOpDefault<kp::OpCreateTensor>({ tensorLhs, tensorRhs, tensorOut });

// Run Kompute operation on the parameters provided with dispatch layout
mgr.evalOpDefault<kp::OpMult<3, 1, 1>>(
{ tensorLhs, tensorRhs, tensorOut },
true, // Whether to retrieve the output from GPU memory
std::vector<char>(shader.begin(), shader.end()));
{ tensorLhs, tensorRhs, tensorOut });

// Prints the output which is { 0, 4, 12 }
std::cout << fmt::format("Output: {}", tensorOutput.data()) << std::endl;
Expand All @@ -88,18 +86,22 @@ int main() {
auto tensorA = std::make_shared<kp::Tensor>(kp::Tensor({ 0, 1, 2 }));
auto tensorRhs = std::make_shared<kp::Tensor>(kp::Tensor({ 2, 4, 6 }));

// Define your shader as a string, or directly pass the compiled bytes
std::string shader(
"#version 450\n"
"layout (local_size_x = 1) in;\n"
"layout(set = 0, binding = 0) buffer bufa { uint a[]; };\n"
"layout(set = 0, binding = 1) buffer bufb { uint b[]; };\n"
"void main() {\n"
" uint index = gl_GlobalInvocationID.x;\n"
" b[index] = a[index];\n"
" a[index] = index;\n"
"}\n"
);
// Define your shader as a string (using string literals for simplicity)
// Or pass the raw bytes of the compiled shader as uint32_t
std::string shader(R"(
#version 450

layout (local_size_x = 1) in;

layout(set = 0, binding = 0) buffer a { float pa[]; };
layout(set = 0, binding = 1) buffer b { float pb[]; };

void main() {
uint index = gl_GlobalInvocationID.x;
pb[index] = pa[index];
pa[index] = index;
}
)");

// Create tensor data in GPU
mgr.evalOpDefault<kp::OpCreateTensor>({ tensorA, tensorB });
Expand Down Expand Up @@ -208,6 +210,8 @@ SPDLOG is the preferred logging library, however by default Vulkan Kompute runs

You can choose to build with or without SPDLOG by using the cmake flag `KOMPUTE_OPT_ENABLE_SPDLOG`.

Finally, remember that you will still need to set both the compile time log level with `SPDLOG_ACTIVE_LEVEL`, and the runtime log level with `spdlog::set_level(spdlog::level::debug);`.


## Motivations

Expand Down Expand Up @@ -267,7 +271,7 @@ We appreciate PRs and Issues. If you want to contribute try checking the "Good f
#### Dev Dependencies

* Testing
+ Catch2
+ GTest
* Documentation
+ Doxygen (with Dot)
+ Sphinx
Expand Down
8 changes: 5 additions & 3 deletions single_include/kompute/Kompute.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -3,11 +3,13 @@
#include <vulkan/vulkan.hpp>

// SPDLOG_ACTIVE_LEVEL must be defined before spdlog.h import
#if !defined(SPDLOG_ACTIVE_LEVEL)
#if DEBUG
#ifndef SPDLOG_ACTIVE_LEVEL
#define SPDLOG_ACTIVE_LEVEL SPDLOG_LEVEL_DEBUG
#endif
#else
#define SPDLOG_ACTIVE_LEVEL SPDLOG_LEVEL_INFO
#endif
#endif

#ifndef KOMPUTE_LOG_OVERRIDE
#if KOMPUTE_ENABLE_SPDLOG
Expand Down Expand Up @@ -1135,7 +1137,7 @@ OpAlgoBase<tX, tY, tZ>::record()

if (this->mCopyOutputData) {
// Barrier to ensure the shader code is executed before buffer read
for (std::shared_ptr<Tensor> tensor : this->mTensors) {
for (const std::shared_ptr<Tensor>& tensor : this->mTensors) {
tensor->recordBufferMemoryBarrier(
vk::AccessFlagBits::eShaderWrite,
vk::AccessFlagBits::eTransferRead,
Expand Down
9 changes: 7 additions & 2 deletions src/CMakeLists.txt
Original file line number Diff line number Diff line change
@@ -1,5 +1,10 @@

find_package(spdlog REQUIRED)

if(KOMPUTE_OPT_ENABLE_SPDLOG)
find_package(spdlog REQUIRED)
find_package(fmt REQUIRED)
endif()

find_package(Vulkan REQUIRED)

# In production builds all shaders are compiled into cpp files
Expand Down Expand Up @@ -30,6 +35,7 @@ target_link_libraries(
if(KOMPUTE_OPT_ENABLE_SPDLOG)
target_link_libraries(
kompute
fmt::fmt
spdlog::spdlog
)
endif()
Expand All @@ -38,7 +44,6 @@ add_dependencies(kompute
build_shaders
build_single_header)


add_library(kompute::kompute ALIAS kompute)


Expand Down
6 changes: 4 additions & 2 deletions src/Tensor.cpp
Original file line number Diff line number Diff line change
@@ -1,8 +1,8 @@

#if DEBUG
#if KOMPUTE_SPDLOG_ENABLED
#if KOMPUTE_ENABLE_SPDLOG
// Only enabled if spdlog is enabled
#include <spdlog/fmt/ranges.h>
#include <fmt/ranges.h>
#endif
#endif

Expand All @@ -18,8 +18,10 @@ Tensor::Tensor()

Tensor::Tensor(std::vector<float> data, TensorTypes tensorType)
{
#if DEBUG
SPDLOG_DEBUG(
"Kompute Tensor constructor data: {}, and type: {}", data, tensorType);
#endif

this->mData = data;
this->mShape = { static_cast<uint32_t>(data.size()) };
Expand Down
6 changes: 4 additions & 2 deletions src/include/kompute/Core.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -3,11 +3,13 @@
#include <vulkan/vulkan.hpp>

// SPDLOG_ACTIVE_LEVEL must be defined before spdlog.h import
#if !defined(SPDLOG_ACTIVE_LEVEL)
#if DEBUG
#ifndef SPDLOG_ACTIVE_LEVEL
#define SPDLOG_ACTIVE_LEVEL SPDLOG_LEVEL_DEBUG
#endif
#else
#define SPDLOG_ACTIVE_LEVEL SPDLOG_LEVEL_INFO
#endif
#endif

#ifndef KOMPUTE_LOG_OVERRIDE
#if KOMPUTE_ENABLE_SPDLOG
Expand Down
2 changes: 1 addition & 1 deletion src/include/kompute/operations/OpAlgoBase.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -293,7 +293,7 @@ OpAlgoBase<tX, tY, tZ>::record()

if (this->mCopyOutputData) {
// Barrier to ensure the shader code is executed before buffer read
for (std::shared_ptr<Tensor> tensor : this->mTensors) {
for (const std::shared_ptr<Tensor>& tensor : this->mTensors) {
tensor->recordBufferMemoryBarrier(
vk::AccessFlagBits::eShaderWrite,
vk::AccessFlagBits::eTransferRead,
Expand Down
8 changes: 6 additions & 2 deletions test/CMakeLists.txt
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@

find_package(Catch2 REQUIRED)
enable_testing()
find_package(GTest CONFIG REQUIRED)

file(GLOB test_kompute_CPP
"${CMAKE_CURRENT_SOURCE_DIR}/*.cpp"
Expand All @@ -13,7 +14,10 @@ target_include_directories(
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/compiled_shaders_include>
)

target_link_libraries(test_kompute PRIVATE Catch2::Catch2)
target_link_libraries(test_kompute PRIVATE
GTest::gtest
GTest::gmock)

target_link_libraries(test_kompute PRIVATE kompute)

add_test(NAME test_kompute COMMAND test_kompute)
Expand Down
11 changes: 6 additions & 5 deletions test/TestLogisticRegression.cpp
Original file line number Diff line number Diff line change
@@ -1,9 +1,9 @@

#include "catch2/catch.hpp"
#include "gtest/gtest.h"

#include "kompute/Kompute.hpp"

TEST_CASE("test_logistic_regression") {
TEST(LogisticRegressionAlgorithm, TestMainLogisticRegression) {

uint32_t ITERATIONS = 100;

Expand Down Expand Up @@ -71,9 +71,10 @@ TEST_CASE("test_logistic_regression") {
// * wi < 0.01
// * wj > 1.0
// * b < 0
REQUIRE(wIn->data()[0] < 0.01);
REQUIRE(wIn->data()[1] > 1.0);
REQUIRE(bIn->data()[0] < 0.0);
// TODO: Add EXPECT_DOUBLE_EQ instead
EXPECT_LT(wIn->data()[0], 0.01);
EXPECT_GT(wIn->data()[1], 1.0);
EXPECT_LT(bIn->data()[0], 0.0);

//SPDLOG_DEBUG("Result wIn: {}, bIn: {}",
// wIn->data(), bIn->data());
Expand Down
27 changes: 9 additions & 18 deletions test/TestMain.cpp
Original file line number Diff line number Diff line change
@@ -1,24 +1,15 @@
#define CATCH_CONFIG_RUNNER

// clang-format: SPDLOG_ACTIVE_LEVEL must be defined before spdlog.h import
#if DEBUG
#ifndef SPDLOG_ACTIVE_LEVEL
#define SPDLOG_ACTIVE_LEVEL SPDLOG_LEVEL_DEBUG
#endif
#endif

//#include <spdlog/spdlog.h>
//// clang-format: ranges.h must come after spdlog.h
//#include <fmt/ranges.h>
#include <gmock/gmock.h>

#include "catch2/catch.hpp"
#include <kompute/Kompute.hpp>

int main( int argc, char* argv[] ) {
int main(int argc, char *argv[]) {
testing::InitGoogleTest(&argc, argv);
testing::InitGoogleMock(&argc, argv);

int result = Catch::Session().run( argc, argv );

// global clean-up...
#if KOMPUTE_ENABLE_SPDLOG
spdlog::set_level(static_cast<spdlog::level::level_enum>(SPDLOG_ACTIVE_LEVEL));
#endif

return result;
return RUN_ALL_TESTS();
}

30 changes: 15 additions & 15 deletions test/TestManager.cpp
Original file line number Diff line number Diff line change
@@ -1,9 +1,9 @@

#include "catch2/catch.hpp"
#include "gtest/gtest.h"

#include "kompute/Kompute.hpp"

TEST_CASE("End to end OpMult Flow should execute correctly from manager")
TEST(TestManager, EndToEndOpMultFlow)
{
kp::Manager mgr;

Expand All @@ -20,10 +20,10 @@ TEST_CASE("End to end OpMult Flow should execute correctly from manager")

mgr.evalOp<kp::OpMult<>>({ tensorLHS, tensorRHS, tensorOutput });

REQUIRE(tensorOutput->data() == std::vector<float>{0, 4, 12});
EXPECT_EQ(tensorOutput->data(), std::vector<float>({0, 4, 12}));
}

TEST_CASE("End to end OpMult Flow should execute correctly from sequence") {
TEST(TestManager, OpMultSequenceFlow) {

std::shared_ptr<kp::Tensor> tensorLHS{ new kp::Tensor(
{ 0, 1, 2 }) };
Expand Down Expand Up @@ -51,10 +51,10 @@ TEST_CASE("End to end OpMult Flow should execute correctly from sequence") {
}
sqWeakPtr.reset();

REQUIRE(tensorOutput->data() == std::vector<float>{0, 4, 12});
EXPECT_EQ(tensorOutput->data(), std::vector<float>({0, 4, 12}));
}

TEST_CASE("Test manager get create functionality for sequences") {
TEST(TestManager, TestMultipleSequences) {
kp::Manager mgr;

std::weak_ptr<kp::Sequence> sqWeakPtrOne =
Expand All @@ -69,13 +69,13 @@ TEST_CASE("Test manager get create functionality for sequences") {
std::weak_ptr<kp::Sequence> sqWeakPtrTwoRef =
mgr.getOrCreateManagedSequence("sqTwo");

REQUIRE(sqWeakPtrOne.lock() == sqWeakPtrOneRef.lock());
REQUIRE(sqWeakPtrTwo.lock() != sqWeakPtrOneRef.lock());
REQUIRE(sqWeakPtrTwo.lock() == sqWeakPtrTwoRef.lock());
REQUIRE(sqWeakPtrOneRef.lock() != sqWeakPtrTwoRef.lock());
EXPECT_EQ(sqWeakPtrOne.lock(), sqWeakPtrOneRef.lock());
EXPECT_NE(sqWeakPtrTwo.lock(), sqWeakPtrOneRef.lock());
EXPECT_EQ(sqWeakPtrTwo.lock(), sqWeakPtrTwoRef.lock());
EXPECT_NE(sqWeakPtrOneRef.lock(), sqWeakPtrTwoRef.lock());
}

TEST_CASE("End to end OpMult Flow with OpCreateTensor called with multiple tensors") {
TEST(TestManager, TestMultipleTensorsAtOnce) {

std::shared_ptr<kp::Tensor> tensorLHS{ new kp::Tensor(
{ 0, 1, 2 }) };
Expand All @@ -94,9 +94,9 @@ TEST_CASE("End to end OpMult Flow with OpCreateTensor called with multiple tenso

sq->record<kp::OpCreateTensor>({ tensorLHS, tensorRHS, tensorOutput });

REQUIRE(tensorLHS->isInit());
REQUIRE(tensorRHS->isInit());
REQUIRE(tensorOutput->isInit());
EXPECT_TRUE(tensorLHS->isInit());
EXPECT_TRUE(tensorRHS->isInit());
EXPECT_TRUE(tensorOutput->isInit());

sq->record<kp::OpMult<>>({ tensorLHS, tensorRHS, tensorOutput });

Expand All @@ -105,5 +105,5 @@ TEST_CASE("End to end OpMult Flow with OpCreateTensor called with multiple tenso
}
sqWeakPtr.reset();

REQUIRE(tensorOutput->data() == std::vector<float>{0, 4, 12});
EXPECT_EQ(tensorOutput->data(), std::vector<float>({0, 4, 12}));
}
18 changes: 9 additions & 9 deletions test/TestMultipleAlgoExecutions.cpp
Original file line number Diff line number Diff line change
@@ -1,9 +1,9 @@

#include "catch2/catch.hpp"
#include "gtest/gtest.h"

#include "kompute/Kompute.hpp"

TEST_CASE("test_multiple_algo_exec_single_cmd_buf_record") {
TEST(TestMultipleAlgoExecutions, SingleSequenceRecord) {

kp::Manager mgr;

Expand Down Expand Up @@ -43,10 +43,10 @@ TEST_CASE("test_multiple_algo_exec_single_cmd_buf_record") {
}
sqWeakPtr.reset();

REQUIRE(tensorA->data() == std::vector<float>{3, 3, 3});
EXPECT_EQ(tensorA->data(), std::vector<float>({3, 3, 3}));
}

TEST_CASE("test_multiple_algo_exec_multiple_record") {
TEST(TestMultipleAlgoExecutions, MultipleCmdBufRecords) {

kp::Manager mgr;

Expand Down Expand Up @@ -98,11 +98,11 @@ TEST_CASE("test_multiple_algo_exec_multiple_record") {
}
sqWeakPtr.reset();

REQUIRE(tensorA->data() == std::vector<float>{3, 3, 3});
EXPECT_EQ(tensorA->data(), std::vector<float>({3, 3, 3}));

}

TEST_CASE("test_multiple_algo_exec_multiple_sequence") {
TEST(TestMultipleAlgoExecutions, MultipleSequences) {

kp::Manager mgr;

Expand Down Expand Up @@ -160,10 +160,10 @@ TEST_CASE("test_multiple_algo_exec_multiple_sequence") {
sq->eval();
}

REQUIRE(tensorA->data() == std::vector<float>{3, 3, 3});
EXPECT_EQ(tensorA->data(), std::vector<float>({3, 3, 3}));
}

TEST_CASE("test_multiple_algo_exec_single_sequence_single_record") {
TEST(TestMultipleAlgoExecutions, SingleRecordMultipleEval) {

kp::Manager mgr;

Expand Down Expand Up @@ -205,6 +205,6 @@ TEST_CASE("test_multiple_algo_exec_single_sequence_single_record") {
sq->eval();
}

REQUIRE(tensorA->data() == std::vector<float>{3, 3, 3});
EXPECT_EQ(tensorA->data(), std::vector<float>({3, 3, 3}));
}

Loading

0 comments on commit 6e3c6a1

Please sign in to comment.