Merge pull request #14 from sameeul/add_dynamic_pyramid
Add dynamic pyramid generation support
sameeul authored Jun 24, 2024
2 parents 330ee9f + aeb5e71 commit 1d044cc
Showing 20 changed files with 716 additions and 321 deletions.
28 changes: 19 additions & 9 deletions .github/workflows/publish_pypi.yml
@@ -21,14 +21,17 @@ jobs:
steps:
- uses: actions/checkout@v3
name: Check out
with:
submodules: recursive

- uses: ilammy/msvc-dev-cmd@v1
name: Add MSVS Path

- uses: ilammy/setup-nasm@v1
name: Add NASM
- name: Add NASM
if: matrix.os == 'windows-latest'
uses: ilammy/setup-nasm@v1

- name: Add Ninja
if: matrix.os == 'windows-latest'
uses: seanmiddleditch/gha-setup-ninja@master

- uses: actions/setup-python@v4
name: Install Python
@@ -51,8 +54,14 @@ jobs:
bash ci-utils/install_prereq_linux.sh &&
mkdir -p /tmp/argolid_bld &&
cp -r local_install /tmp/argolid_bld
CIBW_BEFORE_ALL_LINUX: curl -L http://mirror.centos.org/centos/8-stream/PowerTools/x86_64/os/Packages/nasm-2.15.03-3.el8.x86_64.rpm -o nasm-2.15.03-3.el8.x86_64.rpm &&
yum -y install nasm-2.15.03-3.el8.x86_64.rpm &&
CIBW_BEFORE_ALL_LINUX: yum -y install wget &&
wget https://www.nasm.us/pub/nasm/releasebuilds/2.15.05/nasm-2.15.05.tar.bz2 &&
tar -xjf nasm-2.15.05.tar.bz2 &&
cd nasm-2.15.05 &&
./configure &&
make &&
make install &&
cd .. &&
bash ci-utils/install_prereq_linux.sh &&
mkdir -p /tmp/argolid_bld &&
cp -r local_install /tmp/argolid_bld
@@ -61,10 +70,11 @@ jobs:
CIBW_ENVIRONMENT_MACOS: REPAIR_LIBRARY_PATH="/tmp/argolid_bld/local_install/lib:/tmp/argolid_bld/local_install/lib64" ON_GITHUB="TRUE" ARGOLID_DEP_DIR="/tmp/argolid_bld/local_install"
CIBW_ENVIRONMENT_LINUX: LD_LIBRARY_PATH="/tmp/argolid_bld/local_install/lib:/tmp/argolid_bld/local_install/lib64:$LD_LIBRARY_PATH" ON_GITHUB="TRUE" ARGOLID_DEP_DIR="/tmp/argolid_bld/local_install"
CIBW_REPAIR_WHEEL_COMMAND_MACOS: DYLD_LIBRARY_PATH=$REPAIR_LIBRARY_PATH delocate-listdeps {wheel} && DYLD_LIBRARY_PATH=$REPAIR_LIBRARY_PATH delocate-wheel --require-archs {delocate_archs} -w {dest_dir} {wheel}
CIBW_ENVIRONMENT_WINDOWS: PATH="$TEMP\\argolid\\bin;$PATH" ON_GITHUB="TRUE" ARGOLID_DEP_DIR="C:\\TEMP\\argolid_bld\\local_install"
CIBW_ENVIRONMENT_WINDOWS: PATH="$TEMP\\argolid\\bin;$PATH" ON_GITHUB="TRUE" ARGOLID_DEP_DIR="C:\\TEMP\\argolid_bld\\local_install" CMAKE_ARGS="-DCMAKE_GENERATOR=Ninja"
CIBW_REPAIR_WHEEL_COMMAND_WINDOWS: "delvewheel repair -w {dest_dir} {wheel}"
CIBW_ARCHS: ${{ matrix.cibw_archs }}
CIBW_TEST_REQUIRES: bfio==2.3.2 tensorstore
CIBW_BEFORE_TEST_LINUX: yum -y install maven java
CIBW_TEST_REQUIRES: bfio tensorstore numpy==1.24.0
CIBW_TEST_COMMAND: python -W default -m unittest discover -s {project}/tests -v

- name: Install Dependencies
@@ -118,7 +128,7 @@ jobs:
CIBW_ENVIRONMENT_MACOS: REPAIR_LIBRARY_PATH="/tmp/argolid_bld/local_install/lib:/tmp/argolid_bld/local_install/lib64" ON_GITHUB="TRUE" ARGOLID_DEP_DIR="/tmp/argolid_bld/local_install" CMAKE_ARGS="-DTENSORSTORE_USE_SYSTEM_JPEG=ON"
CIBW_REPAIR_WHEEL_COMMAND_MACOS: DYLD_LIBRARY_PATH=$REPAIR_LIBRARY_PATH delocate-listdeps {wheel} && DYLD_LIBRARY_PATH=$REPAIR_LIBRARY_PATH delocate-wheel --require-archs {delocate_archs} -w {dest_dir} {wheel}
CIBW_ARCHS: ${{ matrix.cibw_archs }}
CIBW_TEST_REQUIRES: bfio==2.3.2 tensorstore
CIBW_TEST_REQUIRES: bfio tensorstore numpy==1.24.0
CIBW_TEST_COMMAND: python -W default -m unittest discover -s {project}/tests -v

- name: Install Dependencies
28 changes: 19 additions & 9 deletions .github/workflows/wheel_build.yml
@@ -19,14 +19,17 @@ jobs:
steps:
- uses: actions/checkout@v3
name: Check out
with:
submodules: recursive

- uses: ilammy/msvc-dev-cmd@v1
name: Add MSVS Path

- uses: ilammy/setup-nasm@v1
name: Add NASM
- name: Add NASM
if: matrix.os == 'windows-latest'
uses: ilammy/setup-nasm@v1

- name: Add Ninja
if: matrix.os == 'windows-latest'
uses: seanmiddleditch/gha-setup-ninja@master

- uses: actions/setup-python@v4
name: Install Python
@@ -49,8 +52,14 @@ jobs:
bash ci-utils/install_prereq_linux.sh &&
mkdir -p /tmp/argolid_bld &&
cp -r local_install /tmp/argolid_bld
CIBW_BEFORE_ALL_LINUX: curl -L http://mirror.centos.org/centos/8-stream/PowerTools/x86_64/os/Packages/nasm-2.15.03-3.el8.x86_64.rpm -o nasm-2.15.03-3.el8.x86_64.rpm &&
yum -y install nasm-2.15.03-3.el8.x86_64.rpm &&
CIBW_BEFORE_ALL_LINUX: yum -y install wget &&
wget https://www.nasm.us/pub/nasm/releasebuilds/2.15.05/nasm-2.15.05.tar.bz2 &&
tar -xjf nasm-2.15.05.tar.bz2 &&
cd nasm-2.15.05 &&
./configure &&
make &&
make install &&
cd .. &&
bash ci-utils/install_prereq_linux.sh &&
mkdir -p /tmp/argolid_bld &&
cp -r local_install /tmp/argolid_bld
@@ -59,10 +68,11 @@ jobs:
CIBW_ENVIRONMENT_MACOS: REPAIR_LIBRARY_PATH="/tmp/argolid_bld/local_install/lib:/tmp/argolid_bld/local_install/lib64" ON_GITHUB="TRUE" ARGOLID_DEP_DIR="/tmp/argolid_bld/local_install"
CIBW_ENVIRONMENT_LINUX: LD_LIBRARY_PATH="/tmp/argolid_bld/local_install/lib:/tmp/argolid_bld/local_install/lib64:$LD_LIBRARY_PATH" ON_GITHUB="TRUE" ARGOLID_DEP_DIR="/tmp/argolid_bld/local_install"
CIBW_REPAIR_WHEEL_COMMAND_MACOS: DYLD_LIBRARY_PATH=$REPAIR_LIBRARY_PATH delocate-listdeps {wheel} && DYLD_LIBRARY_PATH=$REPAIR_LIBRARY_PATH delocate-wheel --require-archs {delocate_archs} -w {dest_dir} {wheel}
CIBW_ENVIRONMENT_WINDOWS: PATH="$TEMP\\argolid\\bin;$PATH" ON_GITHUB="TRUE" ARGOLID_DEP_DIR="C:\\TEMP\\argolid_bld\\local_install"
CIBW_ENVIRONMENT_WINDOWS: PATH="$TEMP\\argolid\\bin;$PATH" ON_GITHUB="TRUE" ARGOLID_DEP_DIR="C:\\TEMP\\argolid_bld\\local_install" CMAKE_ARGS="-DCMAKE_GENERATOR=Ninja"
CIBW_REPAIR_WHEEL_COMMAND_WINDOWS: "delvewheel repair -w {dest_dir} {wheel}"
CIBW_ARCHS: ${{ matrix.cibw_archs }}
CIBW_TEST_REQUIRES: bfio==2.3.2 tensorstore
CIBW_BEFORE_TEST_LINUX: yum -y install maven java
CIBW_TEST_REQUIRES: bfio tensorstore numpy==1.24.0
CIBW_TEST_COMMAND: python -W default -m unittest discover -s {project}/tests -v

- name: Upload Artifact
@@ -113,7 +123,7 @@ jobs:
CIBW_ENVIRONMENT_MACOS: REPAIR_LIBRARY_PATH="/tmp/argolid_bld/local_install/lib:/tmp/argolid_bld/local_install/lib64" ON_GITHUB="TRUE" ARGOLID_DEP_DIR="/tmp/argolid_bld/local_install" CMAKE_ARGS="-DTENSORSTORE_USE_SYSTEM_JPEG=ON"
CIBW_REPAIR_WHEEL_COMMAND_MACOS: DYLD_LIBRARY_PATH=$REPAIR_LIBRARY_PATH delocate-listdeps {wheel} && DYLD_LIBRARY_PATH=$REPAIR_LIBRARY_PATH delocate-wheel --require-archs {delocate_archs} -w {dest_dir} {wheel}
CIBW_ARCHS: ${{ matrix.cibw_archs }}
CIBW_TEST_REQUIRES: bfio==2.3.2 tensorstore
CIBW_TEST_REQUIRES: bfio tensorstore numpy==1.24.0
CIBW_TEST_COMMAND: python -W default -m unittest discover -s {project}/tests -v

- name: Upload Artifact
23 changes: 6 additions & 17 deletions CMakeLists.txt
@@ -1,11 +1,10 @@
cmake_minimum_required(VERSION 3.24)
include(GNUInstallDirs)

project(Argolid)
#==== Compiler Options
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED ON)

include(GNUInstallDirs)
# this is a workaround for GitHub Action for wheelbuiling
if(DEFINED ENV{ARGOLID_DEP_DIR})
set(CMAKE_PREFIX_PATH $ENV{ARGOLID_DEP_DIR})
@@ -27,8 +26,8 @@ set(SOURCE
src/chunked_base_to_pyr_gen.cpp
src/ome_tiff_to_chunked_pyramid.cpp
src/utilities.cpp
src/pyramid_view.cpp
)
add_executable(test_pyrgen ${SOURCE} tests/main.cpp)

include(FetchContent)

@@ -47,15 +46,8 @@ FetchContent_MakeAvailable(tensorstore)
include_directories(lib/pugixml)
include_directories(lib/plog/include)
include_directories(lib/bs_threadpool)
target_link_libraries(
test_pyrgen
PRIVATE
tensorstore::tensorstore tensorstore::all_drivers
)


find_package(filepattern REQUIRED)
target_link_libraries(test_pyrgen PRIVATE filepattern::filepattern)

find_package(Threads QUIET)
if (Threads_FOUND)
@@ -76,6 +68,9 @@ pybind11_add_module(libargolid
)

# VERSION_INFO is defined by setup.py and passed into the C++ code as a define (VERSION_INFO) here.
if (NOT DEFINED VERSION_INFO)
set(VERSION_INFO "000")
endif()
target_compile_definitions(libargolid PRIVATE VERSION_INFO=${VERSION_INFO})

#target_compile_definitions(libargolid PRIVATE WITH_PYTHON_H)
@@ -85,10 +80,4 @@ if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
endif()

target_link_libraries(libargolid PRIVATE tensorstore::tensorstore tensorstore::all_drivers filepattern::filepattern)
target_link_libraries(libargolid PRIVATE ${Build_LIBRARIES})

target_link_libraries(
test_pyrgen
PRIVATE
${Build_LIBRARIES}
)
target_link_libraries(libargolid PRIVATE ${Build_LIBRARIES})
5 changes: 1 addition & 4 deletions setup.py
@@ -52,17 +52,14 @@ def build_extension(self, ext):

cfg = "Debug" if self.debug else "Release"
build_args = ["--config", cfg]
cmake_args += ["-DCMAKE_BUILD_TYPE=" + cfg]

if platform.system() == "Windows":
print("-----------------Windows...")
cmake_args += [
"-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}".format(cfg.upper(), extdir)
]
if sys.maxsize > 2 ** 32:
cmake_args += ["-A", "x64"]
build_args += ["--", "/m"]
else:
cmake_args += ["-DCMAKE_BUILD_TYPE=" + cfg]
build_args += ["--", "-j4"]

env = os.environ.copy()
28 changes: 4 additions & 24 deletions src/chunked_base_to_pyr_gen.cpp
@@ -46,7 +46,7 @@ void ChunkedBaseToPyramid::CreatePyramidImages( const std::string& input_chunked
int resolution = 1; // this gets doubled in each level up
auto input_spec = [v, &input_chunked_dir, &base_level_key](){
if (v == VisType::NG_Zarr | v == VisType::Viv){
return GetZarrSpecToRead(input_chunked_dir, std::to_string(base_level_key));
return GetZarrSpecToRead(input_chunked_dir+"/"+std::to_string(base_level_key));
} else if (v == VisType::PCNG){
return GetNPCSpecToRead(input_chunked_dir, std::to_string(base_level_key));
} else {// this will probably never happen
@@ -124,30 +124,10 @@ void ChunkedBaseToPyramid::WriteDownsampledImage( const std::string& input_fil
std::unordered_map<std::int64_t, DSType>& channel_ds_config,
BS::thread_pool& th_pool)
{
int num_dims, x_dim, y_dim, c_dim;

if (v == VisType::Viv){ //5D file
x_dim = 4;
y_dim = 3;
c_dim = 1;
num_dims = 5;

} else if (v == VisType::NG_Zarr){ // 3D file
x_dim = 3;
y_dim = 2;
c_dim = 0;
num_dims = 4;

} else if (v == VisType::PCNG ){ // 3D file
x_dim = 1;
y_dim = 0;
c_dim = 3;
num_dims = 3;
}

auto [x_dim, y_dim, c_dim, num_dims] = GetZarrParams(v);
auto input_spec = [v, &input_file, &input_scale_key](){
if (v == VisType::NG_Zarr | v == VisType::Viv){
return GetZarrSpecToRead(input_file, input_scale_key);
return GetZarrSpecToRead(input_file+"/"+input_scale_key);
} else if (v == VisType::PCNG){
return GetNPCSpecToRead(input_file, input_scale_key);
} else {// this will probably never happen
@@ -233,7 +213,7 @@ void ChunkedBaseToPyramid::WriteDownsampledImage( const std::string& input_fil
th_pool.push_task([ &store1, &store2,
prev_x_start, prev_x_end, prev_y_start, prev_y_end,
x_start, x_end, y_start, y_end,
x_dim, y_dim, c_dim, c, v, downsampling_func_ptr](){
x_dim=x_dim, y_dim=y_dim, c_dim=c_dim, c, v, downsampling_func_ptr](){
std::vector<T> read_buffer((prev_x_end-prev_x_start)*(prev_y_end-prev_y_start));
auto array = tensorstore::Array(read_buffer.data(), {prev_y_end-prev_y_start, prev_x_end-prev_x_start}, tensorstore::c_order);

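Across the C++ changes, the repeated per-VisType if/else blocks that set x_dim, y_dim, c_dim, and num_dims are collapsed into a structured binding over a shared GetZarrParams(v) helper. The helper's definition is not in the portion of the diff loaded here; the sketch below is a reconstruction based on the dimension orders visible in the removed blocks (the removed PCNG blocks disagree on the x/y order, so that case is an assumption):

// Hypothetical reconstruction of the shared helper; the real definition lives
// elsewhere in the repository and is not shown in this diff.
#include <tuple>

namespace argolid {
enum class VisType { Viv, NG_Zarr, PCNG };

// Returns {x_dim, y_dim, c_dim, num_dims} for a visualization type, matching
// the axis orders hard-coded in the code removed by this commit.
inline std::tuple<int, int, int, int> GetZarrParams(VisType v) {
    switch (v) {
    case VisType::Viv:     return {4, 3, 1, 5}; // 5D (T, C, Z, Y, X)
    case VisType::NG_Zarr: return {3, 2, 0, 4}; // 4D (C, Z, Y, X)
    case VisType::PCNG:    return {0, 1, 3, 3}; // 3D; assumed order, the removed blocks differ
    default:               return {0, 0, 0, 0}; // should not happen
    }
}
} // namespace argolid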
68 changes: 2 additions & 66 deletions src/chunked_pyramid_assembler.cpp
@@ -86,26 +86,7 @@ ImageInfo OmeTiffCollToChunked::Assemble(const std::string& input_dir,
}
PLOG_INFO << "Total images found: " << image_vec.size() <<std::endl;
auto t1 = std::chrono::high_resolution_clock::now();
int num_dims, x_dim, y_dim, c_dim;

if (v == VisType::Viv){ //5D file
x_dim = 4;
y_dim = 3;
c_dim = 1;
num_dims = 5;

} else if (v == VisType::NG_Zarr){ // 3D file
x_dim = 3;
y_dim = 2;
c_dim = 0;
num_dims = 4;

} else if (v == VisType::PCNG ){ // 3D file
x_dim = 0;
y_dim = 1;
c_dim = 3;
num_dims = 3;
}
auto [x_dim, y_dim, c_dim, num_dims] = GetZarrParams(v);

if (image_vec.size() != 0){
//std::list<tensorstore::WriteFutures> pending_writes;
@@ -150,7 +131,7 @@ ImageInfo OmeTiffCollToChunked::Assemble(const std::string& input_dir,

auto t4 = std::chrono::high_resolution_clock::now();
for(const auto& i: image_vec){
th_pool.push_task([&dest, i, x_dim, y_dim, c_dim, v, &whole_image](){
th_pool.push_task([&dest, i, x_dim=x_dim, y_dim=y_dim, c_dim=c_dim, v, &whole_image](){


TENSORSTORE_CHECK_OK_AND_ASSIGN(auto source, tensorstore::Open(
@@ -194,49 +175,4 @@ ImageInfo OmeTiffCollToChunked::Assemble(const std::string& input_dir,
}
return std::move(whole_image);
}

void OmeTiffCollToChunked::GenerateOmeXML(const std::string& image_name, const std::string& output_file, ImageInfo& whole_image){

pugi::xml_document doc;

// Create the root element <OME>
pugi::xml_node omeNode = doc.append_child("OME");

// Add the namespaces and attributes to the root element
omeNode.append_attribute("xmlns") = "http://www.openmicroscopy.org/Schemas/OME/2016-06";
omeNode.append_attribute("xmlns:xsi") = "http://www.w3.org/2001/XMLSchema-instance";
auto creator = std::string{"Argolid "} + std::string{VERSION_INFO};
omeNode.append_attribute("Creator") = creator.c_str();
omeNode.append_attribute("UUID") = "urn:uuid:ce3367ae-0512-4e87-a045-20d87db14001";
omeNode.append_attribute("xsi:schemaLocation") = "http://www.openmicroscopy.org/Schemas/OME/2016-06 http://www.openmicroscopy.org/Schemas/OME/2016-06/ome.xsd";

// Create the <Image> element
pugi::xml_node imageNode = omeNode.append_child("Image");
imageNode.append_attribute("ID") = "Image:0";
imageNode.append_attribute("Name") =image_name.c_str();

// Create the <Pixels> element
pugi::xml_node pixelsNode = imageNode.append_child("Pixels");
pixelsNode.append_attribute("BigEndian") = "false";
pixelsNode.append_attribute("DimensionOrder") = "XYZCT";
pixelsNode.append_attribute("ID") = "Pixels:0";
pixelsNode.append_attribute("Interleaved") = "false";
pixelsNode.append_attribute("SizeC") = std::to_string(whole_image._num_channels).c_str();;
pixelsNode.append_attribute("SizeT") = "1";
pixelsNode.append_attribute("SizeX") = std::to_string(whole_image._full_image_width).c_str();
pixelsNode.append_attribute("SizeY") = std::to_string(whole_image._full_image_height).c_str();
pixelsNode.append_attribute("SizeZ") = "1";
pixelsNode.append_attribute("Type") = whole_image._data_type.c_str();

// Create the <Channel> elements
for(std::int64_t i=0; i<whole_image._num_channels; ++i){
pugi::xml_node channelNode = pixelsNode.append_child("Channel");
channelNode.append_attribute("ID") = ("Channel:0:" + std::to_string(i)).c_str();
channelNode.append_attribute("SamplesPerPixel") = "1";
// Create the <LightPath> elements
channelNode.append_child("LightPath");
}

doc.save_file(output_file.c_str());
}
} // ns argolid
7 changes: 1 addition & 6 deletions src/chunked_pyramid_assembler.h
@@ -17,11 +17,7 @@ struct ImageSegment{

};

struct ImageInfo
{
std::int64_t _full_image_height, _full_image_width, _chunk_size_x, _chunk_size_y, _num_channels;
std::string _data_type;
};


class OmeTiffCollToChunked{

@@ -35,6 +31,5 @@ class OmeTiffCollToChunked{
const std::string& scale_key,
VisType v,
BS::thread_pool& th_pool);
void GenerateOmeXML(const std::string& image_name, const std::string& output_file, ImageInfo& whole_image);
};
} // ns argolid
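Note that ImageInfo and the GenerateOmeXML declaration are removed from this header even though Assemble still returns an ImageInfo, so the struct has presumably been relocated to a shared header that is not among the files loaded above. For reference, the definition as it stood before this commit:

// ImageInfo as previously declared in chunked_pyramid_assembler.h; its new
// home is assumed to be a shared header not shown in this diff.
#include <cstdint>
#include <string>

namespace argolid {
struct ImageInfo {
    std::int64_t _full_image_height, _full_image_width,
                 _chunk_size_x, _chunk_size_y, _num_channels;
    std::string _data_type;
};
} // ns argolid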
18 changes: 2 additions & 16 deletions src/ome_tiff_to_chunked_converter.cpp
@@ -26,21 +26,7 @@ namespace argolid {
void OmeTiffToChunkedConverter::Convert( const std::string& input_file, const std::string& output_file,
const std::string& scale_key, const VisType v, BS::thread_pool& th_pool){

int num_dims, x_dim, y_dim;

if (v == VisType::Viv){ //5D file
x_dim = 4;
y_dim = 3;
num_dims = 5;
} else if (v == VisType::NG_Zarr ){ // 3D file
x_dim = 3;
y_dim = 2;
num_dims = 4;
} else if (v == VisType::PCNG ){ // 3D file
x_dim = 0;
y_dim = 1;
num_dims = 3;
}
const auto [x_dim, y_dim, c_dim, num_dims] = GetZarrParams(v);

TENSORSTORE_CHECK_OK_AND_ASSIGN(auto store1, tensorstore::Open(GetOmeTiffSpecToRead(input_file),
tensorstore::OpenMode::open,
@@ -84,7 +70,7 @@ void OmeTiffToChunkedConverter::Convert( const std::string& input_file, const st
for(std::int64_t j=0; j<num_cols; ++j){
std::int64_t x_start = j*chunk_shape[x_dim];
std::int64_t x_end = std::min({(j+1)*chunk_shape[x_dim], image_width});
th_pool.push_task([&store1, &store2, x_start, x_end, y_start, y_end, x_dim, y_dim, v](){
th_pool.push_task([&store1, &store2, x_start, x_end, y_start, y_end, x_dim=x_dim, y_dim=y_dim, v](){

auto array = tensorstore::AllocateArray({y_end-y_start, x_end-x_start},tensorstore::c_order,
tensorstore::value_init, store1.dtype());
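As in the two files above, the thread-pool lambdas here replace plain captures of x_dim and y_dim with init-captures (x_dim=x_dim, y_dim=y_dim). This is needed because those names now come from a structured binding, and C++17 (the standard set in CMakeLists.txt) does not allow capturing structured-binding names directly. A minimal standalone illustration, not project code:

// Why the lambdas use init-capture for structured-binding names (C++17).
#include <tuple>

int main() {
    auto [a, b] = std::tuple{1, 2};
    // auto bad = [a]() { return a; };    // ill-formed in C++17: a structured
    //                                    // binding cannot be captured by name
    auto good = [a = a]() { return a; };  // init-capture copies the value
    return good() + b;
}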
(Diffs for the remaining changed files were not loaded in this view.)
