docs: use 6 dimensions everywhere in tutorial
(addresses 6th point raised by @jakelangham in issue #129)
Theresa Pollinger committed Dec 6, 2024
1 parent 8a47da2 commit 85d1576
Showing 1 changed file with 33 additions and 14 deletions: docs/simple_tutorial.md
@@ -15,6 +15,8 @@ int num_points_x = ...
int num_points_y = ...
int num_points_z = ...
int num_points_vx = ...
int num_points_vy = ...
int num_points_vz = ...
...
initial_function = ...
grid.initialize(
@@ -23,6 +25,8 @@ grid.initialize(
num_points_y,
num_points_z,
num_points_vx,
num_points_vy,
num_points_vz,
...
)
helper_data_structures.initialize(...)
@@ -39,6 +43,8 @@ properties = compute_properties(grid)
write_output(properties, grid)
```

This example assumes that your PDE solver uses three spatial and three velocity dimensions, i.e., six dimensions in total.

For use with DisCoTec, we assume nestable power-of-two discretizations, i.e.,
where your grid spacing can be $2^{-l}$ for $l \in \mathbb{N}$.
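For illustration, here is a minimal standalone sketch of that relation, assuming $2^l$ points per dimension at resolution level $l$ (the convention used in the code later in this tutorial):

```cpp
#include <iostream>

int main() {
  // Nestable power-of-two discretization: level l gives 2^l points per
  // dimension, i.e. a grid spacing of 2^{-l} on the unit interval
  // (assuming periodic boundaries, so no duplicated end point).
  for (int level = 1; level <= 6; ++level) {
    const int num_points = 1 << level;        // 2^level
    const double spacing = 1.0 / num_points;  // 2^{-level}
    std::cout << "level " << level << ": " << num_points << " points, spacing "
              << spacing << "\n";
  }
  return 0;
}
```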
For simplicity, in this tutorial we also assume you use periodic boundary
@@ -60,6 +66,8 @@ int num_points_x = ...
int num_points_y = ...
int num_points_z = ...
int num_points_vx = ...
int num_points_vy = ...
int num_points_vz = ...
...

- initial_function = ...
@@ -69,6 +77,8 @@ int num_points_vx = ...
- num_points_y,
- num_points_z,
- num_points_vx,
- num_points_vy,
- num_points_vz,
- ...
- )
- helper_data_structures.initialize(...)
@@ -77,6 +87,8 @@ int num_points_vx = ...
+ num_points_y,
+ num_points_z,
+ num_points_vx,
+ num_points_vy,
+ num_points_vz,
+ ...
+ )
float end_time = ...
@@ -112,7 +124,9 @@ C++ header `YourSolver.h`, roughly like this:

class YourSolver {
public:
YourSolver(int num_points_x, int num_points_y, int num_points_vx, int num_points_vy, ...);
YourSolver(int num_points_x, int num_points_y, int num_points_z,
int num_points_vx, int num_points_vy, int num_points_vz,
...);
//...rule of 5...

void run(double time_step);
@@ -143,9 +157,12 @@ With `YourSolver`, that will be as simple as:
class YourTask : public combigrid::Task {
public:
YourTask(combigrid::LevelVector& l, const std::vector<combigrid::BoundaryType>& boundary,
combigrid::real coeff, combigrid::real dt)
: Task(4, l, boundary, coeff, nullptr, nullptr), sim_(nullptr), dfg_(nullptr), dt_(dt) {}
YourTask(combigrid::LevelVector& l,
const std::vector<combigrid::BoundaryType>& boundary,
combigrid::real coeff,
combigrid::real dt)
: Task(6, l, boundary, coeff, nullptr, nullptr),
sim_(nullptr), dfg_(nullptr), dt_(dt) {}
virtual ~YourTask(){
if (sim_ != nullptr){
@@ -162,17 +179,20 @@ class YourTask : public combigrid::Task {
const auto& l = this->getLevelVector();
int num_points_x = combigrid::powerOfTwoByBitshift(l[0]);
int num_points_y = combigrid::powerOfTwoByBitshift(l[1]);
int num_points_vx = combigrid::powerOfTwoByBitshift(l[2]);
int num_points_vy = combigrid::powerOfTwoByBitshift(l[3]);
int num_points_z = combigrid::powerOfTwoByBitshift(l[2]);
int num_points_vx = combigrid::powerOfTwoByBitshift(l[3]);
int num_points_vy = combigrid::powerOfTwoByBitshift(l[4]);
int num_points_vz = combigrid::powerOfTwoByBitshift(l[5]);
// this is the number of MPI processes in each dimension --
// if all are 1, we are only using the parallelism between grids
std::vector<int> p = {1, 1, 1, 1};
std::vector<int> p = {1, 1, 1, 1, 1, 1};
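// (illustrative note, an assumption beyond the tutorial text: e.g.
//  p = {2, 1, 1, 1, 1, 1} would additionally split the x dimension across
//  two processes; the product of all entries must match the number of
//  processes per process group, nprocs)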
// if using MPI within your solver,
// pass p and the lcomm communicator to sim_, too
sim_ =
std::make_unique<YourSolver>(num_points_x, num_points_y, num_points_vx, num_points_vy);
std::make_unique<YourSolver>(num_points_x, num_points_y, num_points_z,
num_points_vx, num_points_vy, num_points_vz);
// wrap tensor in a DistributedFullGrid
dfg_ = std::make_unique<combigrid::DistributedFullGrid<combigrid::CombiDataType>>(
this->getDim(), this->getLevelVector(), lcomm, this->getBoundary(),
@@ -239,15 +259,14 @@ int main(int argc, char** argv) {
combigrid::theMPISystem()->initWorldReusable(MPI_COMM_WORLD, ngroup, nprocs, false, true);

// input parameters
const combigrid::DimType dim = 4; // four-dimensional problem
const combigrid::DimType dim = 6; // six-dimensional problem
const combigrid::LevelVector lmin = {
2, 2, 2,
2}; // minimal level vector for each grid -> have at least 4 points in each dimension
const combigrid::LevelVector lmax = {6, 6, 6, 6}; // maximum level vector -> level vector of target grid
2, 2, 2}; // minimal level vector for each grid -> have at least 4 points in each dimension
const combigrid::LevelVector lmax = {6, 6, 6, 6, 6, 6}; // maximum level vector -> level vector of target grid
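// (illustrative note: with the power-of-two convention, lmax = 6 corresponds
//  to 2^6 = 64 points per dimension on the target grid, just as lmin = 2
//  corresponds to the 4 points mentioned above)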
const std::vector<int> p = {
1, 1, 1, 1}; // parallelization of domain (one process per dimension) -> must match nprocs
const std::vector<bool> hierarchizationDims = {true, true, true,
true}; // all dimensions should be hierarchized
1, 1, 1, 1, 1, 1}; // parallelization of domain (one process per dimension) -> must match nprocs
const std::vector<bool> hierarchizationDims(dim, true); // all dimensions should be hierarchized
const std::vector<combigrid::BoundaryType> boundary(dim,
1); // periodic boundary in every dimension
const combigrid::real dt = 0.01; // time step
