adding raja derived field #1161

Merged
merged 40 commits on Dec 6, 2023

Commits (40)
7a74672
things making sense. now for raja and avoiding graph building
nicolemarsaglia Jun 22, 2023
24e8e42
brains in circles, but things still fine
nicolemarsaglia Jun 22, 2023
51368da
change output node in functor
nicolemarsaglia Jun 22, 2023
ce6052e
blueprint/conduit q's. preserve domain structure?
nicolemarsaglia Jun 22, 2023
2b31b68
lots of tweaks. making this a function, not an object, hope that's co…
nicolemarsaglia Jun 23, 2023
681fd81
things built
nicolemarsaglia Jun 23, 2023
21d9ba1
figure out undefined symbol error
nicolemarsaglia Jun 23, 2023
44f14f0
push current version
nicolemarsaglia Jun 29, 2023
f954f78
change dataset in place
nicolemarsaglia Jun 29, 2023
9a9dcde
move addfields from expressions to filters
nicolemarsaglia Jun 30, 2023
89a6e9a
device values?
nicolemarsaglia Jun 30, 2023
b3bf81c
close but wha ha happen to the middle field?
nicolemarsaglia Jun 30, 2023
7d85bf5
some refactoring and added a simple test
cyrush Jul 3, 2023
0e3b6eb
cleanup
nicolemarsaglia Jul 3, 2023
da92297
add missing guard for add fields test
cyrush Jul 3, 2023
fe02c7d
finish merge (post recent develop exprs renaming)
cyrush Jul 14, 2023
c2ee648
wip: identify expanded case to zero copy to vtk-m
cyrush Jul 26, 2023
e947135
use strided handle
cyrush Jul 27, 2023
28c22e5
Merge branch 'develop' into task/2022_6_raja_derived_field
nicolemarsaglia Jul 27, 2023
a1bf125
Merge branch 'task/2023_07_expand_vtkm_strided_zero_copy' into task/2…
nicolemarsaglia Jul 27, 2023
e7ae4cd
pull in new vtkm zero copy
nicolemarsaglia Jul 27, 2023
46f74f9
remove merge leftovers
nicolemarsaglia Jul 27, 2023
716ef54
add ints
nicolemarsaglia Jul 28, 2023
ef5449c
start of change explicit coord to use vtkm array handle stride
nicolemarsaglia Jul 28, 2023
07f13ad
first swipe at a coords, now to test.
nicolemarsaglia Jul 31, 2023
05cea95
ascent_vtkh_data_adapter.cpp
nicolemarsaglia Jul 31, 2023
1a48615
back to working and clean
nicolemarsaglia Aug 1, 2023
73ffef9
2d logic
nicolemarsaglia Aug 1, 2023
fba3451
this seems more right
nicolemarsaglia Aug 1, 2023
53c2208
let's finish our if statement kthxbye -- fixes nyx
nicolemarsaglia Aug 8, 2023
0581ee7
these need to go back to original
nicolemarsaglia Aug 10, 2023
24ac645
Update ascent_data_object.cpp
nicolemarsaglia Aug 10, 2023
f123f6a
complete the merge from develop
cyrush Nov 7, 2023
b7e6bd0
port to new interfaces
cyrush Nov 8, 2023
9222dea
adaptor logic update
cyrush Nov 8, 2023
d93b340
use proper node as mcarray input
cyrush Nov 8, 2023
d4b3aa2
add some more debugging output
cyrush Nov 10, 2023
1e977d5
Merge branch 'develop' into task/2023_6_raja_derived_field
cyrush Dec 6, 2023
6cd6d7a
fix for fields vs non vtk-m supported type
cyrush Dec 6, 2023
728a977
fix with one of the zstride coords calcs, simplify ascent render poly…
cyrush Dec 6, 2023
4 changes: 2 additions & 2 deletions src/libs/ascent/runtimes/ascent_expression_eval.cpp
@@ -970,7 +970,7 @@ initialize_functions()
field_sig["args/component/optional"];
field_sig["args/component/description"] =
"Used to specify a single component if the field is a vector field.";
field_sig["description"] = "Return a mesh field given a its name.";
field_sig["description"] = "Return a mesh field given its name.";

//---------------------------------------------------------------------------
// topo()
@@ -979,7 +979,7 @@
topo_sig["return_type"] = "topology";
topo_sig["filter_name"] = "expr_mesh_topology";
topo_sig["args/arg1/type"] = "string";
topo_sig["description"] = "Return a mesh topology given a its name.";
topo_sig["description"] = "Return a mesh topology given its name.";

//---------------------------------------------------------------------------
// topology()
447 changes: 318 additions & 129 deletions src/libs/ascent/runtimes/ascent_vtkh_data_adapter.cpp

Large diffs are not rendered by default.

@@ -992,6 +992,83 @@ field_histogram(const conduit::Node &dataset,
return res;
}

// Take in an array of field names and
// add a new output field that is field1 + ... + fieldn
void
derived_field_add_fields(conduit::Node &dataset,
const std::vector<std::string> &field_names,
const std::string &out_field_name)
{
const int num_fields = field_names.size();
const std::string output_path = "fields/" + out_field_name;
for(int i = 0; i < dataset.number_of_children(); ++i)
{
conduit::Node &dom = dataset.child(i);
for(int field_idx = 0; field_idx < num_fields; field_idx++)
{
const std::string path = "fields/" + field_names[field_idx];
if(dom.has_path(path)) // domain has this input field
{
if(!dom.has_path(output_path)) //setup output path
{
dom[output_path]["association"] = dom[path]["association"];
dom[output_path]["topology"] = dom[path]["topology"];
if(field_is_float32(dom[path])) // TODO: handle ints, longs?
{
const int vals = dom[path]["values"].dtype().number_of_elements();
dom[output_path]["values"].set(conduit::DataType::float32(vals));
}
else
{
const int vals = dom[path]["values"].dtype().number_of_elements();
dom[output_path]["values"].set(conduit::DataType::float64(vals));
}
}
else //has output path already
{
// check that the field assoc and topo match the output's
std::string out_assoc = dom[output_path]["association"].to_string();
std::string out_topo = dom[output_path]["topology"].to_string();
std::string f_assoc = dom[path]["association"].to_string();
std::string f_topo = dom[path]["topology"].to_string();
if(out_assoc != f_assoc)
{
ASCENT_ERROR("Field associations do not match:\n " <<
"Field " << field_names[field_idx]
<< " has association " << f_assoc << "\n" <<
"Field " << out_field_name
<< " has association " << out_assoc << "\n");
}
if(out_topo != f_topo)
{
ASCENT_ERROR("Field topologies do not match:\n " <<
"Field " << field_names[field_idx]
<< " has topology " << f_topo << "\n" <<
"Field " << out_field_name << " has topology " << out_topo << "\n");
}
}

//make tmp input
conduit::Node tmp_a,tmp_b;
tmp_a.set_external(dom[output_path]);
tmp_b.set_external(dom[path]);
// execute
// add out result to next field
conduit::Node n_add_res = derived_field_binary_add(tmp_a, tmp_b);
// replace out with new result
dom[output_path]["values"] = n_add_res["values"];
}
else //does not have field
{
// some domains may not have this field, simply skip
continue;
}
}//fields
}//domains

return;
}
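
A quick usage sketch (illustrative only, not part of this diff), assuming a blueprint-style multi-domain dataset where each domain stores fields under "fields/<name>" as in the loop above; the field and output names here are made up:

// Illustrative only -- not part of this PR's diff.
// Build a two-domain dataset where only the first domain has field "b",
// then sum "a" and "b" into "a_plus_b". Domains missing an input field
// simply skip it, as in the loop above.
conduit::Node dataset;

conduit::Node &dom0 = dataset.append();
dom0["fields/a/association"] = "vertex";
dom0["fields/a/topology"]    = "mesh";
dom0["fields/a/values"].set(conduit::DataType::float64(8));
dom0["fields/b/association"] = "vertex";
dom0["fields/b/topology"]    = "mesh";
dom0["fields/b/values"].set(conduit::DataType::float64(8));

conduit::Node &dom1 = dataset.append();
dom1["fields/a/association"] = "vertex";
dom1["fields/a/topology"]    = "mesh";
dom1["fields/a/values"].set(conduit::DataType::float64(8));

std::vector<std::string> names = {"a", "b"};
derived_field_add_fields(dataset, names, "a_plus_b");
// each domain now has fields/a_plus_b holding the element-wise sum of
// whichever inputs were present (dom1's result is just a copy of "a")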

// returns a Node containing the min, max and dim for x,y,z given a topology
conduit::Node
global_bounds(const conduit::Node &dataset, const std::string &topo_name)
@@ -1705,6 +1782,7 @@ binning(const conduit::Node &dataset,
bins = global_bins;
#endif


conduit::Node res;
res["value"].set(conduit::DataType::c_double(num_bins));
double *res_bins = res["value"].value();
@@ -91,6 +91,10 @@ ASCENT_API
conduit::Node global_bounds(const conduit::Node &dataset,
const std::string &topo_name);

ASCENT_API
void derived_field_add_fields(conduit::Node &dataset,
const std::vector<std::string> &field_names,
const std::string &output_field_name);
//
// NOTE: ascent_data_binning contains a RAJA version
// of binning that needs more work, but should eventually
@@ -490,6 +490,141 @@ exec_dispatch_mesh(const conduit::Node &n_coords,
}
}

//-----------------------------------------------------------------------------
//dispatch memory for a derived field (DF) binary operation
template<typename Function, typename Exec>
conduit::Node
dispatch_memory_binary_df(const conduit::Node &l_field,
const conduit::Node &r_field,
std::string component,
const Function &func,
const Exec &exec)
{
const std::string mem_space = Exec::memory_space;

conduit::Node res;
if(field_is_float32(l_field))
{
if(!field_is_float32(r_field))
{
ASCENT_ERROR("Type dispatch: mismatch array types\n"<<
l_field.schema().to_string() <<
"\n vs. \n" <<
r_field.schema().to_string());
}

MCArray<conduit::float32> l_farray(l_field["values"]);
MCArray<conduit::float32> r_farray(r_field["values"]);
DeviceAccessor<conduit::float32> l_accessor = l_farray.accessor(mem_space, component);
DeviceAccessor<conduit::float32> r_accessor = r_farray.accessor(mem_space, component);
res = func(l_accessor, r_accessor, exec);

}
else if(field_is_float64(l_field))
{
if(!field_is_float64(r_field))
{
ASCENT_ERROR("Type dispatch: mismatch array types\n"<<
l_field.schema().to_string() <<
"\n vs. \n" <<
r_field.schema().to_string());
}

MCArray<conduit::float64> l_farray(l_field["values"]);
MCArray<conduit::float64> r_farray(r_field["values"]);
DeviceAccessor<conduit::float64> l_accessor = l_farray.accessor(mem_space, component);
DeviceAccessor<conduit::float64> r_accessor = r_farray.accessor(mem_space, component);
res = func(l_accessor, r_accessor, exec);
}
else if(field_is_int32(l_field))
{
if(!field_is_int32(r_field))
{
ASCENT_ERROR("Type dispatch: mismatch array types\n"<<
l_field.schema().to_string() <<
"\n vs. \n" <<
r_field.schema().to_string());
}

MCArray<conduit::int32> l_farray(l_field["values"]);
MCArray<conduit::int32> r_farray(r_field["values"]);
DeviceAccessor<conduit::int32> l_accessor = l_farray.accessor(mem_space, component);
DeviceAccessor<conduit::int32> r_accessor = r_farray.accessor(mem_space, component);
res = func(l_accessor, r_accessor, exec);
}
else if(field_is_int64(l_field))
{

if(!field_is_int64(r_field))
{
ASCENT_ERROR("Type dispatch: mismatch array types\n"<<
l_field.schema().to_string() <<
"\n vs. \n" <<
r_field.schema().to_string());
}

MCArray<conduit::int64> l_farray(l_field["values"]);
MCArray<conduit::int64> r_farray(r_field["values"]);
DeviceAccessor<conduit::int64> l_accessor = l_farray.accessor(mem_space, component);
DeviceAccessor<conduit::int64> r_accessor = r_farray.accessor(mem_space, component);
res = func(l_accessor, r_accessor, exec);
}
else
{
ASCENT_ERROR("Type dispatch: unsupported array type "<<
l_field.schema().to_string());
}

return res;
}



template<typename Function>
conduit::Node
exec_dispatch_binary_df(const conduit::Node &l_field,
const conduit::Node &r_field,
std::string component,
const Function &func)
{

conduit::Node res;
const std::string exec_policy = ExecutionManager::execution_policy();
//std::cout<<"Exec policy "<<exec_policy<<"\n";
if(exec_policy == "serial")
{
SerialExec exec;
res = dispatch_memory_binary_df(l_field, r_field, component, func, exec);
}
#if defined(ASCENT_OPENMP_ENABLED) && defined(ASCENT_RAJA_ENABLED)
else if(exec_policy == "openmp")
{
OpenMPExec exec;
res = dispatch_memory_binary_df(l_field, r_field, component, func, exec);
}
#endif
#if defined(ASCENT_CUDA_ENABLED)
else if(exec_policy == "cuda")
{
CudaExec exec;
res = dispatch_memory_binary_df(l_field, r_field, component, func, exec);
}
#endif
#if defined(ASCENT_HIP_ENABLED)
else if(exec_policy == "hip")
{
HipExec exec;
res = dispatch_memory_binary_df(l_field, r_field, component, func, exec);
}
#endif
else
{
ASCENT_ERROR("Execution dispatch: unsupported execution policy "<<
exec_policy);
}
return res;
}
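
For orientation, an illustrative aside (not part of this diff): any functor handed to exec_dispatch_binary_df needs a templated call operator taking two typed DeviceAccessors plus an execution tag, and returning a conduit::Node. A minimal sketch of that shape follows; "DFCopyLeftFunctor" is a made-up name, and the body mirrors the Array/forall pattern used elsewhere in this PR:

// Illustrative only -- not part of this PR's diff.
// Minimal functor shape expected by exec_dispatch_binary_df: it is
// instantiated per value type T and per execution backend Exec.
struct DFCopyLeftFunctor
{
    template<typename T, typename Exec>
    conduit::Node operator()(const DeviceAccessor<T> l_accessor,
                             const DeviceAccessor<T> r_accessor,
                             const Exec &) const
    {
        // r_accessor is unused in this sketch; a real binary op would
        // combine both operands element-wise
        conduit::Node res;
        res["values"].set(conduit::DataType::float64(l_accessor.m_size));
        double *res_array = res["values"].value();

        Array<double> out(res_array, l_accessor.m_size);
        double *out_ptr = out.get_ptr(Exec::memory_space);

        using for_policy = typename Exec::for_policy;
        ascent::forall<for_policy>(0, l_accessor.m_size,
                                   [=] ASCENT_LAMBDA(index_t i)
        {
            out_ptr[i] = static_cast<double>(l_accessor[i]);
        });
        ASCENT_DEVICE_ERROR_CHECK();

        // sync the values back to the host before returning
        (void) out.get_host_ptr();
        return res;
    }
};

The DFAddFunctor added later in this PR follows exactly this shape.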


//-----------------------------------------------------------------------------
template<typename Function, typename T>
@@ -141,6 +141,85 @@ struct SumFunctor
}
};

struct DFAddFunctor
{
template<typename T, typename Exec>
conduit::Node operator()(const DeviceAccessor<T> l_accessor,
const DeviceAccessor<T> r_accessor,
const Exec &) const
{

const int l_size = l_accessor.m_size;
const int r_size = r_accessor.m_size;

bool diff_sizes = false;
int size;
int max_size;

size = max_size = l_size;
if(l_size != r_size)
{
size = min(l_size, r_size);
max_size = max(l_size, r_size);
diff_sizes = true;
}


// conduit zero initializes this array
conduit::Node res;
res["values"].set(conduit::DataType::float64(max_size));
double *res_array = res["values"].value();

Array<double> field_sums(res_array, max_size);
double *sums_ptr = field_sums.get_ptr(Exec::memory_space);

using for_policy = typename Exec::for_policy;
using atomic_policy = typename Exec::atomic_policy;

// init device array
ascent::forall<for_policy>(0, max_size, [=] ASCENT_LAMBDA(index_t i)
{
sums_ptr[i]=0.0;
});
ASCENT_DEVICE_ERROR_CHECK();

ascent::forall<for_policy>(0, size, [=] ASCENT_LAMBDA(index_t i)
{
const double val = l_accessor[i] + r_accessor[i];
// accumulate atomically; the returned previous value is not needed
ascent::atomic_add<atomic_policy>(&(sums_ptr[i]), val);
});
ASCENT_DEVICE_ERROR_CHECK();

if(diff_sizes)
{
if(l_size > r_size)
{
ascent::forall<for_policy>(size, l_size, [=] ASCENT_LAMBDA(index_t i)
{
const T val = l_accessor[i];
sums_ptr[i] = val;
});
ASCENT_DEVICE_ERROR_CHECK();
}
else
{
ascent::forall<for_policy>(size, r_size, [=] ASCENT_LAMBDA(index_t i)
{
const T val = r_accessor[i];
sums_ptr[i] = val;
});
ASCENT_DEVICE_ERROR_CHECK();
}
}

// synch the values back to the host
(void) field_sums.get_host_ptr();

return res;
}
};

struct NanFunctor
{
template<typename T, typename Exec>
@@ -444,6 +523,18 @@ array_sum(const conduit::Node &array,

return res;
}

conduit::Node
derived_field_binary_add(const conduit::Node &l_field,
const conduit::Node &r_field,
const std::string &component)
{
return exec_dispatch_binary_df(l_field,
r_field,
component,
detail::DFAddFunctor());
}
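
A usage sketch (illustrative only, not part of this diff): calling derived_field_binary_add directly on two single-component field nodes. The call inside derived_field_add_fields above passes only two arguments, which suggests the declaration carries a default for component; here it is passed explicitly as an empty string, on the assumption that an empty component selects the field's single component:

// Illustrative only -- not part of this PR's diff.
conduit::Node l_field, r_field;
l_field["values"].set(conduit::DataType::float64(16));
r_field["values"].set(conduit::DataType::float64(16));

// empty component string: treat the field as single-component (assumed)
conduit::Node sum = derived_field_binary_add(l_field, r_field, "");
// sum["values"] holds 16 float64 entries, each l[i] + r[i]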

//-----------------------------------------------------------------------------
};
//-----------------------------------------------------------------------------