Skip to content

Commit

Permalink
Merge pull request gunrock#638 from gunrock/dev
Browse files Browse the repository at this point in the history
Bringing changes live (gunrock as shared dll) -- build failing
  • Loading branch information
neoblizz authored Oct 31, 2019
2 parents b85c1f2 + a032a24 commit 02f5589
Show file tree
Hide file tree
Showing 23 changed files with 431 additions and 209 deletions.
2 changes: 1 addition & 1 deletion examples/shared_libs/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -6,4 +6,4 @@ target_link_libraries(shared_lib_pr gunrock)

add_test(NAME SHARED_LIB_TEST_PAGERANK COMMAND shared_lib_pr)
set_tests_properties(SHARED_LIB_TEST_PAGERANK
PROPERTIES PASS_REGULAR_EXPRESSION "Node_ID.*2.*: Score.*1.2*")
PROPERTIES PASS_REGULAR_EXPRESSION "Node_ID.*2.*: Score.*1.2*")
11 changes: 11 additions & 0 deletions examples/shared_libs/shared_lib_pr.c
Original file line number Diff line number Diff line change
Expand Up @@ -17,15 +17,26 @@ int main(int argc, char *argv[]) {
int *node_ids = (int *)malloc(sizeof(int) * num_nodes);
float *ranks = (float *)malloc(sizeof(float) * num_nodes);

// NOTE(Jonathan): pagerank was previously disabled while testing only HITS and SM in CMakeLists.txt; the call below is currently active.
double elapsed = pagerank(num_nodes, num_edges, row_offsets, col_indices, 1,
node_ids, ranks);



int node;
for (node = 0; node < num_nodes; ++node)
printf("Node_ID [%d] : Score: [%f]\n", node_ids[node], ranks[node]);

// HITS
float *hub_ranks = (float *)malloc(sizeof(float) * num_nodes);
float *auth_ranks = (float *)malloc(sizeof(float) * num_nodes);
int num_iter = 10;
// double elapsed_hits = hits(num_nodes, num_edges, row_offsets, col_indices, num_iter, hub_ranks, auth_ranks);

if (node_ids) free(node_ids);
if (ranks) free(ranks);
if(hub_ranks) free(hub_ranks);
if(auth_ranks) free(auth_ranks);

return 0;
}
2 changes: 1 addition & 1 deletion examples/sssp/run_real.sh
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@

BASEOPTION="--src=randomize2 --queue-sizing=1.2 --iteration-num=16"
BASEFLAG=""
EXECUTION="./bin/test_sssp_8.0_x86_64"
EXECUTION="./bin/test_sssp_10.0_x86_64"
DATADIR="/data/gunrock_dataset/large"

OPTION[0]="" && FLAG[0]=".default"
Expand Down
2 changes: 2 additions & 0 deletions gunrock/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,8 @@ set(CUFILES
#app/bc/bc_app.cu
##app/cc/cc_app.cu
app/pr/pr_app.cu
app/hits/hits_app.cu
#app/sm/sm_app.cu
#app/sssp/sssp_app.cu
#app/louvain/louvain_app.cu
#app/color/color_app.cu
Expand Down
7 changes: 4 additions & 3 deletions gunrock/app/app_base.cuh
Original file line number Diff line number Diff line change
Expand Up @@ -21,17 +21,18 @@
namespace gunrock {
namespace app {

cudaError_t UseParameters_app(util::Parameters &parameters) {
template <typename ParametersT>
cudaError_t UseParameters_app(ParametersT &parameters) {
cudaError_t retval = cudaSuccess;
GUARD_CU(util::UseParameters_info(parameters));

GUARD_CU(parameters.Use<int>(
GUARD_CU(parameters.template Use<int>(
"num-runs",
util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER,
1, "Number of runs to perform the test, per parameter-set", __FILE__,
__LINE__));

GUARD_CU(parameters.Use<double>(
GUARD_CU(parameters.template Use<double>(
"preprocess-time",
util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::INTERNAL_PARAMETER,
0.0, "Preprocessing time", __FILE__, __LINE__));
Expand Down
31 changes: 16 additions & 15 deletions gunrock/app/enactor_base.cuh
Original file line number Diff line number Diff line change
Expand Up @@ -45,46 +45,47 @@ enum : Enactor_Flag {
* @param parameters The util::Parameter<...> structure holding all parameter
* info \return cudaError_t error message(s), if any
*/
cudaError_t UseParameters_enactor(util::Parameters &parameters) {
template <typename ParametersT>
cudaError_t UseParameters_enactor(ParametersT &parameters) {
cudaError_t retval = cudaSuccess;

if (!parameters.Have("device"))
GUARD_CU(parameters.Use<int>(
if (!parameters.template Have("device"))
GUARD_CU(parameters.template Use<int>(
"device",
util::REQUIRED_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER,
0, "Set GPU(s) for testing", __FILE__, __LINE__));

GUARD_CU(parameters.Use<int>(
GUARD_CU(parameters.template Use<int>(
"communicate-latency",
util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER,
0, "additional communication latency", __FILE__, __LINE__));

GUARD_CU(parameters.Use<float>(
GUARD_CU(parameters.template Use<float>(
"communicate-multipy",
util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER,
1.0f, "communication sizing factor", __FILE__, __LINE__));

GUARD_CU(parameters.Use<int>(
GUARD_CU(parameters.template Use<int>(
"expand-latency",
util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER,
0, "additional expand incoming latency", __FILE__, __LINE__));

GUARD_CU(parameters.Use<int>(
GUARD_CU(parameters.template Use<int>(
"subqueue-latency",
util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER,
0, "additional subqueue latency", __FILE__, __LINE__));

GUARD_CU(parameters.Use<int>(
GUARD_CU(parameters.template Use<int>(
"fullqueue-latency",
util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER,
0, "additional fullqueue latency", __FILE__, __LINE__));

GUARD_CU(parameters.Use<int>(
GUARD_CU(parameters.template Use<int>(
"makeout-latency",
util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER,
0, "additional make-out latency", __FILE__, __LINE__));

GUARD_CU(parameters.Use<std::string>(
GUARD_CU(parameters.template Use<std::string>(
"advance-mode",
util::REQUIRED_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER,
"LB",
Expand All @@ -97,33 +98,33 @@ cudaError_t UseParameters_enactor(util::Parameters &parameters) {
"\tadd -CULL for fuzed kernels;\n"
"\tnot all modes are available for specific problem;\n"));

GUARD_CU(parameters.Use<std::string>(
GUARD_CU(parameters.template Use<std::string>(
"filter-mode",
util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER,
"CULL", "Filter strategy", __FILE__, __LINE__));

GUARD_CU(parameters.Use<double>(
GUARD_CU(parameters.template Use<double>(
"queue-factor",
util::REQUIRED_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER,
6.0,
"Reserved frontier sizing factor, multiples of numbers of vertices or "
"edges",
__FILE__, __LINE__));

GUARD_CU(parameters.Use<double>(
GUARD_CU(parameters.template Use<double>(
"trans-factor",
util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER,
1.0,
"Reserved sizing factor for data communication, multiples of number of "
"vertices",
__FILE__, __LINE__));

GUARD_CU(parameters.Use<bool>(
GUARD_CU(parameters.template Use<bool>(
"size-check",
util::OPTIONAL_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER,
true, "Whether to enable frontier auto resizing", __FILE__, __LINE__));

GUARD_CU(parameters.Use<int>(
GUARD_CU(parameters.template Use<int>(
"max-grid-size",
util::OPTIONAL_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER,
0, "Maximun number of grids for GPU kernels", __FILE__, __LINE__));
Expand Down
155 changes: 29 additions & 126 deletions gunrock/app/hits/hits_app.cu
Original file line number Diff line number Diff line change
Expand Up @@ -11,148 +11,51 @@
* @brief HITS Gunrock Application
*/

#include <gunrock/gunrock.h>
#include <gunrock/util/test_utils.cuh>
#include <gunrock/graphio/graphio.cuh>
#include <gunrock/app/app_base.cuh>
#include <gunrock/app/test_base.cuh>

#include <gunrock/app/hits/hits_enactor.cuh>
#include <gunrock/app/hits/hits_test.cuh>
#include <gunrock/app/hits/hits_app.cuh>

namespace gunrock {
namespace app {
namespace hits {

cudaError_t UseParameters(util::Parameters &parameters) {
/**
 * @brief Register all command-line parameters used by the HITS app.
 *
 * Aggregates the parameter registrations of the app, problem, enactor,
 * and test layers into a single call. GUARD_CU presumably short-circuits
 * and returns the first non-success cudaError_t — defined in gunrock's
 * error-handling utilities; TODO confirm.
 *
 * @tparam ParametersT Parameter-container type (e.g. util::Parameters).
 * @param[in,out] parameters Structure holding all parameter info.
 * \return cudaError_t error message(s), if any.
 */
template <typename ParametersT>
cudaError_t UseParameters(ParametersT &parameters) {
cudaError_t retval = cudaSuccess;
// Register each layer's parameters in turn (app, problem, enactor, test).
GUARD_CU(UseParameters_app(parameters));
GUARD_CU(UseParameters_problem(parameters));
GUARD_CU(UseParameters_enactor(parameters));
GUARD_CU(UseParameters_test(parameters));

return retval;
}

/**
* @brief Run hits tests
* @tparam GraphT Type of the graph
* @tparam ValueT Type of the distances
* @param[in] parameters Execution parameters
* @param[in] graph Input graph
...
* @param[in] target where to perform the app
* \return cudaError_t error message(s), if any
} // namespace hits
} // namespace app
} // namespace gunrock

/*
 * @brief Simple interface taking in a graph in CSR format
 * @param[in] num_nodes Number of vertices in the input graph
 * @param[in] num_edges Number of edges in the input graph
 * @param[in] row_offsets CSR-formatted graph input row offsets
 * @param[in] col_indices CSR-formatted graph input column indices
 * @param[in] num_iter Number of HITS iterations to perform
 * @param[out] hub_ranks Returned hub score per vertex
 * @param[out] auth_ranks Returned authority score per vertex
 * \return double Return accumulated elapsed times for all runs
 */
template <typename GraphT>
cudaError_t RunTests(util::Parameters &parameters, GraphT &graph,
typename GraphT::ValueT *ref_hrank,
typename GraphT::ValueT *ref_arank,
util::Location target) {
cudaError_t retval = cudaSuccess;

typedef typename GraphT::VertexT VertexT;
typedef typename GraphT::ValueT ValueT;
typedef typename GraphT::SizeT SizeT;
typedef Problem<GraphT> ProblemT;
typedef Enactor<ProblemT> EnactorT;

// CLI parameters
bool quiet_mode = parameters.Get<bool>("quiet");
bool quick_mode = parameters.Get<bool>("quick");
int num_runs = parameters.Get<int>("num-runs");
double tol = parameters.Get<double>("tol");
std::string validation = parameters.Get<std::string>("validation");
util::Info info("HITS", parameters, graph);

util::CpuTimer cpu_timer, total_timer;
cpu_timer.Start();
total_timer.Start();

// Allocate problem specific host data
ValueT *h_hrank = new ValueT[graph.nodes];
ValueT *h_arank = new ValueT[graph.nodes];

// Allocate problem and enactor on GPU, and initialize them
ProblemT problem(parameters);
EnactorT enactor;
GUARD_CU(problem.Init(graph, target));
GUARD_CU(enactor.Init(problem, target));

cpu_timer.Stop();
parameters.Set("preprocess-time", cpu_timer.ElapsedMillis());

for (int run_num = 0; run_num < num_runs; ++run_num) {
GUARD_CU(problem.Reset(target));
GUARD_CU(enactor.Reset(graph.nodes, target));

util::PrintMsg("__________________________", !quiet_mode);

cpu_timer.Start();
GUARD_CU(enactor.Enact());
cpu_timer.Stop();
info.CollectSingleRun(cpu_timer.ElapsedMillis());

util::PrintMsg(
"--------------------------\nRun " + std::to_string(run_num) +
" elapsed: " + std::to_string(cpu_timer.ElapsedMillis()) +
", #iterations = " +
std::to_string(enactor.enactor_slices[0].enactor_stats.iteration),
!quiet_mode);

if (validation == "each") {
GUARD_CU(problem.Extract(h_hrank, h_arank));
SizeT num_errors = Validate_Results(parameters, graph, h_hrank, h_arank,
ref_hrank, ref_arank, false);
}
}

cpu_timer.Start();

GUARD_CU(problem.Extract(h_hrank, h_arank));

if (validation == "last") {
SizeT num_errors = Validate_Results(parameters, graph, h_hrank, h_arank,
ref_hrank, ref_arank, tol, false);

// num_errors stores how many positions are mismatched
// Makes sense to keep this? Would need to sort first.
if (!quiet_mode) {
if (!quick_mode) {
printf("CPU Algorithm Results:\n");
DisplaySolution<GraphT>(ref_hrank, ref_arank, graph.nodes);
printf("\n");
}

printf("GPU Algorithm Results:\n");
DisplaySolution<GraphT>(h_hrank, h_arank, graph.nodes);
}
}

// compute running statistics
info.ComputeTraversalStats(enactor, (VertexT *)NULL);
#ifdef ENABLE_PERFORMANCE_PROFILING
#endif

// Clean up
GUARD_CU(enactor.Release(target));
GUARD_CU(problem.Release(target));
// Release problem specific data, e.g.:
delete[] h_hrank;
h_hrank = NULL;
delete[] h_arank;
h_arank = NULL;

cpu_timer.Stop();
total_timer.Stop();

info.Finalize(cpu_timer.ElapsedMillis(), total_timer.ElapsedMillis());
return retval;
/**
 * @brief C-style entry point for HITS on a CSR-formatted graph.
 *
 * @param[in]  num_nodes   Number of vertices in the input graph.
 * @param[in]  num_edges   Number of edges in the input graph.
 * @param[in]  row_offsets CSR row offsets (length num_nodes + 1 by CSR
 *                         convention — TODO confirm against caller).
 * @param[in]  col_indices CSR column indices (length num_edges).
 * @param[in]  num_iter    Number of HITS iterations to perform.
 * @param[out] hub_ranks   Output hub score per vertex.
 * @param[out] auth_ranks  Output authority score per vertex.
 * \return double Elapsed time for the run.
 *
 * BUG(review): the body below calls hits() with an identical argument
 * list, which resolves to this very function — unconditional self-
 * recursion with no base case, i.e. a stack overflow at runtime. It was
 * presumably meant to dispatch to a templated implementation declared in
 * hits_app.cuh (included above); confirm the intended target and call it
 * with explicit template arguments instead. Note the commit itself is
 * labeled "build failing".
 */
double hits(
const int num_nodes,
const int num_edges,
const int *row_offsets,
const int *col_indices,
const int num_iter,
float *hub_ranks,
float *auth_ranks)
{
// WARNING: infinite recursion — see BUG note in the header comment.
return hits(num_nodes, num_edges, row_offsets, col_indices,
num_iter, hub_ranks, auth_ranks);
}

} // namespace hits
} // namespace app
} // namespace gunrock

// Leave this at the end of the file
// Local Variables:
// mode:c++
Expand Down
Loading

0 comments on commit 02f5589

Please sign in to comment.