Prefer C++ method std::log over C method log #3270

Merged (1 commit, Aug 8, 2019)
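This PR mechanically replaces calls to the C math functions log and logf with std::log from <cmath>. The practical benefit is type correctness: the C function takes and returns double only, while std::log is an overload set (float, double, long double), so float expressions keep their type, logf becomes unnecessary, and narrowing casts can eventually go away. A minimal standalone sketch of the difference (hypothetical variable names, modeled on the vfh_nn_classifier.h hunk below):

    #include <cmath>
    #include <type_traits>

    int main ()
    {
      float radius = 0.03f, min_score = 0.5f;

      // std::log has a float overload, so the whole expression stays in float;
      // with the C function log(), the result would come back as double and
      // need a narrowing cast (as in the code being changed below).
      auto gaussian_param = - (radius / std::log (min_score));
      static_assert (std::is_same<decltype (gaussian_param), float>::value,
                     "the float overload of std::log keeps the expression in float");
      return 0;
    }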
2 changes: 1 addition & 1 deletion apps/include/pcl/apps/vfh_nn_classifier.h
@@ -246,7 +246,7 @@ namespace pcl
// compute the VFH feature for this point cloud
FeatureCloudPtr vfhs = computeFeature (testing_data);
// compute gaussian parameter producing the desired minimum score (around 50 for the default values)
float gaussian_param = - static_cast<float> (radius / log (min_score));
float gaussian_param = - static_cast<float> (radius / std::log (min_score));
// TODO accept result to be filled in by reference
return classifier_.classify(vfhs->points.at (0), radius, gaussian_param);
}
6 changes: 3 additions & 3 deletions common/include/pcl/common/impl/norms.hpp
@@ -144,7 +144,7 @@ B_Norm (FloatVectorT a, FloatVectorT b, int dim)
norm += std::sqrt (a[i] * b[i]);

if (norm > 0)
result = -logf (norm);
result = -std::log (norm);
else
result = 0;

@@ -185,7 +185,7 @@ Div_Norm (FloatVectorT a, FloatVectorT b, int dim)

for (int i = 0; i < dim; ++i)
if ((a[i] / b[i]) > 0)
norm += (a[i] - b[i]) * logf (a[i] / b[i]);
norm += (a[i] - b[i]) * std::log (a[i] / b[i]);
else
norm += 0;
return norm;
@@ -221,7 +221,7 @@ KL_Norm (FloatVectorT a, FloatVectorT b, int dim)

for (int i = 0; i < dim; ++i)
if ( (b[i] != 0) && ((a[i] / b[i]) > 0) )
norm += a[i] * logf (a[i] / b[i]);
norm += a[i] * std::log (a[i] / b[i]);
else
norm += 0;
return norm;
14 changes: 7 additions & 7 deletions cuda/sample_consensus/src/msac.cpp
@@ -157,15 +157,15 @@ pcl_cuda::MultiRandomSampleConsensus<Storage>::computeModel (int debug_verbosity
n_best_inliers_count = n_inliers_count;
good_coeff = cur_iteration;

// Compute the k parameter (k=log(z)/log(1-w^n))
// Compute the k parameter (k=std::log(z)/std::log(1-w^n))
float w = (float)((float)n_best_inliers_count / (float)nr_remaining_points);
float p_no_outliers = 1.0f - w;
p_no_outliers = (std::max) (std::numeric_limits<float>::epsilon (), p_no_outliers); // Avoid division by -Inf
p_no_outliers = (std::min) (1.0f - std::numeric_limits<float>::epsilon (), p_no_outliers); // Avoid division by 0.
if (p_no_outliers == 1.0f)
k++;
else
k = log (1.0f - probability_) / log (p_no_outliers);
k = std::log (1.0f - probability_) / std::log (p_no_outliers);
}

//fprintf (stderr, "[pcl_cuda::MultiRandomSampleConsensus::computeModel] Trial %d out of %f: %d inliers (best is: %d so far).\n",
@@ -184,18 +184,18 @@ pcl_cuda::MultiRandomSampleConsensus<Storage>::computeModel (int debug_verbosity
//if (nr_remaining_points != nr_remaining_points_before_delete)
{

// Compute the k parameter (k=log(z)/log(1-w^n))
// Compute the k parameter (k=std::log(z)/std::log(1-w^n))
float w = (float)((float)min_nr_in_shape / (float)nr_remaining_points);
float p_no_outliers = 1.0f - w;
p_no_outliers = (std::max) (std::numeric_limits<float>::epsilon (), p_no_outliers); // Avoid division by -Inf
p_no_outliers = (std::min) (1.0f - std::numeric_limits<float>::epsilon (), p_no_outliers); // Avoid division by 0.
if (p_no_outliers != 1.0f)
{
if (log (1.0f - probability_) / log (p_no_outliers) < valid_iterations) // we won't find a model with min_nr_in_shape points anymore...
if (std::log (1.0f - probability_) / std::log (p_no_outliers) < valid_iterations) // we won't find a model with min_nr_in_shape points anymore...
find_no_better = true;
else
if (debug_verbosity_level > 1)
std::cerr << "------->" << log (1.0f - probability_) / log (p_no_outliers) << " -vs- " << valid_iterations << std::endl;
std::cerr << "------->" << std::log (1.0f - probability_) / std::log (p_no_outliers) << " -vs- " << valid_iterations << std::endl;
}
}

@@ -274,15 +274,15 @@ pcl_cuda::MultiRandomSampleConsensus<Storage>::computeModel (int debug_verbosity
n_best_inliers_count = n_inliers_count;
good_coeff = b * iterations_per_batch_ + j;

// Compute the k parameter (k=log(z)/log(1-w^n))
// Compute the k parameter (k=std::log(z)/std::log(1-w^n))
float w = (float)((float)n_best_inliers_count / (float)nr_remaining_points);
float p_no_outliers = 1.0f - w;
p_no_outliers = (std::max) (std::numeric_limits<float>::epsilon (), p_no_outliers); // Avoid division by -Inf
p_no_outliers = (std::min) (1.0f - std::numeric_limits<float>::epsilon (), p_no_outliers); // Avoid division by 0.
if (p_no_outliers == 1.0f)
k++;
else
k = log (1.0f - probability_) / log (p_no_outliers);
k = std::log (1.0f - probability_) / std::log (p_no_outliers);
}

}
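For context (not part of the diff): the recurring comment k = log(z)/log(1-w^n) in these sample-consensus files is the standard RANSAC stopping criterion. With inlier fraction w and sample size n, a random sample is outlier-free with probability w^n, so after k iterations the probability of never having drawn a clean sample is (1 - w^n)^k; requiring that to fall below z = 1 - probability_ gives k = log(1 - probability_) / log(1 - w^n), which is exactly what the changed lines compute. A small standalone sketch with hypothetical values:

    #include <algorithm>
    #include <cmath>
    #include <cstdio>
    #include <limits>

    int main ()
    {
      double probability = 0.99;  // desired confidence (probability_ in the code)
      double w = 0.3;             // assumed inlier fraction
      int    sample_size = 3;     // points drawn per model hypothesis

      double p_no_outliers = 1.0 - std::pow (w, sample_size);
      // Clamp away from 0 and 1, as the changed code does, to keep the logarithms finite.
      p_no_outliers = (std::max) (std::numeric_limits<double>::epsilon (), p_no_outliers);
      p_no_outliers = (std::min) (1.0 - std::numeric_limits<double>::epsilon (), p_no_outliers);

      double k = std::log (1.0 - probability) / std::log (p_no_outliers);
      std::printf ("k = %.1f\n", k);  // ~168.2, so about 169 iterations for these values
      return 0;
    }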
14 changes: 7 additions & 7 deletions cuda/sample_consensus/src/multi_ransac.cu
@@ -178,15 +178,15 @@ namespace pcl
n_best_inliers_count = n_inliers_count;
good_coeff = cur_iteration;

// Compute the k parameter (k=log(z)/log(1-w^n))
// Compute the k parameter (k=std::log(z)/std::log(1-w^n))
float w = (float)((float)n_best_inliers_count / (float)nr_remaining_points);
float p_no_outliers = 1.0f - pow (w, 1.0f);
p_no_outliers = (std::max) (std::numeric_limits<float>::epsilon (), p_no_outliers); // Avoid division by -Inf
p_no_outliers = (std::min) (1.0f - std::numeric_limits<float>::epsilon (), p_no_outliers); // Avoid division by 0.
if (p_no_outliers == 1.0f)
k++;
else
k = log (1.0f - probability_) / log (p_no_outliers);
k = std::log (1.0f - probability_) / std::log (p_no_outliers);
}

//fprintf (stderr, "[pcl::cuda::MultiRandomSampleConsensus::computeModel] Trial %d out of %f: %d inliers (best is: %d so far).\n",
@@ -205,18 +205,18 @@ namespace pcl
//if (nr_remaining_points != nr_remaining_points_before_delete)
{

// Compute the k parameter (k=log(z)/log(1-w^n))
// Compute the k parameter (k=std::log(z)/std::log(1-w^n))
float w = (float)((float)min_nr_in_shape / (float)nr_remaining_points);
float p_no_outliers = 1.0f - pow (w, 1.0f);
p_no_outliers = (std::max) (std::numeric_limits<float>::epsilon (), p_no_outliers); // Avoid division by -Inf
p_no_outliers = (std::min) (1.0f - std::numeric_limits<float>::epsilon (), p_no_outliers); // Avoid division by 0.
if (p_no_outliers != 1.0f)
{
if (log (1.0f - probability_) / log (p_no_outliers) < valid_iterations) // we won't find a model with min_nr_in_shape points anymore...
if (std::log (1.0f - probability_) / std::log (p_no_outliers) < valid_iterations) // we won't find a model with min_nr_in_shape points anymore...
find_no_better = true;
else
if (debug_verbosity_level > 1)
std::cerr << "------->" << log (1.0f - probability_) / log (p_no_outliers) << " -vs- " << valid_iterations << std::endl;
std::cerr << "------->" << std::log (1.0f - probability_) / std::log (p_no_outliers) << " -vs- " << valid_iterations << std::endl;
}
}

@@ -297,15 +297,15 @@ namespace pcl
n_best_inliers_count = n_inliers_count;
good_coeff = b * iterations_per_batch_ + j;

// Compute the k parameter (k=log(z)/log(1-w^n))
// Compute the k parameter (k=std::log(z)/std::log(1-w^n))
float w = (float)((float)n_best_inliers_count / (float)nr_remaining_points);
float p_no_outliers = 1.0f - pow (w, 1.0f);
p_no_outliers = (std::max) (std::numeric_limits<float>::epsilon (), p_no_outliers); // Avoid division by -Inf
p_no_outliers = (std::min) (1.0f - std::numeric_limits<float>::epsilon (), p_no_outliers); // Avoid division by 0.
if (p_no_outliers == 1.0f)
k++;
else
k = log (1.0f - probability_) / log (p_no_outliers);
k = std::log (1.0f - probability_) / std::log (p_no_outliers);
}

}
4 changes: 2 additions & 2 deletions cuda/sample_consensus/src/ransac.cu
@@ -127,7 +127,7 @@ namespace pcl
good_coeff = iterations_;
#endif

// Compute the k parameter (k=log(z)/log(1-w^n))
// Compute the k parameter (k=std::log(z)/std::log(1-w^n))
float w = (float)((float)n_best_inliers_count / (float)sac_model_->getIndices ()->size ());
// float p_no_outliers = 1.0 - pow (w, (float)selection.size ());
float p_no_outliers = 1.0f - pow (w, (float)1);
@@ -136,7 +136,7 @@
if (p_no_outliers == 1.0f)
k++;
else
k = log (1.0f - probability_) / log (p_no_outliers);
k = std::log (1.0f - probability_) / std::log (p_no_outliers);
}

++iterations_;
2 changes: 1 addition & 1 deletion examples/segmentation/example_supervoxels.cpp
@@ -269,7 +269,7 @@ main (int argc, char ** argv)
////////////////////////////// //////////////////////////////

// If the cloud is organized and we haven't disabled the transform we need to
// check that there are no negative z values, since we use log(z)
// check that there are no negative z values, since we use std::log(z)
if (cloud->isOrganized () && !disable_transform)
{
for (const auto &point : *cloud)
2 changes: 1 addition & 1 deletion features/include/pcl/features/impl/3dsc.hpp
@@ -77,7 +77,7 @@ pcl::ShapeContext3DEstimation<PointInT, PointNT, PointOutT>::initCompute ()
// Fills radii interval based on formula (1) in section 2.1 of Frome's paper
radii_interval_.resize (radius_bins_ + 1);
for (size_t j = 0; j < radius_bins_ + 1; j++)
radii_interval_[j] = static_cast<float> (std::exp (log (min_radius_) + ((static_cast<float> (j) / static_cast<float> (radius_bins_)) * log (search_radius_ / min_radius_))));
radii_interval_[j] = static_cast<float> (std::exp (std::log (min_radius_) + ((static_cast<float> (j) / static_cast<float> (radius_bins_)) * std::log (search_radius_ / min_radius_))));

// Fill theta divisions of elevation
theta_divisions_.resize (elevation_bins_ + 1);
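For context (not part of the diff): the radii_interval_ formula above, which also appears unchanged in usc.hpp below, builds logarithmically spaced radius bins; exp(log(r_min) + (j/J) * log(r_max/r_min)) is just r_min * (r_max/r_min)^(j/J). A small standalone sketch with hypothetical parameter values:

    #include <cmath>
    #include <cstdio>

    int main ()
    {
      // Hypothetical stand-ins for min_radius_, search_radius_ and radius_bins_.
      float min_radius = 0.1f, search_radius = 0.8f;
      int   radius_bins = 4;

      for (int j = 0; j <= radius_bins; ++j)
      {
        float t = float (j) / float (radius_bins);
        // Same expression as the hunk above ...
        float r_log = std::exp (std::log (min_radius) + t * std::log (search_radius / min_radius));
        // ... and its closed form: geometric interpolation between the two bounds.
        float r_geo = min_radius * std::pow (search_radius / min_radius, t);
        std::printf ("bin %d: %.4f  %.4f\n", j, r_log, r_geo);
      }
      return 0;
    }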
2 changes: 1 addition & 1 deletion features/include/pcl/features/impl/brisk_2d.hpp
@@ -114,7 +114,7 @@ pcl::BRISK2DEstimation<PointInT, PointOutT, KeypointT, IntensityT>::generateKern
BriskPatternPoint* pattern_iterator = pattern_points_;

// define the scale discretization:
static const float lb_scale = logf (scalerange_) / logf (2.0);
static const float lb_scale = std::log (scalerange_) / std::log (2.0);
static const float lb_scale_step = lb_scale / (float (scales_));

scale_list_ = new float[scales_];
2 changes: 1 addition & 1 deletion features/include/pcl/features/impl/usc.hpp
@@ -93,7 +93,7 @@ pcl::UniqueShapeContext<PointInT, PointOutT, PointRFT>::initCompute ()
// Fills radii interval based on formula (1) in section 2.1 of Frome's paper
radii_interval_.resize (radius_bins_ + 1);
for (size_t j = 0; j < radius_bins_ + 1; j++)
radii_interval_[j] = static_cast<float> (std::exp (log (min_radius_) + ((static_cast<float> (j) / static_cast<float> (radius_bins_)) * log (search_radius_/min_radius_))));
radii_interval_[j] = static_cast<float> (std::exp (std::log (min_radius_) + ((static_cast<float> (j) / static_cast<float> (radius_bins_)) * std::log (search_radius_/min_radius_))));

// Fill theta didvisions of elevation
theta_divisions_.resize (elevation_bins_+1);
4 changes: 2 additions & 2 deletions gpu/people/include/pcl/gpu/people/tree_train.h
@@ -96,7 +96,7 @@ namespace pcl
for(int li=0;li<NUMLABELS;++li) {
if( h[li] != 0 ) {
double p = double(h[li]) / Ntotal;
entropy -= p*log(p);
entropy -= p*std::log(p);
}
}
return entropy;
@@ -115,7 +115,7 @@
uint64_t Ni = uint64_t(htrue[li]) + uint64_t(hfalse[li]);
if( Ni != 0) {
double p = double(Ni) / Ntotal;
entropy -= p*log(p);
entropy -= p*std::log(p);
}
}
return entropy;
2 changes: 1 addition & 1 deletion io/src/dinast_grabber.cpp
@@ -330,7 +330,7 @@ pcl::DinastGrabber::getXYZIPointCloud ()
if (pixel > A)
pixel = A;
double dy = y*0.1;
double dist = (log (static_cast<double> (pixel / A)) / B - dy) * (7E-07*r3 - 0.0001*r2 + 0.004*r1 + 0.9985) * 1.5;
double dist = (std::log (static_cast<double> (pixel / A)) / B - dy) * (7E-07*r3 - 0.0001*r2 + 0.004*r1 + 0.9985) * 1.5;
double theta_colati = fov_ * r1 * dist_max_2d_;
double c_theta = std::cos (theta_colati);
double s_theta = sin (theta_colati);
10 changes: 5 additions & 5 deletions ml/src/svm.cpp
@@ -1989,7 +1989,7 @@ static void sigmoid_train (
// Initial Point and Initial Fun Value
A = 0.0;

B = log ( (prior0 + 1.0) / (prior1 + 1.0));
B = std::log ( (prior0 + 1.0) / (prior1 + 1.0));

double fval = 0.0;

@@ -2003,9 +2003,9 @@
double fApB = dec_values[i] * A + B;

if (fApB >= 0)
fval += t[i] * fApB + log (1 + std::exp (-fApB));
fval += t[i] * fApB + std::log (1 + std::exp (-fApB));
else
fval += (t[i] - 1) * fApB + log (1 + std::exp (fApB));
fval += (t[i] - 1) * fApB + std::log (1 + std::exp (fApB));
}

int iter = 0;
@@ -2073,9 +2073,9 @@ static void sigmoid_train (
double fApB = dec_values[i] * newA + newB;

if (fApB >= 0)
newf += t[i] * fApB + log (1 + std::exp (-fApB));
newf += t[i] * fApB + std::log (1 + std::exp (-fApB));
else
newf += (t[i] - 1) * fApB + log (1 + std::exp (fApB));
newf += (t[i] - 1) * fApB + std::log (1 + std::exp (fApB));
}

// Check sufficient decrease
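For context (not part of the diff): both branches above evaluate the same term, (t[i] - 1) * fApB + log(1 + exp(fApB)), rewritten so that std::exp never receives a large positive argument; for fApB >= 0 the identity log(1 + exp(x)) = x + log(1 + exp(-x)) is used, which avoids overflow. A minimal sketch of the same trick (hypothetical helper name):

    #include <cassert>
    #include <cmath>

    // Numerically stable log(1 + exp(x)).
    double log_one_plus_exp (double x)
    {
      if (x >= 0)
        return x + std::log (1.0 + std::exp (-x));  // exp(-x) <= 1, cannot overflow
      return std::log (1.0 + std::exp (x));          // exp(x) < 1, cannot overflow
    }

    int main ()
    {
      // The naive form overflows around x ~ 710 for double; the stable one does not.
      assert (std::isfinite (log_one_plus_exp (1000.0)));
      assert (std::fabs (log_one_plus_exp (2.0) - std::log (1.0 + std::exp (2.0))) < 1e-12);
      return 0;
    }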
@@ -413,9 +413,9 @@ namespace pcl
std::vector<float> hr (num_of_branches + 1, 0.f);
for (size_t branch_index = 0; branch_index < (num_of_branches + 1); ++branch_index)
{
hr[branch_index] = static_cast<float>(0.5f * log (std::pow (2 * M_PI, 3)
hr[branch_index] = static_cast<float>(0.5f * std::log (std::pow (2 * M_PI, 3)
* offset_covariances[branch_index].determinant ())
+ 0.5f * log (std::pow (2 * M_PI, 3)
+ 0.5f * std::log (std::pow (2 * M_PI, 3)
* angle_covariances[branch_index].determinant ()));
}

@@ -239,7 +239,7 @@ pcl::recognition::SimpleOctree<NodeData, NodeDataCreator, Scalar>::build (const

// Compute the number of tree levels
if ( arg > 1 )
tree_levels_ = static_cast<int> (ceil (log (arg)/log (2.0)) + 0.5);
tree_levels_ = static_cast<int> (ceil (std::log (arg)/std::log (2.0)) + 0.5);
else
tree_levels_ = 0;

@@ -338,7 +338,7 @@ namespace pcl
if ( 1.0 - p <= 0.0 )
return 1;

return static_cast<int> (log (1.0-success_probability)/log (1.0-p) + 1.0);
return static_cast<int> (std::log (1.0-success_probability)/std::log (1.0-p) + 1.0);
}

inline void
2 changes: 1 addition & 1 deletion recognition/src/ransac_based/orr_octree.cpp
@@ -87,7 +87,7 @@ pcl::recognition::ORROctree::build (const float* bounds, float voxel_size)

// Compute the number of tree levels
if ( arg > 1.0f )
tree_levels_ = static_cast<int> (ceil (log (arg)/log (2.0)) + 0.5);
tree_levels_ = static_cast<int> (ceil (std::log (arg)/std::log (2.0)) + 0.5);
else
tree_levels_ = 0;

12 changes: 6 additions & 6 deletions registration/include/pcl/registration/impl/ndt.hpp
@@ -59,9 +59,9 @@ pcl::NormalDistributionsTransform<PointSource, PointTarget>::NormalDistributions
// Initializes the gaussian fitting parameters (eq. 6.8) [Magnusson 2009]
gauss_c1 = 10.0 * (1 - outlier_ratio_);
gauss_c2 = outlier_ratio_ / pow (resolution_, 3);
gauss_d3 = -log (gauss_c2);
gauss_d1_ = -log ( gauss_c1 + gauss_c2 ) - gauss_d3;
gauss_d2_ = -2 * log ((-log ( gauss_c1 * std::exp ( -0.5 ) + gauss_c2 ) - gauss_d3) / gauss_d1_);
gauss_d3 = -std::log (gauss_c2);
gauss_d1_ = -std::log ( gauss_c1 + gauss_c2 ) - gauss_d3;
gauss_d2_ = -2 * std::log ((-std::log ( gauss_c1 * std::exp ( -0.5 ) + gauss_c2 ) - gauss_d3) / gauss_d1_);

transformation_epsilon_ = 0.1;
max_iterations_ = 35;
@@ -79,9 +79,9 @@ pcl::NormalDistributionsTransform<PointSource, PointTarget>::computeTransformati
// Initializes the gaussian fitting parameters (eq. 6.8) [Magnusson 2009]
gauss_c1 = 10 * (1 - outlier_ratio_);
gauss_c2 = outlier_ratio_ / pow (resolution_, 3);
gauss_d3 = -log (gauss_c2);
gauss_d1_ = -log ( gauss_c1 + gauss_c2 ) - gauss_d3;
gauss_d2_ = -2 * log ((-log ( gauss_c1 * std::exp ( -0.5 ) + gauss_c2 ) - gauss_d3) / gauss_d1_);
gauss_d3 = -std::log (gauss_c2);
gauss_d1_ = -std::log ( gauss_c1 + gauss_c2 ) - gauss_d3;
gauss_d2_ = -2 * std::log ((-std::log ( gauss_c1 * std::exp ( -0.5 ) + gauss_c2 ) - gauss_d3) / gauss_d1_);

if (guess != Eigen::Matrix4f::Identity ())
{
8 changes: 4 additions & 4 deletions sample_consensus/include/pcl/sample_consensus/impl/mlesac.hpp
@@ -130,10 +130,10 @@ pcl::MaximumLikelihoodSampleConsensus<PointT>::computeModel (int debug_verbosity
gamma /= static_cast<double>(sac_model_->getIndices ()->size ());
}

// Find the log likelihood of the model -L = -sum [log (pInlierProb + pOutlierProb)]
// Find the std::log likelihood of the model -L = -sum [std::log (pInlierProb + pOutlierProb)]
double d_cur_penalty = 0;
for (size_t i = 0; i < indices_size; ++i)
d_cur_penalty += log (p_inlier_prob[i] + p_outlier_prob);
d_cur_penalty += std::log (p_inlier_prob[i] + p_outlier_prob);
d_cur_penalty = - d_cur_penalty;

// Better match ?
@@ -151,12 +151,12 @@
if (distance <= 2 * sigma_)
n_inliers_count++;

// Compute the k parameter (k=log(z)/log(1-w^n))
// Compute the k parameter (k=std::log(z)/std::log(1-w^n))
double w = static_cast<double> (n_inliers_count) / static_cast<double> (sac_model_->getIndices ()->size ());
double p_no_outliers = 1 - pow (w, static_cast<double> (selection.size ()));
p_no_outliers = (std::max) (std::numeric_limits<double>::epsilon (), p_no_outliers); // Avoid division by -Inf
p_no_outliers = (std::min) (1 - std::numeric_limits<double>::epsilon (), p_no_outliers); // Avoid division by 0.
k = log (1 - probability_) / log (p_no_outliers);
k = std::log (1 - probability_) / std::log (p_no_outliers);
}

++iterations_;
4 changes: 2 additions & 2 deletions sample_consensus/include/pcl/sample_consensus/impl/msac.hpp
@@ -109,12 +109,12 @@ pcl::MEstimatorSampleConsensus<PointT>::computeModel (int debug_verbosity_level)
if (distance <= threshold_)
++n_inliers_count;

// Compute the k parameter (k=log(z)/log(1-w^n))
// Compute the k parameter (k=std::log(z)/std::log(1-w^n))
double w = static_cast<double> (n_inliers_count) / static_cast<double> (sac_model_->getIndices ()->size ());
double p_no_outliers = 1.0 - pow (w, static_cast<double> (selection.size ()));
p_no_outliers = (std::max) (std::numeric_limits<double>::epsilon (), p_no_outliers); // Avoid division by -Inf
p_no_outliers = (std::min) (1.0 - std::numeric_limits<double>::epsilon (), p_no_outliers); // Avoid division by 0.
k = log (1.0 - probability_) / log (p_no_outliers);
k = std::log (1.0 - probability_) / std::log (p_no_outliers);
}

++iterations_;
@@ -205,7 +205,7 @@ pcl::ProgressiveSampleConsensus<PointT>::computeModel (int debug_verbosity_level
else if (bottom_log == 1)
k_n_star = T_N;
else
k_n_star = static_cast<int> (ceil (log (0.05) / log (bottom_log)));
k_n_star = static_cast<int> (ceil (std::log (0.05) / std::log (bottom_log)));
// It seems weird to have very few iterations, so do have a few (totally empirical)
k_n_star = (std::max)(k_n_star, 2 * m);
}