From d4686fb3f1fea286cefbe0e746d7af0eb35f24e1 Mon Sep 17 00:00:00 2001
From: Jesper Andersson <jesper@fmrib.ox.ac.uk>
Date: Wed, 8 Apr 2009 14:19:50 +0000
Subject: [PATCH] Explicitly prefixed all sqrt calls with std::

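Unqualified sqrt can resolve to the C library's double sqrt(double) on some
implementations, and whether <cmath> makes sqrt visible in the global
namespace at all is implementation-defined, so the unqualified calls were
not portable. Qualifying every call as std::sqrt guarantees the C++
overload set in namespace std is used.

Besides the std:: prefixes, this commit adds the <ctime> include and
comments out the debugging output in sccngr() that wrote gradient/step
files to a hard-coded desktop path.

A minimal sketch of the portability issue (illustration only, not part of
nonlin.cpp; the vecnorm helper is just an example name):

    #include <cmath>

    double vecnorm(double x, double y)
    {
      // An unqualified sqrt(x*x + y*y) may fail to compile on strict
      // implementations where <cmath> only declares std::sqrt; the
      // qualified call below always finds the std overloads.
      return std::sqrt(x*x + y*y);
    }
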
---
 nonlin.cpp | 26 +++++++++++++++++---------
 1 file changed, 17 insertions(+), 9 deletions(-)

diff --git a/nonlin.cpp b/nonlin.cpp
index e13e58b..ed5be3a 100644
--- a/nonlin.cpp
+++ b/nonlin.cpp
@@ -1,5 +1,6 @@
 // Definitions for module nonlin
 
+#include <ctime>
 #include <iostream>
 #include <fstream>
 #include <iomanip>
@@ -263,6 +264,7 @@ ColumnVector operator*(const VarmetMatrix& m, const ColumnVector& v)
   }
 }
 
+
 // Gateway function to routines for non-linear optimisation
 
 NonlinOut nonlin(const NonlinParam& p, const NonlinCF& cfo)
@@ -480,8 +482,9 @@ NonlinOut sccngr(const NonlinParam& np, const NonlinCF& cfo)
 
   while (np.NextIter()) {
     double p2 = DotProduct(p,p);                            // p'*p, Temporary variable to save some time
-    if (success == true) {                                  // If last step led to reduction of cos-function
-      double sigma_k = sigma/sqrt(p2);                      // Normalised step-length when estimating H*p
+    if (success == true) {                                  // If last step led to reduction of cost-function
+      double sigma_k = sigma/std::sqrt(p2);                      // Normalised step-length when estimating H*p
+      // cout << "np.NIter() = " << np.NIter() << ", p2 = " << p2 << ", sigma_k = " << sigma_k << endl;
       s = (cfo.grad(np.Par()+sigma_k*p) + r) / sigma_k;     // Approximation to H*p
       delta = DotProduct(p,s);                              // Approximation to p'*H*p
     }
@@ -497,12 +500,16 @@ NonlinOut sccngr(const NonlinParam& np, const NonlinCF& cfo)
     double alpha = mu/delta;                              // Step size in direction p
     double tmp_cf = cfo.cf(np.Par()+alpha*p);             // Value of cost-function at attempted new point
 
+    // cout << "np.NIter() " << np.NIter() << ", delta = " << delta << ", mu = " << mu << ", alpha = " << alpha << endl;
+
+    /*
     char fname[100]; 
-    sprintf(fname,"/Users/jesper/Desktop/gradient_%02d.txt",np.NIter());
+    sprintf(fname,"scg_debug_gradient_%02d.txt",np.NIter());
     print_newmat(r,fname);
-    sprintf(fname,"/Users/jesper/Desktop/step_%02d.txt",np.NIter());
+    sprintf(fname,"scg_debug_step_%02d.txt",np.NIter());
     ColumnVector  step(p); step *= alpha;
     print_newmat(step,fname);
+    */
     
     
     double Delta = 2.0*delta*(np.CF()-tmp_cf) / (mu*mu);  // > 0 means attempted step reduced cost-function
@@ -511,7 +518,7 @@ NonlinOut sccngr(const NonlinParam& np, const NonlinCF& cfo)
       np.SetPar(np.Par() + alpha*p);                      // Update best set of parameters
       lambda_bar = 0.0;
       success = true;
-      if ((np.NIter()%np.NPar()) == 0) {                   // If npar iterations since last resetting of directions
+      if ((np.NIter()%np.NPar()) == 0) {                  // If npar iterations since last resetting of directions
         r = -cfo.grad(np.Par());                          // Reset search direction to negative gradient
         p = r;
       }
@@ -519,6 +526,7 @@ NonlinOut sccngr(const NonlinParam& np, const NonlinCF& cfo)
         ColumnVector oldr = r;
         r = -cfo.grad(np.Par());
         double beta = (DotProduct(r,r)-DotProduct(oldr,r)) / mu;
+        // cout << "np.NIter() = " << np.NIter() << ", beta = " << beta << endl;
         p = r + beta*p;                              // New search direction
       } 
       if (Delta > 0.75) {                            // If attempted step was \emph{REALLY} good
@@ -633,7 +641,7 @@ LinOut linsrch(// Input
   // First make sure that the step-length suggested
   // by pdir isn't completely unreasonable.
 
-  double totstep=sqrt(DotProduct(dir,dir));
+  double totstep=std::sqrt(DotProduct(dir,dir));
   ColumnVector pdir(dir);
   if (totstep > sm) {pdir *= sm/totstep;}
 
@@ -690,7 +698,7 @@ LinOut linsrch(// Input
     y << f1-fp0*l1-f0 << f2-fp0*l2-f0;
     ColumnVector b = X.i()*y;
     // Find value for lambda that yield minimum of cubic
-    *lambda = (-b.element(1) + sqrt(std::pow(b.element(1),2.0) - 3.0*b.element(0)*fp0)) / (3.0*b.element(0));
+    *lambda = (-b.element(1) + std::sqrt(std::pow(b.element(1),2.0) - 3.0*b.element(0)*fp0)) / (3.0*b.element(0));
     // Make sure new lambda is 0.1*old_l < lambda < 0.5*old_l
     *lambda = std::max(lmin*l1,*lambda);
     *lambda = std::min(lmax*l1,*lambda);
@@ -786,7 +794,7 @@ LinOut linmin(// Input
       return(LM_CONV);
     }
     // Try parabolic fit, but not before third iteration
-    double tmp = 10.0*sqrt(MISCMATHS::EPS);
+    double tmp = 10.0*std::sqrt(MISCMATHS::EPS);
     if (std::abs(ostep) > tol/2.0 &&           // If second to last step big enough
         std::abs(x->first-w.first) > tmp && 
         std::abs(x->first-v.first) > tmp && 
@@ -903,7 +911,7 @@ pair<double,double> bracket(// Input
       return(p_l);
     }
     // Let's see if a parabolic might help us
-    if (std::abs(l2-l1) > 10.0*sqrt(MISCMATHS::EPS)) {
+    if (std::abs(l2-l1) > 10.0*std::sqrt(MISCMATHS::EPS)) {
       X << std::pow(l1,2.0) << l1 << std::pow(l2,2.0) << l2;
       y << cf1 << cf2;
       ColumnVector b = X.i()*y;
-- 
GitLab