From 37fa235785714fb26556d86ea95e0621567e02d8 Mon Sep 17 00:00:00 2001
From: Mark Jenkinson <mark@fmrib.ox.ac.uk>
Date: Fri, 13 Feb 2004 14:05:46 +0000
Subject: [PATCH] Added and fixed up conjugate gradient matrix solver

---
 miscmaths.cc | 38 ++++++++++++++++++++++++++++++++++----
 miscmaths.h  | 11 +++++++++--
 2 files changed, 43 insertions(+), 6 deletions(-)

diff --git a/miscmaths.cc b/miscmaths.cc
index c047a33..df4ac77 100644
--- a/miscmaths.cc
+++ b/miscmaths.cc
@@ -10,6 +10,7 @@
 
 #include "miscmaths.h"
 #include "stdlib.h"
+#include "newmatio.h"
 
 using namespace std;
 
@@ -1647,28 +1648,49 @@ ReturnMatrix read_vest(string p_fname)
   return p_mat;
 }
 
-int conjgrad(ColumnVector x, const Matrix& A, const ColumnVector& b, int maxit)
+int conjgrad(ColumnVector& x, const Matrix& A, const ColumnVector& b, int maxit,
+	     float reltol)
 {
   // solves:  A * x = b    (for x)
   // implementation of algorithm in Golub and Van Loan (3rd ed, page 527)
   ColumnVector rk1, rk2, pk, apk;
-  double betak, alphak, rk1rk1=0, rk2rk2;
+  double betak, alphak, rk1rk1=0, rk2rk2, r00=0;
   int k=0;
   rk1 = b - A*x;  // a *big* calculation
   for (int n=1; n<=maxit; n++) {
-    //if (sufficiently accurate) break;
     k++;
     if (k==1) {
       pk = rk1;
       rk1rk1 = (rk1.t() * rk1).AsScalar();
+      r00=rk1rk1;
     } else {
       rk2rk2 = rk1rk1;  // from before
       rk1rk1 = (rk1.t() * rk1).AsScalar();
+      if (rk2rk2<1e-10*rk1rk1) {
+	cerr << "WARNING:: Conj Grad - low denominator (rk2rk2)" << endl; 
+	if (rk2rk2<=0) {
+	  cerr << "Aborting conj grad ..." << endl;
+	  return 1;  
+	}
+      }
       betak = rk1rk1 / rk2rk2;
       pk = rk1 + betak * pk;  // note RHS pk is p(k-1) in algorithm
     }  
+    // stop if sufficient accuracy is achieved
+    if (rk1rk1<reltol*reltol*r00) return 0;
+
     apk = A * pk;  // the *big* calculation in this algorithm
-    alphak = rk1rk1 / (pk*apk).AsScalar();
+
+    ColumnVector pap = pk.t() * apk;
+    if (pap.AsScalar()<0) { 
+      cerr << "ERROR:: Conj Grad - negative eigenvalue found (matrix must be symmetric and positive-definite)\nAborting ... " << endl; 
+      return 2;
+    } else if (pap.AsScalar()<1e-10) { 
+      cerr << "WARNING:: Conj Grad - nearly null eigenvector found (terminating early)" << endl; 
+      return 1;
+    } else {
+      alphak = rk1rk1 / pap.AsScalar();
+    }
     x = x + alphak * pk;  // note LHS is x(k) and RHS is x(k-1) in algorithm
     rk2 = rk1;  // update prior to the next step
     rk1 = rk1 - alphak * apk;  // note LHS is r(k) in algorithm
@@ -1676,6 +1698,14 @@ int conjgrad(ColumnVector x, const Matrix& A, const ColumnVector& b, int maxit)
   return 0;
 }
 
+int conjgrad(ColumnVector& x, const Matrix& A, const ColumnVector& b, int maxit)
+{ 
+  return conjgrad(x,A,b,maxit,1e-10);
+}
+
+
+
+
 vector<float> ColumnVector2vector(const ColumnVector& col)
 {
   vector<float> vec(col.Nrows());
diff --git a/miscmaths.h b/miscmaths.h
index 64af9ed..557ce9d 100644
--- a/miscmaths.h
+++ b/miscmaths.h
@@ -195,8 +195,15 @@ namespace MISCMATHS {
   void symm_orth(Matrix &Mat);
   void powerspectrum(const Matrix &Mat1, Matrix &Result, bool useLog);
 
-  int conjgrad(ColumnVector x, const Matrix& A, const ColumnVector& b, 
-	       int maxit=3);   // solves (for x)   A * x = b 
+  // Conjugate Gradient methods to solve for x in:   A * x = b 
+  // A must be symmetric and positive definite
+  int conjgrad(ColumnVector& x, const Matrix& A, const ColumnVector& b, 
+	       int maxit=3);
+  // allow specification of reltol = relative tolerance of residual error
+  //  (stops when error < reltol * initial error)
+  int conjgrad(ColumnVector& x, const Matrix& A, const ColumnVector& b, 
+	       int maxit, float reltol);
+  
 
   vector<float> ColumnVector2vector(const ColumnVector& col);
   
-- 
GitLab