Commit d3274330 authored by Jesper Andersson

Added new functionality to SpMat class + Improved the Levenberg-Marquardt code in nonlin

parent eebc1c58
@@ -90,6 +90,7 @@ public:
  SpMat(unsigned int m, unsigned int n) : _m(m), _n(n), _nz(0), _ri(n), _val(n) {}
  SpMat(unsigned int m, unsigned int n, const unsigned int *irp, const unsigned int *jcp, const double *sp);
  SpMat(const NEWMAT::GeneralMatrix& M);
  ~SpMat() {}
  unsigned int Nrows() const {return(_m);}
  unsigned int Ncols() const {return(_n);}
......
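The new `SpMat(const NEWMAT::GeneralMatrix& M)` constructor is the SpMat addition the commit message refers to. A minimal usage sketch, assuming (the implementation is not shown in this hunk) that it performs a dense-to-sparse conversion of a NEWMAT matrix; the header path is also an assumption:

#include "newmat.h"
#include "SpMat.h"

// Hypothetical usage of the new constructor: build a sparse matrix
// from a mostly-zero dense NEWMAT matrix.
NEWMAT::Matrix D(3,3);
D = 0.0;
D(1,1) = 2.0;
D(3,2) = -1.0;
SpMat S(D);   // New in this commit: construct directly from a GeneralMatrix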
@@ -290,6 +290,58 @@ NonlinOut nonlin(const NonlinParam& p, const NonlinCF& cfo)
// Main routine for Levenberg-Marquardt optimisation
NonlinOut levmar(const NonlinParam& p, const NonlinCF& cfo)
{
  // Calculate initial values
  p.SetCF(cfo.cf(p.Par()));              // Cost-function evaluated at current parameters
  bool success = true;                   // True if last step decreased CF
  double olambda = 0.0;                  // How much the diagonal of H was nudged last time
  ColumnVector g;                        // Gradient
  boost::shared_ptr<BFMatrix> H;         // Hessian

  while (p.NextIter(success)) {
    if (success) {                       // If last attempt decreased cost-function
      g = cfo.grad(p.Par());             // Gradient evaluated at current parameters
      H = cfo.hess(p.Par(),H);           // Hessian evaluated at current parameters
    }
    for (int i=1; i<=p.NPar(); i++) {    // Nudge the diagonal of H
      if (p.GaussNewtonType() == LM_LM) {        // If Levenberg-Marquardt
        H->AddTo(i,i,(p.Lambda()-olambda)*H->Peek(i,i));
      }
      else if (p.GaussNewtonType() == LM_L) {    // If Levenberg
        H->AddTo(i,i,p.Lambda()-olambda);
      }
    }
    ColumnVector step = -H->SolveForx(g,SYM_POSDEF,p.EquationSolverTol(),p.EquationSolverMaxIter());
    double ncf = cfo.cf(p.Par()+step);
    if (success = (ncf < p.CF())) {      // If last step successful (assignment intended)
      olambda = 0.0;                     // Pristine Hessian, so no need to undo old lambda
      p.SetPar(p.Par()+step);            // Set attempt as new parameters
      p.SetLambda(p.Lambda()/10.0);      // Decrease nudge factor
      // Check for convergence based on small decrease of cf
      if (zero_cf_diff_conv(p.CF(),ncf,p.FractionalCFTolerance())) {
        p.SetCF(ncf); p.SetStatus(NL_CFCONV); return(p.Status());
      }
      p.SetCF(ncf);                      // Store value of cost-function
    }
    else {                               // If last step was unsuccessful
      olambda = p.Lambda();              // Returning to same H, so must undo old lambda
      p.SetLambda(10.0*p.Lambda());      // Increase nudge factor
      p.SetCF(p.CF());                   // Push another copy of best cost-function value thus far
      // Check for convergence based on _really_ large lambda
      if (p.Lambda() > p.LambdaConvergenceCriterion()) {
        p.SetStatus(NL_LCONV); return(p.Status());
      }
    }
  }
  // Getting here means we did too many iterations
  p.SetStatus(NL_MAXITER);
  return(p.Status());
}
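For reference (not part of the commit): the loop above implements standard Levenberg / Levenberg-Marquardt damping. With damping factor lambda it effectively solves (H + lambda*diag(H))*step = -g in the Marquardt (LM_LM) case and (H + lambda*I)*step = -g in the Levenberg (LM_L) case, multiplying lambda by ten after a failed step and dividing it by ten after a successful one. Because a failed step re-uses the already-nudged Hessian, olambda records the previous nudge so it can be compensated for. A minimal scalar sketch of that bookkeeping (all values hypothetical); note that the additive undo is exact, while the multiplicative undo acts on the already-nudged diagonal and so matches (1+lambda)*H(i,i) only to first order in lambda:

#include <iostream>

int main()
{
  double h = 4.0;           // Un-nudged diagonal element H(i,i)
  double lambda = 0.1;      // Current damping factor
  double olambda = 0.0;     // Nudge already applied to h

  // Levenberg (LM_L, additive): H(i,i) -> H(i,i) + lambda
  double h_l = h + (lambda - olambda);   // == h + lambda

  // After a failed step the same H is re-used and lambda grows tenfold;
  // olambda lets the additive nudge be undone exactly.
  olambda = lambda;
  lambda *= 10.0;
  h_l += (lambda - olambda);             // == h + lambda again, exact

  // Levenberg-Marquardt (LM_LM, multiplicative): H(i,i) -> (1+lambda)*H(i,i).
  // The re-nudge scales the already-nudged value, giving
  // (1+olambda)*(1+lambda-olambda)*h rather than (1+lambda)*h.
  double h_lm = h + (0.1 - 0.0)*h;       // First nudge: (1+0.1)*h = 4.4
  h_lm += (1.0 - 0.1)*h_lm;              // Re-nudge after a failed step
  std::cout << h_lm << " vs " << (1.0 + 1.0)*h << std::endl;  // 8.36 vs 8

  return 0;
}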
/*
// Main routine for Levenberg-Marquardt optimisation
NonlinOut levmar(const NonlinParam& p, const NonlinCF& cfo)
{
// Calculate initial values
@@ -338,7 +390,7 @@ NonlinOut levmar(const NonlinParam& p, const NonlinCF& cfo)
return(p.Status());
}
*/
// Main routine for conjugate-gradient optimisation. The
// implementation follows that of Numerical Recipes
// reasonably closely.
......
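The body of the conjugate-gradient routine is elided above. For orientation only, here is a sketch of the Polak-Ribiere direction update used by the Numerical Recipes routine (frprmn) that the comment says this code follows; whether this implementation chooses Polak-Ribiere or Fletcher-Reeves is not visible here, and all names below are hypothetical:

#include <algorithm>
#include <vector>

// Sketch of one Polak-Ribiere conjugate-gradient direction update.
// g: previous gradient, gnew: current gradient, d: search direction,
// updated in place. All hypothetical, since the routine body is elided.
void cg_direction(const std::vector<double>& g,
                  const std::vector<double>& gnew,
                  std::vector<double>& d)
{
  double num = 0.0, den = 0.0;
  for (std::size_t i = 0; i < g.size(); i++) {
    num += gnew[i]*(gnew[i] - g[i]);     // Polak-Ribiere numerator
    den += g[i]*g[i];
  }
  const double beta = (den > 0.0) ? std::max(0.0, num/den) : 0.0;
  for (std::size_t i = 0; i < d.size(); i++) {
    d[i] = -gnew[i] + beta*d[i];         // New conjugate search direction
  }
}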