Commit d564f1ae authored by inhuszar
Browse files

Made small optimisations and changed the log.

parent 647fe406
......@@ -238,7 +238,7 @@ class GNOptimiserDiffusion(Optimiser):
start_time = time()
ndim = self.fixed.domain.ndim
numel = self.fixed.domain.numel
self.f0 = self._get_total_cost()
self.f0 = [self._get_total_cost()]
format = "csr"
# Create regularisation
......@@ -250,7 +250,9 @@ class GNOptimiserDiffusion(Optimiser):
pc = self.transformation.domain.get_physical_coordinates()
chain = self.transformation.domain.offset + \
self.transformation.domain.chain
invchain = chain.inverse()
invchain = None
if self.transformation.mode == NL_ABS:
invchain = chain.inverse()
target = self.costs[0].target
edgemask = np.zeros(target.vshape)
......@@ -263,23 +265,25 @@ class GNOptimiserDiffusion(Optimiser):
while True:
niter += 1
source = self.costs[0].source # evaluate only once!
# Obtain cost differential
S = self.costs[0].source - target
S = source - target
S.order = VOXEL_MAJOR
S = S.data.reshape(numel, -1, 1)
# Apply mask on the dissimilarity vector (S)
combined_mask = self.costs[0].combine_masks(
target_mask=target.mask,
source_mask=self.costs[0].source.mask
source_mask=source.mask
)
S = S * combined_mask.ravel()[:, np.newaxis, np.newaxis]
# If the transformation parameters are vectors in physical space,
# then so should be the cost differentials. I.e. if the tx is
# absolute, map the derivatives.
dS = self.costs[0].dx().reshape(-1, ndim) # rel
dS = self.costs[0].dx(source=source).reshape(-1, ndim) # rel
dS = dS.reshape((numel, -1, ndim))
dS[edge, ...] = 0 # Neumann
# S[edge, ...] = 0 # Dirichlet
# Note: dx() already applied the mask!
# dS = dS * combined_mask.ravel()[:, np.newaxis, np.newaxis]
SdS = np.mean(S * dS, axis=1)
......@@ -305,7 +309,9 @@ class GNOptimiserDiffusion(Optimiser):
# Solve linear system
updates = sp.linalg.minres(A, b)[0] # rel or abs
self.log(str((np.min(updates), np.max(updates))))
self.log(str((
np.percentile(updates, 5), np.percentile(updates, 95))))
# self.log(str((np.min(updates), np.max(updates))))
# Update parameters and evaluate solution and convergence
if self.transformation.mode == NL_ABS:
......@@ -313,16 +319,13 @@ class GNOptimiserDiffusion(Optimiser):
updates = chain.map_vector(vects=updates, coords=vc)
updates = updates.T.ravel()
self.transformation.parameters.parameters[:] += updates
f = self._get_total_cost()
if self._converged(niter, start_time, updates, f):
if self._converged(niter, start_time, updates):
break
else:
self.f0 = f
# As the transformation parameters are updated in-place, there is no
# need to return them from this function.
def _converged(self, niter, start_time, dp, f):
def _converged(self, niter, start_time, dp):
# Convergence by reaching maximum iteration count
if (self.maxiter > 0) and (niter >= self.maxiter):
......@@ -339,17 +342,27 @@ class GNOptimiserDiffusion(Optimiser):
if (self.xtol_abs > 0) and (dp < self.xtol_abs):
return XTOL_ABS_REACHED
if (self.costval is None) and (self.ftol_rel is None) and \
(self.ftol_abs is None):
return False
f = self._get_total_cost()
# Convergence by reaching a specific cost value
if self.costval and (f <= self.costval):
return COSTVAL_REACHED
# Convergence by diminishing relative cost update
if (self.ftol_rel > 0) and \
(np.abs(f - self.f0) < np.abs(self.ftol_rel * self.f0)):
(np.abs(f - self.f0[0]) < np.abs(self.ftol_rel * self.f0[0])):
return FTOL_REL_REACHED
# Convergence by diminishing absolute cost update
if (self.ftol_abs > 0) and (f - self.f0 < self.ftol_abs):
if (self.ftol_abs > 0) and (f - self.f0[0] < self.ftol_abs):
return FTOL_ABS_REACHED
# No convergence yet -> update the last cost value
self.f0[0] = f
return False
def _get_total_cost(self):
"""
Calculates the total scalar cost, which is the sum of all image
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment