Commit 9622caec authored by Christoph Arthofer's avatar Christoph Arthofer
Browse files

module issue

parent 88648255
This diff is collapsed.
def writeConfig(step, mod, fpath):
    """! Writes the nonlinear registration parameters for a given resolution level and modalities
    to a file readable by MMORF.

    The six hard-coded per-step parameter tables of the original implementation are collapsed
    into data: step N uses N+1 warp-resolution levels and takes the first N+1 entries of each
    schedule below. The emitted text is byte-identical to the former per-step branches.

    @param step: Resolution level provided as integer (1-6)
    @param mod: Modalities provided as a dictionary with keys 'T1_head_key', 'T2_head_key'
                and 'DTI_tensor_key'; a value of None disables that modality's section
    @param fpath: Output filepath

    @raises ValueError: if step is outside the supported range 1-6
    """
    if not 1 <= step <= 6:
        # Previously an unsupported step crashed later with UnboundLocalError on 's'.
        raise ValueError('step must be between 1 and 6, got {}'.format(step))

    n_levels = step + 1
    # Full schedules over all resolution levels; each step uses a prefix of these.
    lambda_reg = ['4.0e5', '3.7e-1', '3.1e-1', '2.6e-1', '2.2e-1', '1.8e-1', '1.5e-1'][:n_levels]
    fwhm = ['8.0', '8.0', '4.0', '2.0', '1.0', '0.5', '0.25'][:n_levels]
    scaling = ['1', '1'] + ['2'] * (step - 1)
    ones = ['1'] * n_levels
    zeros = ['0'] * n_levels
    bias_reg = ['1e9'] * n_levels

    def row(key, values):
        # One config line; values are space-separated, with a trailing space kept
        # for byte-compatibility with the original hand-written strings.
        return key + ' = ' + ' '.join(values) + ' \n'

    s = (row('warp_res_init', ['32'])
         + row('warp_scaling', scaling)
         + row('lambda_reg', lambda_reg)
         + row('hires', ['3.9'])
         + row('optimiser_max_it_lowres', ['5'])
         + row('optimiser_max_it_hires', ['5']))

    def scalar_section(title, use_mask_ref):
        # Section shared by the T1 and T2 whole-head channels; they differ only in
        # the header comment and in whether the reference mask is used.
        return ('\n'
                '; ' + title + ' \n'
                + row('use_implicit_mask', ['0'])
                + row('use_mask_ref_scalar', use_mask_ref)
                + row('use_mask_mov_scalar', zeros)
                + row('fwhm_ref_scalar', fwhm)
                + row('fwhm_mov_scalar', fwhm)
                + row('lambda_scalar', ones)
                + row('estimate_bias', ['1'])
                + row('bias_res_init', ['32'])
                + row('lambda_bias_reg', bias_reg))

    if mod['T1_head_key'] is not None:
        s += scalar_section('Whole-head T1', ones)
    if mod['T2_head_key'] is not None:
        s += scalar_section('Whole-head T2', zeros)
    if mod['DTI_tensor_key'] is not None:
        s += ('\n'
              '; DTI \n'
              + row('use_mask_ref_tensor', zeros)
              + row('use_mask_mov_tensor', zeros)
              + row('fwhm_ref_tensor', fwhm)
              + row('fwhm_mov_tensor', fwhm)
              + row('lambda_tensor', ones))

    # Context manager guarantees the handle is closed even if the write fails
    # (the original used open/write/close without try/finally).
    with open(fpath, 'w+') as f:
        f.write(s)
\ No newline at end of file
import os
import shutil
import pandas as pd
import nibabel as nib
import numpy as np
import shlex
import subprocess
import sys
from fsl.wrappers import fslmaths,flirt,applyxfm,concatxfm,bet,fast,fslstats
from fsl.wrappers.fnirt import invwarp, applywarp, convertwarp
from file_tree import FileTree
# from fsl.utils.filetree import FileTree
from fsl.utils.fslsub import func_to_cmd
from operator import itemgetter
import tempfile
def correctBiasMidtransWrapper(aff_matrix_paths, temp_dir, ref_path, unbiasing_invmtx_path, unbiased_matrix_paths):
    """! Runs FSL midtrans over the subjects' affine matrices to build an unbiasing
    transform, then renames midtrans' numbered per-subject outputs to the requested paths.

    @param aff_matrix_paths: List of filepaths to affine transformations
    @param temp_dir: Output directory
    @param ref_path: Path to reference template
    @param unbiasing_invmtx_path: Path to unbiasing matrix
    @param unbiased_matrix_paths: List of filepaths to unbiased transformations
    """
    separate_path = os.path.join(temp_dir, 'T1_to_unbiased')
    # Assemble the midtrans invocation; only matrices that actually exist are passed in.
    parts = ['midtrans -v --separate=' + separate_path + ' --template=' + ref_path
             + ' -o ' + unbiasing_invmtx_path]
    n_found = 0
    for mat_path in aff_matrix_paths:
        if os.path.exists(mat_path):
            n_found += 1
            print(n_found, ' ', mat_path)
            parts.append(mat_path)
    command = ' '.join(parts) + ' '
    output = os.popen(command).read()
    print(output)
    # midtrans writes <separate>NNNN.mat (1-based); move each to its final location.
    for idx, final_path in enumerate(unbiased_matrix_paths):
        produced_path = os.path.join(temp_dir, 'T1_to_unbiased%04d.mat' % (idx + 1))
        os.rename(produced_path, final_path)
        print(idx, ' ', produced_path, ' renamed to ', final_path)
    print('T1 unbiasing matrices constructed!')
def soft_clamp(x, k):
    """! Soft intensity clamping of T1 images.

    Identity below the transition point k, zero for non-positive values, and a smooth
    sigmoid shoulder above k:
        f(x) = 0                                    for x <= 0
        f(x) = x                                    for 0 < x <= k
        f(x) = 3k/4 + k/(2(1 + exp(-8(x - k)/k)))   for x > k

    @param x: Image as numpy array
    @param k: Defines the transition to the clamping part of the function

    Date: 08/02/2021
    Author: Frederik J Lange
    Copyright: FMRIB 2021
    """
    # Sigmoid shoulder used above the transition point; evaluated everywhere but
    # only selected where x > k.
    shoulder = k / (2 * (1 + np.exp(-8 * (x - k) / k))) + 0.75 * k
    return np.where(x <= 0, 0.0, np.where(x <= k, x, shoulder))
def clampImage(img_path, out_path):
    """! Performs preprocessing steps and clamping on an image.

    Pipeline: BET brain extraction -> FAST segmentation of the extracted brain ->
    mean intensity of one tissue class -> soft_clamp of the full image at that value.

    @param img_path: Path to input image
    @param out_path: Path to clamped output image
    """
    out_dir = os.path.split(out_path)[0]
    # Derive the brain-extracted filename from the output name ("<name>_brain.nii.gz");
    # the double splitext strips the compound ".nii.gz" extension.
    mask_path = os.path.splitext(os.path.splitext(os.path.basename(out_path))[0])[0] + '_brain.nii.gz'
    mask_path = os.path.join(out_dir, mask_path)
    # Robust brain extraction; writes the extracted brain to mask_path.
    bet(img_path, mask_path, robust=True)
    with tempfile.TemporaryDirectory(dir=out_dir) as tmpdirname:
        # FAST segmentation; fast_seg_2 is presumably the white-matter class — TODO confirm.
        fast(mask_path, tmpdirname + '/fast', iter=0, N=True, g=True, v=False)
        # Mean intensity of the brain image restricted to the fast_seg_2 mask.
        wm_intensity_mean = fslstats(mask_path).k(tmpdirname + '/fast_seg_2').M.run()
    print('White matter mean intensity is: ', wm_intensity_mean)
    img_nib = nib.load(img_path)
    # Clamp intensities above the white-matter mean with a smooth shoulder (see soft_clamp).
    img_clamped_np = soft_clamp(img_nib.get_fdata(), wm_intensity_mean)
    img_clamped_nib = nib.Nifti1Image(img_clamped_np, affine=img_nib.affine, header=img_nib.header)
    img_clamped_nib.to_filename(out_path)
def averageImages(img_paths, out_path, norm_bool=False):
    """! Creates an average image from individual (non)normalised images.

    Missing input images are skipped (and reported); the average is taken over the
    images that exist. An AssertionError is raised afterwards if any were missing.

    @param img_paths: List of filepaths
    @param out_path: Path to average output image
    @param norm_bool: Normalise intensities of each image (fslmaths -inm 1000) before averaging
    """
    n_exist = 0
    n_imgs = len(img_paths)
    sum_img = None  # running sum; None until the first existing image is loaded
    for i, img_path in enumerate(img_paths):
        if os.path.exists(img_path):
            n_exist += 1
            print(i, ' ', img_path)
            img_nib = nib.load(img_path)
            if norm_bool:
                img_nib = fslmaths(img_nib).inm(1000).run()
            # BUGFIX: seed the accumulator on the first *existing* image, not on index 0 —
            # previously a missing first path left 'sum_img' unbound and crashed here.
            if sum_img is None:
                sum_img = img_nib
            else:
                sum_img = fslmaths(sum_img).add(img_nib).run()
        else:
            print(i, ' ', img_path, ' does not exist!')
    if n_exist > 0:
        mean_img = fslmaths(sum_img).div(n_exist).run()
        mean_img.to_filename(out_path)
    assert n_exist == n_imgs, "Not all images available!"
def applyWarpWrapper(img_path, ref_path, warped_path, warp_path, interp='spline', norm_bool=False):
    """! Wrapper for FSL applywarp - applies a warp (deformation field) to an image.

    Does nothing (beyond the initial print) if img_path does not exist.

    @param img_path: Path to input image
    @param ref_path: Path to reference image
    @param warped_path: Path to warped output image
    @param warp_path: Path to warp (deformation field)
    @param interp: Interpolation method (same options as FSL applywarp)
    @param norm_bool: Normalise intensities of the image (fslmaths -inm 1000) before warping
    """
    print(img_path, warp_path)
    if os.path.exists(img_path):
        img_nib = nib.load(img_path)
        if norm_bool:
            img_nib = fslmaths(img_nib).inm(1000).run()
        # BUGFIX: the previous debug print emitted the literal source text of the call
        # ('applywarp(src=img_nib,...)') instead of the actual argument values.
        print('applywarp(src={}, ref={}, out={}, warp={}, interp={})'.format(
            img_path, ref_path, warped_path, warp_path, interp))
        applywarp(src=img_nib, ref=ref_path, out=warped_path, warp=warp_path, interp=interp)
# def submitJob_fsl_sub(name, log_dir, queue, wait_for=[], script=None, command=None, coprocessor_class=None, export_var=None,
# debug=False):
# """! Wrapper for fslsub - submits a job to the cluster. This function can be easily extended to work with other workload managers.
#
# @param name: Job name
# @param log_dir: Directory where output log-files will be saved
# @param queue: Name of queue to submit the job to
# @param wait_for: List of IDs of jobs required to finish before running this job.
# @param script: Path to a shell script, which contains one command per line - commands will be submitted as an array job
# @param command: Alternatively a single command can be provided as a string - command will be submitted as single job
# @param coprocessor_class: Coprocessor class, if not None cuda will be selected
# @param export_var: Environment variables to be exported to the submission node
# @param debug: If True, information about job will be written to output
#
# @return The job ID.
# """
#
# fsl_sub.submit()
#
# cmd = 'fsl_sub'
# if wait_for and any(job != '' for job in wait_for):
# cmd += ' -j '
# for j, job in enumerate(wait_for):
# if job != '':
# cmd += job.replace("\n", "")
# if j < len(wait_for) - 1:
# cmd += ','
#
# cmd += ' -N ' + name + \
# ' -l ' + log_dir + \
# ' -q ' + queue
#
# if coprocessor_class is not None :
# cmd += ' --coprocessor cuda'
#
# if export_var is not None :
# cmd += ' --export ' + export_var
#
# if debug:
# cmd += ' --debug'
#
# if script is not None and os.path.exists(script):
# cmd += ' -t ' + script
# elif command is not None :
# cmd += shlex.split(command)
#
# # stream = os.popen(cmd)
# # job_id = stream.read()
#
# try:
# result = subprocess.run(command, capture_output, text=True, check=True)
# except subprocess.CalledProcessError as e:
# print(str(e), file=sys.stderr)
# return None
#
# job_id = result.stdout.strip()
#
# return job_id
def submitJob(name, log_dir, queue, wait_for=None, script=None, command=None, coprocessor_class=None, export_var=None,
              debug=False):
    """! Wrapper for fsl_sub - submits a job to the cluster. This function can be easily
    extended to work with other workload managers.

    @param name: Job name
    @param log_dir: Directory where output log-files will be saved
    @param queue: Name of queue to submit the job to
    @param wait_for: List of IDs of jobs required to finish before running this job.
    @param script: Path to a shell script, which contains one command per line - commands will be submitted as an array job
    @param command: Alternatively a single command can be provided as a string - command will be submitted as single job
    @param coprocessor_class: Coprocessor class, if not None cuda will be selected
    @param export_var: Environment variables to be exported to the submission node
    @param debug: If True, information about job will be written to output

    @return The job ID as a string, or None if submission failed.
    """
    # BUGFIX: default changed from a shared mutable list ([]) to None.
    if wait_for is None:
        wait_for = []
    cmd = 'fsl_sub'
    if wait_for and any(job != '' for job in wait_for):
        # BUGFIX: join only the non-empty IDs; the old index-based loop emitted
        # stray commas around empty entries (e.g. 'a,,b').
        cmd += ' -j ' + ','.join(job.replace("\n", "") for job in wait_for if job != '')
    cmd += ' -N ' + name + \
           ' -l ' + log_dir + \
           ' -q ' + queue
    if coprocessor_class is not None:
        cmd += ' --coprocessor cuda'
    if export_var is not None:
        cmd += ' --export ' + export_var
    if debug:
        cmd += ' --debug'
    if script is not None and os.path.exists(script):
        cmd += ' -t ' + script
    elif command is not None:
        # shlex.split below handles any quoting inside the command string.
        cmd += ' ' + command + ' '
    print(cmd)
    try:
        result = subprocess.run(shlex.split(cmd), capture_output=True, text=True, check=True)
    except (subprocess.CalledProcessError, FileNotFoundError) as e:
        # BUGFIX: FileNotFoundError (fsl_sub not on PATH) previously escaped uncaught;
        # both failure modes now report to stderr and return None.
        print(str(e), file=sys.stderr)
        return None
    job_id = result.stdout.strip()
    return job_id
def RMSdifference(img1_path, img2_path, mask1_path=None, mask2_path=None, rms_path=None):
    """! Calculates the difference between two images or warps as the root mean squared (RMS).

    When both masks are supplied, the RMS is taken only over voxels in the union of the
    two masks; otherwise it is taken over the whole volume.

    @param img1_path: Path to first image or deformation field
    @param img2_path: Path to second image or deformation field
    @param mask1_path: Path to mask for first image
    @param mask2_path: Path to mask for second image
    @param rms_path: Path to output text file that RMS is written to
    """
    data1 = nib.load(img1_path).get_fdata()
    data2 = nib.load(img2_path).get_fdata()
    if mask1_path is None or mask2_path is None:
        diff = data1 - data2
    else:
        mask1 = nib.load(mask1_path).get_fdata()
        mask2 = nib.load(mask2_path).get_fdata()
        if len(data1.shape) > 3:
            # 4D input (e.g. a warp field): replicate each 3D mask along the last axis.
            mask1 = np.stack((mask1,) * data1.shape[-1], -1)
            mask2 = np.stack((mask2,) * data2.shape[-1], -1)
        union = np.logical_or(mask1, mask2)
        diff = data1[union > 0] - data2[union > 0]
    dim = np.prod(diff.shape)
    rms = np.sqrt((diff ** 2).sum() / dim)
    print('RMS difference between {} and {}: {}'.format(img1_path, img2_path, rms))
    if rms_path is not None:
        with open(rms_path, 'w+') as f:
            f.write('{}'.format(rms))
def RMSstandardDeviation(img_paths, mean_img_path, mask_path, sd_img_out_path=None, rms_out_path=None):
    """! Calculates the standard deviation of images as the root mean squared (RMS) (== coefficient of variation)

    @param img_paths: List of paths to images
    @param mean_img_path: Path to average image
    @param mask_path: Path to mask
    @param sd_img_out_path: Path to standard deviation output image
    @param rms_out_path: Path to output text file that RMS is written to
    """
    mean_img_nib = nib.load(mean_img_path)
    # Accumulate squared deviations of each (intensity-normalised) image from the mean.
    for i, path in enumerate(img_paths):
        print(i, ' ', path)
        # inm(1000) rescales the image's mean intensity to 1000 before subtraction;
        # NOTE(review): assumes the mean image was built from images normalised the
        # same way (cf. averageImages with norm_bool=True) — confirm against callers.
        diff_img = fslmaths(path).inm(1000).sub(mean_img_nib).run()
        if i == 0:
            diffsum_img = fslmaths(diff_img).mul(diff_img).run()
        else:
            diffsum_img = fslmaths(diff_img).mul(diff_img).add(diffsum_img).run()
    # Population standard deviation per voxel: sqrt(sum of squared deviations / N).
    stdtemp_img = fslmaths(diffsum_img).div(len(img_paths)).run()
    stdtemp_img_np = np.sqrt(stdtemp_img.get_fdata())
    if sd_img_out_path is not None:
        std_img = nib.Nifti1Image(stdtemp_img_np, affine=stdtemp_img.affine, header=stdtemp_img.header)
        std_img.to_filename(sd_img_out_path)
    mask_np = nib.load(mask_path).get_fdata()
    mean_img_np = mean_img_nib.get_fdata()
    # Restrict to voxels inside the mask before forming the coefficient of variation.
    stdtemp_img_masked_np = stdtemp_img_np[mask_np > 0]
    mean_img_masked_np = mean_img_np[mask_np > 0]
    cv = stdtemp_img_masked_np / mean_img_masked_np  # coefficient of variation
    dim = np.prod(cv.shape)
    rms = np.sqrt((cv ** 2).sum() / dim)
    print('RMS standard deviation: {}'.format(rms))
    if rms_out_path is not None:
        with open(rms_out_path, 'w+') as f:
            f.write('{}'.format(rms))
def mmorfWrapper(mmorf_run_cmd, config_path, img_warp_space,
img_ref_scalar, img_mov_scalar, aff_ref_scalar, aff_mov_scalar,
mask_ref_scalar, mask_mov_scalar,
img_ref_tensor, img_mov_tensor, aff_ref_tensor, aff_mov_tensor,
mask_ref_tensor, mask_mov_tensor,
warp_out, jac_det_out, bias_out):