#!/usr/bin/env python # # run_template_construction.py - Constructs multimodal templates # # Author: Christoph Arthofer # Copyright: FMRIB 2021 # """! This script allows the construction of an unbiased, multimodal template from T1, T1+T2 or T1+T2+DTI modalities. """ import os import shutil import pandas as pd import nibabel as nib import numpy as np import shlex import subprocess import sys from fsl.wrappers import fslmaths,flirt,applyxfm,concatxfm,bet,fast,fslstats from fsl.wrappers.fnirt import invwarp, applywarp, convertwarp from file_tree import FileTree # from fsl.utils.filetree import FileTree from fsl.utils.fslsub import func_to_cmd from operator import itemgetter import tempfile import argparse import fsl_sub def writeConfig(step,mod,fpath): """! Writes the nonlinear registration parameters for a given resolution level and modalities to a file readable by MMORF. @param step: Resolution level provided as integer @param mod: Modalities provided as a dictionary @param fpath: Output filepath """ T1_head = True if mod['T1_head_key'] is not None else False T2_head = True if mod['T2_head_key'] is not None else False DTI = True if mod['DTI_tensor_key'] is not None else False # This will be defined in a separate file in the future and, # I know, this could be implemented more efficiently but for the sake of easy readability: if step == 1: s = 'warp_res_init = 32 \n' \ 'warp_scaling = 1 1 \n' \ 'lambda_reg = 4.0e5 3.7e-1 \n' \ 'hires = 3.9 \n' \ 'optimiser_max_it_lowres = 5 \n' \ 'optimiser_max_it_hires = 5 \n' if T1_head: s += '\n' \ '; Whole-head T1 \n' \ 'use_implicit_mask = 0 \n' \ 'use_mask_ref_scalar = 1 1 \n' \ 'use_mask_mov_scalar = 1 1 \n' \ 'fwhm_ref_scalar = 8.0 8.0 \n' \ 'fwhm_mov_scalar = 8.0 8.0 \n' \ 'lambda_scalar = 1 1 \n' \ 'estimate_bias = 1 \n' \ 'bias_res_init = 32 \n' \ 'lambda_bias_reg = 1e9 1e9 \n' if T2_head: s += '\n' \ '; Whole-head T2 \n' \ 'use_implicit_mask = 0 \n' \ 'use_mask_ref_scalar = 0 0 \n' \ 'use_mask_mov_scalar = 0 0 \n' \ 'fwhm_ref_scalar = 8.0 8.0 \n' \ 'fwhm_mov_scalar = 8.0 8.0 \n' \ 'lambda_scalar = 1 1 \n' \ 'estimate_bias = 1 \n' \ 'bias_res_init = 32 \n' \ 'lambda_bias_reg = 1e9 1e9 \n' if DTI: s += '\n' \ '; DTI \n' \ 'use_mask_ref_tensor = 0 0 \n' \ 'use_mask_mov_tensor = 0 0 \n' \ 'fwhm_ref_tensor = 8.0 8.0 \n' \ 'fwhm_mov_tensor = 8.0 8.0 \n' \ 'lambda_tensor = 1 1 \n' elif step == 2: s = 'warp_res_init = 32 \n' \ 'warp_scaling = 1 1 2 \n' \ 'lambda_reg = 4.0e5 3.7e-1 3.1e-1 \n' \ 'hires = 3.9 \n' \ 'optimiser_max_it_lowres = 5 \n' \ 'optimiser_max_it_hires = 5 \n' if T1_head: s += '\n' \ '; Whole-head T1 \n' \ 'use_implicit_mask = 0 \n' \ 'use_mask_ref_scalar = 1 1 1 \n' \ 'use_mask_mov_scalar = 1 1 1 \n' \ 'fwhm_ref_scalar = 8.0 8.0 4.0 \n' \ 'fwhm_mov_scalar = 8.0 8.0 4.0 \n' \ 'lambda_scalar = 1 1 1 \n' \ 'estimate_bias = 1 \n' \ 'bias_res_init = 32 \n' \ 'lambda_bias_reg = 1e9 1e9 1e9 \n' if T2_head: s += '\n' \ '; Whole-head T2 \n' \ 'use_implicit_mask = 0 \n' \ 'use_mask_ref_scalar = 0 0 0 \n' \ 'use_mask_mov_scalar = 0 0 0 \n' \ 'fwhm_ref_scalar = 8.0 8.0 4.0 \n' \ 'fwhm_mov_scalar = 8.0 8.0 4.0 \n' \ 'lambda_scalar = 1 1 1 \n' \ 'estimate_bias = 1 \n' \ 'bias_res_init = 32 \n' \ 'lambda_bias_reg = 1e9 1e9 1e9 \n' if DTI: s += '\n' \ '; DTI \n' \ 'use_mask_ref_tensor = 0 0 0 \n' \ 'use_mask_mov_tensor = 0 0 0 \n' \ 'fwhm_ref_tensor = 8.0 8.0 4.0 \n' \ 'fwhm_mov_tensor = 8.0 8.0 4.0 \n' \ 'lambda_tensor = 1 1 1 \n' elif step == 3: s = 'warp_res_init = 32 \n' \ 'warp_scaling = 1 1 2 2 \n' \ 'lambda_reg = 4.0e5 3.7e-1 3.1e-1 2.6e-1 \n' \ 'hires = 3.9 \n' \ 'optimiser_max_it_lowres = 5 \n' \ 'optimiser_max_it_hires = 5 \n' if T1_head: s += '\n' \ '; Whole-head T1 \n' \ 'use_implicit_mask = 0 \n' \ 'use_mask_ref_scalar = 1 1 1 1 \n' \ 'use_mask_mov_scalar = 1 1 1 1 \n' \ 'fwhm_ref_scalar = 8.0 8.0 4.0 2.0 \n' \ 'fwhm_mov_scalar = 8.0 8.0 4.0 2.0 \n' \ 'lambda_scalar = 1 1 1 1 \n' \ 'estimate_bias = 1 \n' \ 'bias_res_init = 32 \n' \ 'lambda_bias_reg = 1e9 1e9 1e9 1e9 \n' if T2_head: s += '\n' \ '; Whole-head T2 \n' \ 'use_implicit_mask = 0 \n' \ 'use_mask_ref_scalar = 0 0 0 0 \n' \ 'use_mask_mov_scalar = 0 0 0 0 \n' \ 'fwhm_ref_scalar = 8.0 8.0 4.0 2.0 \n' \ 'fwhm_mov_scalar = 8.0 8.0 4.0 2.0 \n' \ 'lambda_scalar = 1 1 1 1 \n' \ 'estimate_bias = 1 \n' \ 'bias_res_init = 32 \n' \ 'lambda_bias_reg = 1e9 1e9 1e9 1e9 \n' if DTI: s += '\n' \ '; DTI \n' \ 'use_mask_ref_tensor = 0 0 0 0 \n' \ 'use_mask_mov_tensor = 0 0 0 0 \n' \ 'fwhm_ref_tensor = 8.0 8.0 4.0 2.0 \n' \ 'fwhm_mov_tensor = 8.0 8.0 4.0 2.0 \n' \ 'lambda_tensor = 1 1 1 1 \n' elif step == 4: s = 'warp_res_init = 32 \n' \ 'warp_scaling = 1 1 2 2 2 \n' \ 'lambda_reg = 4.0e5 3.7e-1 3.1e-1 2.6e-1 2.2e-1 \n' \ 'hires = 3.9 \n' \ 'optimiser_max_it_lowres = 5 \n' \ 'optimiser_max_it_hires = 5 \n' if T1_head: s += '\n' \ '; Whole-head T1 \n' \ 'use_implicit_mask = 0 \n' \ 'use_mask_ref_scalar = 1 1 1 1 1 \n' \ 'use_mask_mov_scalar = 1 1 1 1 1 \n' \ 'fwhm_ref_scalar = 8.0 8.0 4.0 2.0 1.0 \n' \ 'fwhm_mov_scalar = 8.0 8.0 4.0 2.0 1.0 \n' \ 'lambda_scalar = 1 1 1 1 1 \n' \ 'estimate_bias = 1 \n' \ 'bias_res_init = 32 \n' \ 'lambda_bias_reg = 1e9 1e9 1e9 1e9 1e9 \n' if T2_head: s += '\n' \ '; Whole-head T2 \n' \ 'use_implicit_mask = 0 \n' \ 'use_mask_ref_scalar = 0 0 0 0 0 \n' \ 'use_mask_mov_scalar = 0 0 0 0 0 \n' \ 'fwhm_ref_scalar = 8.0 8.0 4.0 2.0 1.0 \n' \ 'fwhm_mov_scalar = 8.0 8.0 4.0 2.0 1.0 \n' \ 'lambda_scalar = 1 1 1 1 1 \n' \ 'estimate_bias = 1 \n' \ 'bias_res_init = 32 \n' \ 'lambda_bias_reg = 1e9 1e9 1e9 1e9 1e9 \n' if DTI: s += '\n'\ '; DTI \n' \ 'use_mask_ref_tensor = 0 0 0 0 0 \n' \ 'use_mask_mov_tensor = 0 0 0 0 0 \n' \ 'fwhm_ref_tensor = 8.0 8.0 4.0 2.0 1.0 \n' \ 'fwhm_mov_tensor = 8.0 8.0 4.0 2.0 1.0 \n' \ 'lambda_tensor = 1 1 1 1 1 \n' elif step == 5: s = 'warp_res_init = 32 \n' \ 'warp_scaling = 1 1 2 2 2 2 \n' \ 'lambda_reg = 4.0e5 3.7e-1 3.1e-1 2.6e-1 2.2e-1 1.8e-1 \n' \ 'hires = 3.9 \n' \ 'optimiser_max_it_lowres = 5 \n' \ 'optimiser_max_it_hires = 5 \n' if T1_head: s += '\n' \ '; Whole-head T1 \n' \ 'use_implicit_mask = 0 \n' \ 'use_mask_ref_scalar = 1 1 1 1 1 1 \n' \ 'use_mask_mov_scalar = 1 1 1 1 1 1 \n' \ 'fwhm_ref_scalar = 8.0 8.0 4.0 2.0 1.0 0.5 \n' \ 'fwhm_mov_scalar = 8.0 8.0 4.0 2.0 1.0 0.5 \n' \ 'lambda_scalar = 1 1 1 1 1 1 \n' \ 'estimate_bias = 1 \n' \ 'bias_res_init = 32 \n' \ 'lambda_bias_reg = 1e9 1e9 1e9 1e9 1e9 1e9 \n' if T2_head: s += '\n' \ '; Whole-head T2 \n' \ 'use_implicit_mask = 0 \n' \ 'use_mask_ref_scalar = 0 0 0 0 0 0 \n' \ 'use_mask_mov_scalar = 0 0 0 0 0 0 \n' \ 'fwhm_ref_scalar = 8.0 8.0 4.0 2.0 1.0 0.5 \n' \ 'fwhm_mov_scalar = 8.0 8.0 4.0 2.0 1.0 0.5 \n' \ 'lambda_scalar = 1 1 1 1 1 1 \n' \ 'estimate_bias = 1 \n' \ 'bias_res_init = 32 \n' \ 'lambda_bias_reg = 1e9 1e9 1e9 1e9 1e9 1e9 \n' if DTI: s += '\n' \ '; DTI \n' \ 'use_mask_ref_tensor = 0 0 0 0 0 0 \n' \ 'use_mask_mov_tensor = 0 0 0 0 0 0 \n' \ 'fwhm_ref_tensor = 8.0 8.0 4.0 2.0 1.0 0.5 \n' \ 'fwhm_mov_tensor = 8.0 8.0 4.0 2.0 1.0 0.5 \n' \ 'lambda_tensor = 1 1 1 1 1 1 \n' elif step == 6: s = 'warp_res_init = 32 \n' \ 'warp_scaling = 1 1 2 2 2 2 2 \n' \ 'lambda_reg = 4.0e5 3.7e-1 3.1e-1 2.6e-1 2.2e-1 1.8e-1 1.5e-1 \n' \ 'hires = 3.9 \n' \ 'optimiser_max_it_lowres = 5 \n' \ 'optimiser_max_it_hires = 5 \n' if T1_head: s += '\n' \ '; Whole-head T1 \n' \ 'use_implicit_mask = 0 \n' \ 'use_mask_ref_scalar = 1 1 1 1 1 1 1 \n' \ 'use_mask_mov_scalar = 1 1 1 1 1 1 1 \n' \ 'fwhm_ref_scalar = 8.0 8.0 4.0 2.0 1.0 0.5 0.25 \n' \ 'fwhm_mov_scalar = 8.0 8.0 4.0 2.0 1.0 0.5 0.25 \n' \ 'lambda_scalar = 1 1 1 1 1 1 1 \n' \ 'estimate_bias = 1 \n' \ 'bias_res_init = 32 \n' \ 'lambda_bias_reg = 1e9 1e9 1e9 1e9 1e9 1e9 1e9 \n' if T2_head: s += '\n' \ '; Whole-head T2 \n' \ 'use_implicit_mask = 0 \n' \ 'use_mask_ref_scalar = 0 0 0 0 0 0 0 \n' \ 'use_mask_mov_scalar = 0 0 0 0 0 0 0 \n' \ 'fwhm_ref_scalar = 8.0 8.0 4.0 2.0 1.0 0.5 0.25 \n' \ 'fwhm_mov_scalar = 8.0 8.0 4.0 2.0 1.0 0.5 0.25 \n' \ 'lambda_scalar = 1 1 1 1 1 1 1 \n' \ 'estimate_bias = 1 \n' \ 'bias_res_init = 32 \n' \ 'lambda_bias_reg = 1e9 1e9 1e9 1e9 1e9 1e9 1e9 \n' if DTI: s += '\n' \ '; DTI \n' \ 'use_mask_ref_tensor = 0 0 0 0 0 0 0 \n' \ 'use_mask_mov_tensor = 0 0 0 0 0 0 0 \n' \ 'fwhm_ref_tensor = 8.0 8.0 4.0 2.0 1.0 0.5 0.25 \n' \ 'fwhm_mov_tensor = 8.0 8.0 4.0 2.0 1.0 0.5 0.25 \n' \ 'lambda_tensor = 1 1 1 1 1 1 1 \n' f = open(fpath, 'w+') f.write(s) f.close() def correctBiasMidtransWrapper(aff_matrix_paths, temp_dir, ref_path, unbiasing_invmtx_path, unbiased_matrix_paths): """! Writes the nonlinear registration parameters for a given resolution level and modalities to a file readable by MMORF. @param aff_matrix_paths: List of filepaths to affine transformations @param temp_dir: Output directory @param ref_path: Path to reference template @param unbiasing_invmtx_path: Path to unbiasing matrix @param unbiased_matrix_paths: List of filepaths to unbiased transformations """ separate_path = os.path.join(temp_dir, 'T1_to_unbiased') command = 'midtrans -v --separate=' + separate_path + ' --template=' + ref_path + ' -o ' + unbiasing_invmtx_path + ' ' count = 0 for omat_path in aff_matrix_paths: if os.path.exists(omat_path): count += 1 print(count, ' ', omat_path) command += omat_path + ' ' stream = os.popen(command) output = stream.read() print(output) # Renaming matrices for i, sub_unbiasing_mat in enumerate(unbiased_matrix_paths): sub_unbiasing_mat_temp = os.path.join(temp_dir, 'T1_to_unbiased%04d.mat' % (i + 1)) os.rename(sub_unbiasing_mat_temp, sub_unbiasing_mat) print(i, ' ', sub_unbiasing_mat_temp, ' renamed to ', sub_unbiasing_mat) print('T1 unbiasing matrices constructed!') def soft_clamp(x, k): """! Piecewise function for soft intensity clamping of T1 images. Takes a single parameter k which defines the transition to the clamping part of the function. f(x) = 0 | x <= 0 f(x) = x | 0 < x <= k f(x) = 3k/4 + k/(2(1 + exp(-8(x - k)/k))) | x > k @param x: Image as numpy array @param k: Defines the transition to the clamping part of the function Date: 08/02/2021 Author: Frederik J Lange Copyright: FMRIB 2021 """ return np.piecewise(x, [x <= 0, (0 < x) & (x <= k), x > k], [lambda x: 0, lambda x: x, lambda x: k / (2 * (1 + np.exp(-8 * (x - k) / k))) + 0.75 * k]) def clampImage(img_path, out_path): """! Performs preprocessing steps and clamping on an image. @param img_path: Path to input image @param out_path: Path to clamped output image """ out_dir = os.path.split(out_path)[0] mask_path = os.path.splitext(os.path.splitext(os.path.basename(out_path))[0])[0] + '_brain.nii.gz' mask_path = os.path.join(out_dir, mask_path) bet(img_path, mask_path, robust=True) with tempfile.TemporaryDirectory(dir=out_dir) as tmpdirname: fast(mask_path, tmpdirname + '/fast', iter=0, N=True, g=True, v=False) wm_intensity_mean = fslstats(mask_path).k(tmpdirname + '/fast_seg_2').M.run() print('White matter mean intensity is: ', wm_intensity_mean) img_nib = nib.load(img_path) img_clamped_np = soft_clamp(img_nib.get_fdata(), wm_intensity_mean) img_clamped_nib = nib.Nifti1Image(img_clamped_np, affine=img_nib.affine, header=img_nib.header) img_clamped_nib.to_filename(out_path) def averageImages(img_paths, out_path, mod='average', norm_bool=False): """! Creates an average image from individual (non)normalised images. @param img_paths: List of filepaths @param out_path: Path to average/median output image @param mode: Choose between 'average' or 'median' image. @param norm_bool: Normalise intensities of each image before averaging true or false """ n_imgs = len(img_paths) n_exist = 0 if mod == 'average': for i, img_path in enumerate(img_paths): if os.path.exists(img_path): n_exist += 1 print(i, ' ', img_path) img_nib = nib.load(img_path) if norm_bool: img_nib = fslmaths(img_nib).inm(1000).run() if i == 0: sum_img = img_nib else: sum_img = fslmaths(sum_img).add(img_nib).run() else: print(i, ' ', img_path, ' does not exist!') if n_exist > 0: mean_img = fslmaths(sum_img).div(n_exist).run() mean_img.to_filename(out_path) elif mod == 'median': images = [] for i, img_path in enumerate(img_paths): if os.path.exists(img_path): n_exist += 1 print(i, ' ', img_path) img_nib = nib.load(img_path) if norm_bool: img_nib = fslmaths(img_nib).inm(1000).run() images.append(img_nib.get_fdata()) if n_exist > 0: median_img = np.median(np.array(images),axis=-1) median_nib = nib.Nifti1Image(median_img, affine=img_nib.affine, header=img_nib.header) median_nib.to_filename(out_path) assert n_exist == n_imgs, "Not all images available!" def applyWarpWrapper(img_path, ref_path, warped_path, warp_path, interp='spline', norm_bool=False): """! Wrapper for FSL applywarp - applies a warp (deformation field) to an image. @param img_path: Path to input image @param ref_path: Path to reference image @param warped_path: Path to warped output image @param warp_path: Path to warp (deformation field) @param interp: Interpolation method (same options as FSL applywarp) @param norm_bool: Normalise intensities of each image before averaging true or false """ print(img_path, warp_path) if os.path.exists(img_path): img_nib = nib.load(img_path) if norm_bool: img_nib = fslmaths(img_nib).inm(1000).run() print('applywarp(src=img_nib,ref=ref_path,out=warped_path,warp=warp_path,interp=interp)') applywarp(src=img_nib, ref=ref_path, out=warped_path, warp=warp_path, interp=interp) # https://git.fmrib.ox.ac.uk/fsl/fsl_sub def submitJob(command, name, log_dir, queue, wait_for=None, array_task=False, coprocessor=None, coprocessor_class=None, coprocessor_multi="1", threads=1, export_var=None): coprocessor_class_strict = True if coprocessor_class is not None else False job_id = fsl_sub.submit(command=command, array_task=array_task, jobhold=wait_for, name=name, logdir=log_dir, queue=queue, coprocessor=coprocessor, coprocessor_class=coprocessor_class, coprocessor_class_strict=coprocessor_class_strict, coprocessor_multi=coprocessor_multi, threads=threads, export_vars=export_var ) return job_id # def submitJob(name, log_dir, queue, wait_for=[], script=None, command=None, coprocessor_class=None, # coprocessor=None, export_var=None, debug=False): # """! Wrapper for fslsub - submits a job to the cluster. This function can be easily extended to work with other workload managers. # # @param name: Job name # @param log_dir: Directory where output log-files will be saved # @param queue: Name of queue to submit the job to # @param wait_for: List of IDs of jobs required to finish before running this job. # @param script: Path to a shell script, which contains one command per line - commands will be submitted as an array job # @param command: Alternatively a single command can be provided as a string - command will be submitted as single job # @param coprocessor_class: Coprocessor class # @param export_var: Environment variables to be exported to the submission node # @param debug: If True, information about job will be written to output # # @return The job ID. # """ # cmd = 'fsl_sub' # # if wait_for: # job_ids_bool = [job != '' for job in wait_for] # if any(job_ids_bool): # cmd += ' -j ' # wait_for_arr = np.array(wait_for) # wait_for_arr = wait_for_arr[job_ids_bool] # for j, job in enumerate(wait_for_arr): # cmd += job.replace("\n", "") # if j < len(wait_for_arr) - 1: # cmd += ',' # # cmd += ' -N ' + name + \ # ' -l ' + log_dir + \ # ' -q ' + queue # # if coprocessor_class is not None: # cmd += ' --coprocessor_class ' + coprocessor_class # cmd += ' --coprocessor_class_strict ' # if coprocessor is not None: # cmd += ' --coprocessor ' + coprocessor + ' -R 32' # # if export_var is not None : # cmd += ' --export ' + export_var # # if debug: # cmd += ' --debug' # # if script is not None and os.path.exists(script): # cmd += ' -t ' + script # elif command is not None : # cmd += ' ' + command + ' ' # # cmd += ' "' + command + '"' # # # stream = os.popen(cmd) # # job_id = stream.read() # # try: # result = subprocess.run(shlex.split(cmd), capture_output=True, text=True, check=True) # except subprocess.CalledProcessError as e: # print(str(e), file=sys.stderr) # return None # # job_id = result.stdout.strip() # # return job_id def RMSdifference(img1_path, img2_path, mask1_path=None, mask2_path=None, rms_path=None): """! Calculates the difference between two images or warps as the root mean squared (RMS) @param img1_path: Path to first image or deformation field @param img2_path: Path to second image or deformation field @param mask1_path: Path to mask for first image @param mask2_path: Path to mask for second image @param rms_path: Path to output text file that RMS is written to """ img1_arr = nib.load(img1_path).get_fdata() img2_arr = nib.load(img2_path).get_fdata() if mask1_path is not None and mask2_path is not None: mask1_arr = nib.load(mask1_path).get_fdata() mask2_arr = nib.load(mask2_path).get_fdata() if len(img1_arr.shape) > 3: n_dim = img1_arr.shape[-1] img1_mask_stack_arr = np.stack((mask1_arr,) * n_dim, -1) n_dim = img2_arr.shape[-1] img2_mask_stack_arr = np.stack((mask2_arr,) * n_dim, -1) else: img1_mask_stack_arr = mask1_arr img2_mask_stack_arr = mask2_arr img_mask_stack_arr = np.logical_or(img1_mask_stack_arr, img2_mask_stack_arr) img1_masked_arr = img1_arr[img_mask_stack_arr > 0] img2_masked_arr = img2_arr[img_mask_stack_arr > 0] diff_img = img1_masked_arr - img2_masked_arr else: diff_img = img1_arr - img2_arr dim = np.prod(diff_img.shape) rms = np.sqrt((diff_img ** 2).sum() / dim) print('RMS difference between {} and {}: {}'.format(img1_path, img2_path, rms)) if rms_path is not None: with open(rms_path, 'w+') as f: f.write('{}'.format(rms)) def RMSstandardDeviation(img_paths, mean_img_path, mask_path, sd_img_out_path=None, rms_out_path=None): """! Calculates the standard deviation of images as the root mean squared (RMS) (== coefficient of variation) @param img_paths: List of paths to images @param mean_img_path: Path to average image @param mask_path: Path to mask @param sd_img_out_path: Path to standard deviation output image @param rms_out_path: Path to output text file that RMS is written to """ mean_img_nib = nib.load(mean_img_path) for i, path in enumerate(img_paths): print(i, ' ', path) diff_img = fslmaths(path).inm(1000).sub(mean_img_nib).run() if i == 0: diffsum_img = fslmaths(diff_img).mul(diff_img).run() else: diffsum_img = fslmaths(diff_img).mul(diff_img).add(diffsum_img).run() stdtemp_img = fslmaths(diffsum_img).div(len(img_paths)).run() stdtemp_img_np = np.sqrt(stdtemp_img.get_fdata()) if sd_img_out_path is not None: std_img = nib.Nifti1Image(stdtemp_img_np, affine=stdtemp_img.affine, header=stdtemp_img.header) std_img.to_filename(sd_img_out_path) mask_np = nib.load(mask_path).get_fdata() mean_img_np = mean_img_nib.get_fdata() stdtemp_img_masked_np = stdtemp_img_np[mask_np > 0] mean_img_masked_np = mean_img_np[mask_np > 0] cv = stdtemp_img_masked_np / mean_img_masked_np # coefficient of variation dim = np.prod(cv.shape) rms = np.sqrt((cv ** 2).sum() / dim) print('RMS standard deviation: {}'.format(rms)) if rms_out_path is not None: with open(rms_out_path, 'w+') as f: f.write('{}'.format(rms)) def mmorfWrapper(mmorf_run_cmd, config_path, img_warp_space, img_ref_scalar, img_mov_scalar, aff_ref_scalar, aff_mov_scalar, mask_ref_scalar, mask_mov_scalar, img_ref_tensor, img_mov_tensor, aff_ref_tensor, aff_mov_tensor, mask_ref_tensor, mask_mov_tensor, warp_out, jac_det_out, bias_out): """! Wrapper function for running MMORF. @param mmorf_run_cmd: Singularity command to run MMORF @param config_path: Path to config file with fixed parameters @param img_warp_space: Path to image defining the space in which the warp field will be calculated @param img_ref_scalar: List of paths to scalar reference images @param img_mov_scalar: List of paths to scalar moving images @param aff_ref_scalar: List of paths to affine transformations for scalar reference images @param aff_mov_scalar: List of paths to affine transformations for scalar moving images @param mask_ref_scalar: List of paths to masks in reference image spaces @param mask_mov_scalar: List of paths to masks in moving image spaces @param img_ref_tensor: List of paths to reference tensors @param img_mov_tensor: List of paths to moving tensors @param aff_ref_tensor: List of paths to affine transformations for reference tensors @param aff_mov_tensor: List of paths to affine transformations for moving tensors @param mask_ref_tensor: List of paths to masks in reference tensor spaces @param mask_mov_tensor: List of paths to masks in moving tensor spaces @param warp_out: Path to output warp field @param jac_det_out: Path to output Jacobian determinant of final warp field @param bias_out: Path to output bias field for scalar image pairs @return The command as a string and a dictionary of environment variables. """ export_var = [] cmd = mmorf_run_cmd cmd += ' --config ' + config_path split = os.path.split(config_path) export_var.append(split[0]) cmd += ' --img_warp_space ' + img_warp_space split = os.path.split(img_warp_space) export_var.append(split[0]) for path in img_ref_scalar: cmd += ' --img_ref_scalar ' + path split = os.path.split(path) export_var.append(split[0]) for path in img_mov_scalar: cmd += ' --img_mov_scalar ' + path split = os.path.split(path) export_var.append(split[0]) for path in aff_ref_scalar: cmd += ' --aff_ref_scalar ' + path split = os.path.split(path) export_var.append(split[0]) for path in aff_mov_scalar: cmd += ' --aff_mov_scalar ' + path split = os.path.split(path) export_var.append(split[0]) for path in mask_ref_scalar: cmd += ' --mask_ref_scalar ' + path split = os.path.split(path) export_var.append(split[0]) for path in mask_mov_scalar: cmd += ' --mask_mov_scalar ' + path split = os.path.split(path) export_var.append(split[0]) for path in img_ref_tensor: cmd += ' --img_ref_tensor ' + path split = os.path.split(path) export_var.append(split[0]) for path in img_mov_tensor: cmd += ' --img_mov_tensor ' + path split = os.path.split(path) export_var.append(split[0]) for path in aff_ref_tensor: cmd += ' --aff_ref_tensor ' + path split = os.path.split(path) export_var.append(split[0]) for path in aff_mov_tensor: cmd += ' --aff_mov_tensor ' + path split = os.path.split(path) export_var.append(split[0]) for path in mask_ref_tensor: cmd += ' --mask_ref_tensor ' + path split = os.path.split(path) export_var.append(split[0]) for path in mask_mov_tensor: cmd += ' --mask_mov_tensor ' + path split = os.path.split(path) export_var.append(split[0]) cmd += ' --warp_out ' + warp_out split = os.path.split(warp_out) export_var.append(split[0]) cmd += ' --jac_det_out ' + jac_det_out split = os.path.split(jac_det_out) export_var.append(split[0]) cmd += ' --bias_out ' + bias_out split = os.path.split(bias_out) export_var.append(split[0]) cmd += '\n' export_var = list(filter(None, list(set(export_var)))) export_var = {'SINGULARITY_BIND': export_var} return cmd, export_var if __name__ == "__main__": """! Main function submitting the jobs. """ mni_path = os.getenv('FSLDIR')+'/data/standard/MNI152lin_T1_1mm_brain.nii.gz' identity_path = os.getenv('FSLDIR')+'/etc/flirtsch/ident.mat' mmorf_path = os.getenv('MMORFDIR') mmorf_run_cmd = 'singularity run --nv ' + mmorf_path mmorf_exec_cmd = 'singularity exec ' + mmorf_path flags_required = { 'input': [('-i', '--input'),''], 'tree': [('-t', '--tree'),''], 'output': [('-o', '--output'),''] } help_required = { 'input': 'Directory containing the subjects/timepoints', 'tree': 'Path to FSL Filetree describing the subject-specific directory structure', 'output': 'Output directory', } flags_optional = { 'subids': [('-s', '--subids'),''], 'affine': [('-aff', '--affine'),'[True,False]'], 'nonlinear': [('-nln', '--nonlinear'),'[True,False]'], 'n_resolutions': [('-nres', '--n_resolutions'),''], 'n_iterations': [('-nit', '--n_iterations'),''], 'cpuq': [('-c', '--cpuq'),''], 'gpuq': [('-g', '--gpuq'), ''] } help_optional = { 'subids': 'Path to .csv file containing one subject ID per row: subject IDs have to indentify the sub-directories of the \'input\' argument (optional)' 'if not provided all sub-directories of the \'input\' argument will be used', 'affine': 'Run affine template construction (required for affine)', 'nonlinear': 'Run nonlinear template construction (required for nonlinear)', 'n_resolutions': 'Number of resolution levels (has to be <= number of resolutions defined in the MMORF config (required for nonlinear template construction)', 'n_iterations': 'Number of iterations per resolution level (required for nonlinear template construction)', 'cpuq': 'Name of cluster queue to submit CPU jobs to (required for affine and nonlinear template construction)', 'gpuq': 'Name of cluster queue to submit GPU jobs to (required for nonlinear template construction)' } parser = argparse.ArgumentParser(description='Constructs a multimodal template from T1, T1+T2 or T1+T2+DTI data.', usage='\npython run_template_construction.py -i -t -o -aff True --cpuq short.qc\n' 'python run_template_construction.py -i -t -o -aff True -nln True -nres 2 -nit 1 --cpuq short.qc --gpuq gpu8.q\n' 'python run_template_construction.py -i -t -o -nln True -nres 2 -nit 1 --cpuq short.qc --gpuq gpu8.q\n') for key in flags_required.keys(): parser.add_argument(*flags_required[key][0], help=help_required[key], metavar=flags_required[key][1], required=True) for key in flags_optional.keys(): parser.add_argument(*flags_optional[key][0], help=help_optional[key], metavar=flags_optional[key][1]) args = parser.parse_args() data_dir = args.input tag = os.path.basename(os.path.abspath(args.output)) base_dir = args.output tree_path = args.tree if args.subids is not None: id_path = args.subids df_ids = pd.read_csv(id_path, header=None, names=['subject_ID'], dtype={'subject_ID': str}) ls_ids = df_ids['subject_ID'].tolist() else: ls_ids = [f.name for f in os.scandir(args.input) if f.is_dir()] ls_ids.sort() affine_on = args.affine == 'True' nln_on = args.nonlinear == 'True' if nln_on: if args.n_resolutions is None or args.n_iterations is None: sys.exit('No \'n_resolutions\' or \'n_iterations\' provided') else: step_id = np.arange(int(args.n_resolutions))+1 it_at_step_id = np.arange(int(args.n_iterations))+1 if args.cpuq is None or args.gpuq is None: sys.exit('No CPU or GPU queue provided') else: cpuq = args.cpuq gpuq = args.gpuq if affine_on: if args.cpuq is None: sys.exit('No CPU queue provided') else: cpuq = args.cpuq job_ids = ['' for _ in range(100)] task_count = 0 os.mkdir(base_dir, mode=0o770) if not os.path.exists(base_dir) else print(base_dir + ' exists') os.chmod(base_dir, 0o770) tree = FileTree.read(tree_path, top_level='') tree = tree.update(data_dir=data_dir, template_dir=base_dir) script_dir = tree.get('script_dir') os.mkdir(script_dir, mode=0o770) if not os.path.exists(script_dir) else print(script_dir + ' exists') log_dir = tree.get('log_dir') os.mkdir(log_dir, mode=0o770) if not os.path.exists(log_dir) else print(log_dir + ' exists') misc_dir = tree.get('misc_dir') os.mkdir(misc_dir, mode=0o770) if not os.path.exists(misc_dir) else print(misc_dir + ' exists') shutil.copyfile(identity_path,tree.get('identity_mat')) ref_idx = 0 filetree_keys = tree.template_keys() mod = {'T1_brain_key': None, 'T1_head_key': None, 'T2_brain_key': None, 'T2_head_key': None, 'DTI_tensor_key': None, 'DTI_scalar_key': None } if 'data/T1_head' in filetree_keys: mod['T1_head_key'] = 'data/T1_head' mod['T1_brain_key'] = 'data/T1_brain' mod['T1_brain_mask_key'] = 'data/T1_brain_mask' if 'data/T2_head' in filetree_keys: mod['T2_head_key'] = 'data/T2_head' mod['T2_brain_key'] = 'data/T2_brain' if 'data/DTI_tensor' in filetree_keys: mod['DTI_tensor_key'] = 'data/DTI_tensor' mod['DTI_scalar_key'] = 'data/DTI_scalar' # Affine template construction if affine_on: aff_ref_id = ls_ids[ref_idx] tree = tree.update(sub_id=aff_ref_id, ref_id=aff_ref_id) affine_ref_path = tree.get(mod['T1_brain_key']) # Soft clamping of high skull intensities task_name = '{:03d}_prep_clamping'.format(task_count) script_path = os.path.join(script_dir, task_name + '.sh') with open(script_path, 'w+') as f: for id in ls_ids: tree = tree.update(sub_id=id) T1_head_path = tree.get(mod['T1_head_key']) T1_clamped_path = tree.get('T1_head_clamped', make_dir=True) jobcmd = func_to_cmd(clampImage, args=(T1_head_path, T1_clamped_path), tmp_dir=script_dir, kwargs=None, clean="never") jobcmd = jobcmd + '\n' f.write(jobcmd) # job_ids[0] = submitJob(tag+'_'+task_name, log_dir, script=script_path, queue=cpuq) job_ids[0] = submitJob(script_path, tag+'_'+task_name, log_dir, queue=cpuq, wait_for=None, array_task=True) print('submitted: ' + task_name) # Register all individual images to one reference image task_count += 1 task_name = '{:03d}_affT_registrations_2_ref'.format(task_count) script_path = os.path.join(script_dir, task_name + '.sh') with open(script_path, 'w+') as f: for id in ls_ids: tree = tree.update(sub_id=id, ref_id=aff_ref_id) if id == aff_ref_id: shutil.copyfile(tree.get('identity_mat'), tree.get('T1_to_ref_mat', make_dir=True)) else: cmd = flirt(tree.get(mod['T1_brain_key']), affine_ref_path, omat=tree.get('T1_to_ref_mat', make_dir=True), dof=6, cmdonly=True) cmd = ' '.join(cmd) + '\n' f.write(cmd) # job_ids[1] = submitJob(tag+'_'+task_name, log_dir, script=script_path, queue=cpuq) job_ids[1] = submitJob(script_path, tag+'_'+task_name, log_dir, queue=cpuq, wait_for=None, array_task=True) print('submitted: ' + task_name) # Register T2 images to corresponding T1 images if mod['T2_brain_key'] is not None: task_count += 1 task_name = '{:03d}_affT_registrations_T2_2_T1'.format(task_count) script_path = os.path.join(script_dir, task_name + '.sh') with open(script_path, 'w+') as f: for id in ls_ids: tree = tree.update(sub_id=id, ref_id=id) cmd = flirt(tree.get(mod['T2_brain_key']), tree.get(mod['T1_brain_key']), omat=tree.get('T2_to_T1_mat'), dof=6, cmdonly=True) cmd = ' '.join(cmd) + '\n' f.write(cmd) # job_ids[2] = submitJob(tag+'_'+task_name, log_dir, script=script_path, queue=cpuq) job_ids[2] = submitJob(script_path, tag + '_' + task_name, log_dir, queue=cpuq, wait_for=None, array_task=True) print('submitted: ' + task_name) # Register DTI images to corresponding T2 images if mod['T2_brain_key'] is not None and mod['DTI_scalar_key'] is not None: task_count += 1 task_name = '{:03d}_affT_registrations_DTI_2_T2'.format(task_count) script_path = os.path.join(script_dir, task_name + '.sh') with open(script_path, 'w+') as f: for id in ls_ids: tree = tree.update(sub_id=id, ref_id=id) cmd = flirt(tree.get(mod['DTI_scalar_key']), tree.get(mod['T2_brain_key']), omat=tree.get('DTI_to_T2_mat'), dof=6, cmdonly=True) cmd = ' '.join(cmd) + '\n' f.write(cmd) # job_ids[3] = submitJob(tag+'_'+task_name, log_dir, script=script_path, queue=cpuq) job_ids[3] = submitJob(script_path, tag + '_' + task_name, log_dir, queue=cpuq, wait_for=None, array_task=True) print('submitted: ' + task_name) # Unbiasing of T1 images task_count += 1 task_name = '{:03d}_affT_correct_bias'.format(task_count) aff_matrix_paths = [] unbiased_matrix_paths = [] for id in ls_ids: tree = tree.update(sub_id=id, ref_id=aff_ref_id) aff_matrix_paths.append(tree.get('T1_to_ref_mat')) unbiased_matrix_paths.append(tree.get('T1_to_unbiased_mat')) temp_dir = tree.get('affine_it1_dir') unbiasing_invmtx_path = tree.get('T1_unbiasing_affine_matrix', make_dir=True) jobcmd = func_to_cmd(correctBiasMidtransWrapper, args=( aff_matrix_paths, temp_dir, affine_ref_path, unbiasing_invmtx_path, unbiased_matrix_paths), tmp_dir=script_dir, kwargs=None, clean="never") # job_ids[4] = submitJob(tag+'_'+task_name, log_dir, command=jobcmd, queue=cpuq, wait_for=[job_ids[1]]) job_ids[4] = submitJob(jobcmd, tag+'_'+task_name, log_dir, queue=cpuq, wait_for=[job_ids[1]], array_task=False) print('submitted: ' + task_name) # Apply unbiased matrix to T1 images task_count += 1 task_name = '{:03d}_affT_unbiased_transform_of_T1'.format(task_count) script_path = os.path.join(script_dir, task_name + '.sh') with open(script_path, 'w+') as f: for id in ls_ids: tree = tree.update(sub_id=id, ref_id=aff_ref_id) cmd = applyxfm(src=tree.get(mod['T1_brain_key']), ref=affine_ref_path, mat=tree.get('T1_to_unbiased_mat'), out=tree.get('T1_to_unbiased_img'), interp='spline', cmdonly=True) cmd = ' '.join(cmd) + '\n' f.write(cmd) # job_ids[5] = submitJob(tag+'_'+task_name, log_dir, script=script_path, queue=cpuq, # wait_for=[job_ids[4]]) job_ids[5] = submitJob(script_path, tag+'_'+task_name, log_dir, queue=cpuq, wait_for=[job_ids[4]], array_task=True) print('submitted: ' + task_name) # Concat T2_to_T1 and T1_to_unbiased if mod['T2_head_key'] is not None: task_count += 1 task_name = '{:03d}_affT_concat_T2_and_unbiased'.format(task_count) script_path = os.path.join(script_dir, task_name + '.sh') with open(script_path, 'w+') as f: for id in ls_ids: tree = tree.update(sub_id=id, ref_id=aff_ref_id) cmd = concatxfm(tree.get('T2_to_T1_mat'), tree.get('T1_to_unbiased_mat'), tree.get('T2_to_unbiased_mat'), cmdonly=True) cmd = ' '.join(cmd) + '\n' f.write(cmd) # job_ids[6] = submitJob(tag+'_'+task_name, log_dir, script=script_path, queue=cpuq, # wait_for=[job_ids[4]]) job_ids[6] = submitJob(script_path, tag + '_' + task_name, log_dir, queue=cpuq, wait_for=[job_ids[4]], array_task=True) print('submitted: ' + task_name) # Concat DTI_to_T2 and T2_to_unbiased if mod['T2_head_key'] is not None and mod['DTI_scalar_key'] is not None: task_count += 1 task_name = '{:03d}_affT_concat_DTI_and_unbiased'.format(task_count) script_path = os.path.join(script_dir, task_name + '.sh') with open(script_path, 'w+') as f: for id in ls_ids: tree = tree.update(sub_id=id, ref_id=aff_ref_id) cmd = concatxfm(tree.get('DTI_to_T2_mat'), tree.get('T2_to_unbiased_mat'), tree.get('DTI_to_unbiased_mat'), cmdonly=True) cmd = ' '.join(cmd) + '\n' f.write(cmd) # job_ids[7] = submitJob(tag+'_'+task_name, log_dir, script=script_path, queue=cpuq, # wait_for=[job_ids[6]]) job_ids[7] = submitJob(script_path, tag + '_' + task_name, log_dir, queue=cpuq, wait_for=[job_ids[6]], array_task=True) print('submitted: ' + task_name) # Averaging unbiased T1 images task_count += 1 task_name = '{:03d}_affT_average_unbiased_T1'.format(task_count) img_paths = [] for id in ls_ids: tree = tree.update(sub_id=id, ref_id=aff_ref_id) img_paths.append(tree.get('T1_to_unbiased_img')) aff_template_path = tree.get('T1_unbiased_affine_template') jobcmd = func_to_cmd(averageImages, args=(img_paths, aff_template_path, 'median', True), tmp_dir=script_dir, kwargs=None, clean="never") # job_ids[8] = submitJob(tag+'_'+task_name, log_dir, command=jobcmd, queue=cpuq, wait_for=[job_ids[5]]) job_ids[8] = submitJob(jobcmd, tag + '_' + task_name, log_dir, queue=cpuq, wait_for=[job_ids[5]], array_task=False) print('submitted: ' + task_name) # Register unbiased template to MNI space with 6 dof task_count += 1 task_name = '{:03d}_affT_unbiased_T1_template_to_MNI'.format(task_count) cmd = flirt(aff_template_path, mni_path, omat=tree.get('T1_unbiased_affine_template_to_MNI_mat'), out=tree.get('T1_unbiased_affine_template_to_MNI_img'), dof=6, cmdonly=True) cmd = ' '.join(cmd) + '\n' # job_ids[9] = submitJob(tag+'_'+task_name, log_dir, command=cmd, queue=cpuq, wait_for=[job_ids[8]]) job_ids[9] = submitJob(cmd, tag + '_' + task_name, log_dir, queue=cpuq, wait_for=[job_ids[8]], array_task=False) print('submitted: ' + task_name) # Concatenate individual affine transformations (T1 brain to unbiased T1 and the rigid transformation to MNI) task_count += 1 task_name = '{:03d}_affT_concat_T1_brain_to_MNI'.format(task_count) script_path = os.path.join(script_dir, task_name + '.sh') with open(script_path, 'w+') as f: for id in ls_ids: tree = tree.update(sub_id=id, ref_id=aff_ref_id) cmd = concatxfm(tree.get('T1_to_unbiased_mat'), tree.get('T1_unbiased_affine_template_to_MNI_mat'), tree.get('T1_to_MNI_mat', make_dir=True), cmdonly=True) cmd = ' '.join(cmd) + '\n' f.write(cmd) # job_ids[10] = submitJob(tag+'_'+task_name, log_dir, script=script_path, queue=cpuq, # wait_for=[job_ids[9]]) job_ids[10] = submitJob(script_path, tag + '_' + task_name, log_dir, queue=cpuq, wait_for=[job_ids[9]], array_task=True) print('submitted: ' + task_name) # Concatenate individual affine transformations (T2 brain to unbiased T2 and the rigid transformation to MNI) if mod['T2_head_key'] is not None: task_count += 1 task_name = '{:03d}_affT_concat_T2_brain_to_MNI'.format(task_count) script_path = os.path.join(script_dir, task_name + '.sh') with open(script_path, 'w+') as f: for id in ls_ids: tree = tree.update(sub_id=id, ref_id=aff_ref_id) cmd = concatxfm(tree.get('T2_to_unbiased_mat'), tree.get('T1_unbiased_affine_template_to_MNI_mat'), tree.get('T2_to_MNI_mat'), cmdonly=True) cmd = ' '.join(cmd) + '\n' f.write(cmd) # job_ids[12] = submitJob(tag+'_'+task_name, log_dir, script=script_path, queue=cpuq, # wait_for=[job_ids[9]]) job_ids[12] = submitJob(script_path, tag + '_' + task_name, log_dir, queue=cpuq, wait_for=[job_ids[9]], array_task=True) print('submitted: ' + task_name) # Concatenate individual affine transformations (DTI to unbiased T2 and the rigid transformation to MNI) if mod['T2_head_key'] is not None and mod['DTI_scalar_key'] is not None: task_count += 1 task_name = '{:03d}_affT_concat_DTI_to_MNI'.format(task_count) script_path = os.path.join(script_dir, task_name + '.sh') with open(script_path, 'w+') as f: for id in ls_ids: tree = tree.update(sub_id=id, ref_id=aff_ref_id) cmd = concatxfm(tree.get('DTI_to_unbiased_mat'), tree.get('T1_unbiased_affine_template_to_MNI_mat'), tree.get('DTI_to_MNI_mat'), cmdonly=True) cmd = ' '.join(cmd) + '\n' f.write(cmd) # job_ids[14] = submitJob(tag+'_'+task_name, log_dir, script=script_path, queue=cpuq, # wait_for=[job_ids[9]]) job_ids[14] = submitJob(script_path, tag + '_' + task_name, log_dir, queue=cpuq, wait_for=[job_ids[9]], array_task=True) print('submitted: ' + task_name) # Transform individual T1 brain images to MNI space if mod['T1_brain_key'] is not None: task_count += 1 task_name = '{:03d}_affT_transform_T1_brain_to_MNI'.format(task_count) script_path = os.path.join(script_dir, task_name + '.sh') with open(script_path, 'w+') as f: for id in ls_ids: tree = tree.update(sub_id=id, ref_id=aff_ref_id) cmd = applyxfm(src=tree.get(mod['T1_brain_key']), ref=mni_path, mat=tree.get('T1_to_MNI_mat'), out=tree.get('T1_brain_to_MNI_img'), interp='spline', cmdonly=True) cmd = ' '.join(cmd) + '\n' f.write(cmd) # job_ids[15] = submitJob(tag+'_'+task_name, log_dir, script=script_path, queue=cpuq, # wait_for=[job_ids[10]]) job_ids[15] = submitJob(script_path, tag + '_' + task_name, log_dir, queue=cpuq, wait_for=[job_ids[10]], array_task=True) print('submitted: ' + task_name) # Transform individual T1 brain masks to MNI space if mod['T1_brain_mask_key'] is not None: task_count += 1 task_name = '{:03d}_affT_transform_T1_brain_masks_to_MNI'.format(task_count) script_path = os.path.join(script_dir, task_name + '.sh') with open(script_path, 'w+') as f: for id in ls_ids: tree = tree.update(sub_id=id, ref_id=aff_ref_id) cmd = applyxfm(src=tree.get(mod['T1_brain_mask_key']), ref=mni_path, mat=tree.get('T1_to_MNI_mat'), out=tree.get('T1_brain_mask_to_MNI_img'), interp='trilinear', cmdonly=True) cmd = ' '.join(cmd) + '\n' f.write(cmd) # job_ids[16] = submitJob(tag+'_'+task_name, log_dir, script=script_path, queue=cpuq, # wait_for=[job_ids[10]]) job_ids[16] = submitJob(script_path, tag + '_' + task_name, log_dir, queue=cpuq, wait_for=[job_ids[10]], array_task=True) print('submitted: ' + task_name) # Transform individual T1 head images to MNI space if mod['T1_head_key'] is not None: task_count += 1 task_name = '{:03d}_affT_transform_T1_head_to_MNI'.format(task_count) script_path = os.path.join(script_dir, task_name + '.sh') with open(script_path, 'w+') as f: for id in ls_ids: tree = tree.update(sub_id=id, ref_id=aff_ref_id) cmd = applyxfm(src=tree.get('T1_head_clamped'), ref=mni_path, mat=tree.get('T1_to_MNI_mat'), out=tree.get('T1_head_to_MNI_img'), interp='spline', cmdonly=True) cmd = ' '.join(cmd) + '\n' f.write(cmd) # job_ids[17] = submitJob(tag+'_'+task_name, log_dir, script=script_path, queue=cpuq, # wait_for=list(itemgetter(*[0, 10])(job_ids))) job_ids[17] = submitJob(script_path, tag + '_' + task_name, log_dir, queue=cpuq, wait_for=list(itemgetter(*[0, 10])(job_ids)), array_task=True) print('submitted: ' + task_name) # Transform individual T2 head images to MNI space task_count += 1 task_name = '{:03d}_affT_transform_T2_head_to_MNI'.format(task_count) script_path = os.path.join(script_dir, task_name + '.sh') if mod['T2_head_key'] is not None: with open(script_path, 'w+') as f: for id in ls_ids: tree = tree.update(sub_id=id, ref_id=aff_ref_id) cmd = applyxfm(src=tree.get(mod['T2_head_key']), ref=mni_path, mat=tree.get('T2_to_MNI_mat'), out=tree.get('T2_head_to_MNI_img'), interp='spline', cmdonly=True) cmd = ' '.join(cmd) + '\n' f.write(cmd) # job_ids[18] = submitJob(tag+'_'+task_name, log_dir, script=script_path, queue=cpuq, # wait_for=[job_ids[12]]) job_ids[18] = submitJob(script_path, tag + '_' + task_name, log_dir, queue=cpuq, wait_for=[job_ids[12]], array_task=True) print('submitted: ' + task_name) # Transform individual DTI images to MNI space if mod['T2_head_key'] is not None and mod['DTI_scalar_key'] is not None: task_count += 1 task_name = '{:03d}_affT_transform_DTI_scalar_to_MNI'.format(task_count) script_path = os.path.join(script_dir, task_name + '.sh') with open(script_path, 'w+') as f: for id in ls_ids: tree = tree.update(sub_id=id, ref_id=aff_ref_id) cmd = applyxfm(src=tree.get(mod['DTI_scalar_key']), ref=mni_path, mat=tree.get('DTI_to_MNI_mat'), out=tree.get('DTI_to_MNI_img'), interp='spline', cmdonly=True) cmd = ' '.join(cmd) + '\n' f.write(cmd) # job_ids[19] = submitJob(tag+'_'+task_name, log_dir, script=script_path, queue=cpuq, # wait_for=[job_ids[14]]) job_ids[19] = submitJob(script_path, tag + '_' + task_name, log_dir, queue=cpuq, wait_for=[job_ids[14]], array_task=True) print('submitted: ' + task_name) # Transform individual DTI tensors to MNI space if mod['T2_head_key'] is not None and mod['DTI_tensor_key'] is not None: task_count += 1 task_name = '{:03d}_affT_transform_DTI_tensor_to_MNI'.format(task_count) script_path = os.path.join(script_dir, task_name + '.sh') with open(script_path, 'w+') as f: for id in ls_ids: tree = tree.update(sub_id=id, ref_id=aff_ref_id) cmd = 'vecreg -i ' + tree.get(mod['DTI_tensor_key']) + \ ' -r ' + mni_path + \ ' -o ' + tree.get('DTI_tensor_to_MNI') + \ ' -t ' + tree.get('DTI_to_MNI_mat') + \ ' --interp=spline \n' f.write(cmd) # job_ids[20] = submitJob(tag+'_'+task_name, log_dir, script=script_path, queue=cpuq, # wait_for=[job_ids[14]]) job_ids[20] = submitJob(script_path, tag + '_' + task_name, log_dir, queue=cpuq, wait_for=[job_ids[14]], array_task=True) print('submitted: ' + task_name) # Averaging transformed T1 brain images in MNI space if mod['T1_brain_key'] is not None: task_count += 1 task_name = '{:03d}_affT_average_T1_brain_in_MNI'.format(task_count) img_paths = [] for id in ls_ids: tree = tree.update(sub_id=id, ref_id=aff_ref_id) img_paths.append(tree.get('T1_brain_to_MNI_img')) aff_template_path = tree.get('T1_brain_affine_template') jobcmd = func_to_cmd(averageImages, args=(img_paths, aff_template_path, 'median', True), tmp_dir=script_dir, kwargs=None, clean="never") # job_ids[21] = submitJob(tag+'_'+task_name, log_dir, command=jobcmd, queue=cpuq, # wait_for=[job_ids[15]]) job_ids[21] = submitJob(jobcmd, tag + '_' + task_name, log_dir, queue=cpuq, wait_for=[job_ids[15]], array_task=False) print('submitted: ' + task_name) # Averaging transformed T1 brain masks in MNI space if mod['T1_brain_mask_key'] is not None: task_count += 1 task_name = '{:03d}_affT_average_T1_brain_masks_in_MNI'.format(task_count) img_paths = [] for id in ls_ids: tree = tree.update(sub_id=id, ref_id=aff_ref_id) img_paths.append(tree.get('T1_brain_mask_to_MNI_img')) aff_template_path = tree.get('T1_brain_mask_affine_template') jobcmd = func_to_cmd(averageImages, args=(img_paths, aff_template_path, 'average', False), tmp_dir=script_dir, kwargs=None, clean="never") # job_ids[22] = submitJob(tag+'_'+task_name, log_dir, command=jobcmd, queue=cpuq, # wait_for=[job_ids[16]]) job_ids[22] = submitJob(jobcmd, tag + '_' + task_name, log_dir, queue=cpuq, wait_for=[job_ids[16]], array_task=False) print('submitted: ' + task_name) task_name = '{:03d}_affT_create_weighted_brain_mask'.format(task_count) jobcmd = 'fslmaths ' + aff_template_path + ' -bin -mul 7 -add 1 -inm 1 ' + tree.get('T1_brain_mask_weighted_affine_template') # job_ids[23] = submitJob(tag+'_'+task_name, log_dir, command=jobcmd, queue=cpuq, wait_for=[job_ids[22]]) job_ids[23] = submitJob(jobcmd, tag + '_' + task_name, log_dir, queue=cpuq, wait_for=[job_ids[22]], array_task=False) print('submitted: ' + task_name) # Averaging transformed T1 non-defaced whole-head images in MNI space if mod['T1_head_key'] is not None: task_count += 1 task_name = '{:03d}_affT_average_T1_head_in_MNI'.format(task_count) img_paths = [] for id in ls_ids: tree = tree.update(sub_id=id, ref_id=aff_ref_id) img_paths.append(tree.get('T1_head_to_MNI_img')) aff_template_path = tree.get('T1_head_affine_template') jobcmd = func_to_cmd(averageImages, args=(img_paths, aff_template_path, 'median', True), tmp_dir=script_dir, kwargs=None, clean="never") # job_ids[24] = submitJob(tag+'_'+task_name, log_dir, command=jobcmd, queue=cpuq, # wait_for=[job_ids[17]]) job_ids[24] = submitJob(jobcmd, tag + '_' + task_name, log_dir, queue=cpuq, wait_for=[job_ids[17]], array_task=False) print('submitted: ' + task_name) # Averaging transformed T2 non-defaced whole-head images in MNI space if mod['T2_head_key'] is not None: task_count += 1 task_name = '{:03d}_affT_average_T2_head_in_MNI'.format(task_count) img_paths = [] for id in ls_ids: tree = tree.update(sub_id=id, ref_id=aff_ref_id) img_paths.append(tree.get('T2_head_to_MNI_img')) aff_template_path = tree.get('T2_head_affine_template') jobcmd = func_to_cmd(averageImages, args=(img_paths, aff_template_path, 'median', True), tmp_dir=script_dir, kwargs=None, clean="never") # job_ids[25] = submitJob(tag+'_'+task_name, log_dir, command=jobcmd, queue=cpuq, # wait_for=[job_ids[18]]) job_ids[25] = submitJob(jobcmd, tag + '_' + task_name, log_dir, queue=cpuq, wait_for=[job_ids[18]], array_task=False) print('submitted: ' + task_name) # Averaging transformed DTI images in MNI space if mod['T2_head_key'] is not None and mod['DTI_scalar_key'] is not None: task_count += 1 task_name = '{:03d}_affT_average_DTI_scalar_in_MNI'.format(task_count) img_paths = [] for id in ls_ids: tree = tree.update(sub_id=id, ref_id=aff_ref_id) img_paths.append(tree.get('DTI_to_MNI_img')) aff_template_path = tree.get('DTI_affine_template') jobcmd = func_to_cmd(averageImages, args=(img_paths, aff_template_path, 'median', True), tmp_dir=script_dir, kwargs=None, clean="never") # job_ids[26] = submitJob(tag+'_'+task_name, log_dir, command=jobcmd, queue=cpuq, # wait_for=[job_ids[19]]) job_ids[26] = submitJob(jobcmd, tag + '_' + task_name, log_dir, queue=cpuq, wait_for=[job_ids[19]], array_task=False) print('submitted: ' + task_name) # Averaging transformed DTI tensors in MNI space if mod['T2_head_key'] is not None and mod['DTI_tensor_key']: task_count += 1 task_name = '{:03d}_affT_average_DTI_tensor_in_MNI'.format(task_count) export_paths = [] cmd = mmorf_exec_cmd + ' tensor_average' + ' -i ' for id in ls_ids: tree = tree.update(sub_id=id, ref_id=aff_ref_id) cmd += tree.get('DTI_tensor_to_MNI') + ' ' export_paths.append(tree.get('DTI_tensor_to_MNI')) cmd += '-o ' + tree.get('DTI_tensor_affine_template') export_paths.append(tree.get('DTI_tensor_affine_template')) common_path = os.path.commonpath(export_paths) export_var_str = {'SINGULARITY_BIND': '"SINGULARITY_BIND=' + ','.join([common_path]) + '"'} # job_ids[27] = submitJob(tag+'_'+task_name, log_dir, command=cmd, queue=cpuq, # export_var=export_var_str['SINGULARITY_BIND'], wait_for=[job_ids[20]]) job_ids[27] = submitJob(cmd, tag + '_' + task_name, log_dir, queue=cpuq, wait_for=[job_ids[20]], array_task=False, export_var=[export_var_str['SINGULARITY_BIND']]) print('submitted: ' + task_name) # Nonlinear template construction if nln_on: it_total = 0 for s, step in enumerate(step_id): tree = tree.update(step_id='{:02d}'.format(step)) config_path = tree.get('mmorf_params', make_dir=True) writeConfig(step, mod, config_path) for i, it in enumerate(it_at_step_id): print('Step ID: ', step) print('Iteration ID: ', it) it_total += 1 if it_total == 1: tree = tree.update(step_id='{:02d}'.format(step), it_id='{:02d}'.format(it)) img_ref_T1brain_path = tree.get('T1_brain_affine_template') img_ref_T1head_path = tree.get('T1_head_affine_template') img_ref_T2head_path = tree.get('T2_head_affine_template') img_ref_tensor_path = tree.get('DTI_tensor_affine_template') img_ref_T1brain_mask_path = tree.get('T1_brain_mask_weighted_affine_template') else: if it == 1: prev_it = it_at_step_id[-1] prev_step = step - 1 elif it > 1: prev_it = it - 1 prev_step = step tree = tree.update(step_id='{:02d}'.format(prev_step), it_id='{:02d}'.format(prev_it)) img_ref_T1brain_path = tree.get('T1_brain_nln_template') img_ref_T1head_path = tree.get('T1_head_nln_template') img_ref_T2head_path = tree.get('T2_head_nln_template') img_ref_tensor_path = tree.get('DTI_tensor_nln_template') img_ref_T1brain_mask_path = tree.get('T1_brain_mask_weighted_nln_template') avgwarp_prev_it_path = tree.get('avg_warp') avgmask_prev_it_path = tree.get('T1_brain_mask_nln_template') task_count += 1 task_name = '{:03d}_nlnT_mmorf'.format(task_count) # Nonlinear registration to template from previous iteration script_path = os.path.join(script_dir, task_name + '.sh') with open(script_path, 'w+') as f: export_vars = {} for i, id in enumerate(ls_ids): tree = tree.update(sub_id=id, step_id='{:02d}'.format(step), it_id='{:02d}'.format(it)) if mod['T1_head_key'] is not None and mod['T2_head_key'] is not None and mod['DTI_tensor_key'] is not None: img_warp_space = img_ref_T1head_path img_ref_scalar = [img_ref_T1head_path, img_ref_T2head_path] img_mov_scalar = [tree.get('T1_head_clamped'), tree.get(mod['T2_head_key'])] aff_ref_scalar = [tree.get('identity_mat'), tree.get('identity_mat')] aff_mov_scalar = [tree.get('T1_to_MNI_mat'), tree.get('T2_to_MNI_mat')] mask_ref_scalar = [img_ref_T1brain_mask_path, 'NULL'] mask_mov_scalar = ['NULL', 'NULL'] img_ref_tensor = [img_ref_tensor_path] img_mov_tensor = [tree.get(mod['DTI_tensor_key'])] aff_ref_tensor = [tree.get('identity_mat')] aff_mov_tensor = [tree.get('DTI_to_MNI_mat')] mask_tensor = ['NULL'] elif mod['T1_head_key'] is not None and mod['T2_head_key'] is not None: img_warp_space = img_ref_T1head_path img_ref_scalar = [img_ref_T1head_path, img_ref_T2head_path] img_mov_scalar = [tree.get('T1_head_clamped'), tree.get(mod['T2_head_key'])] aff_ref_scalar = [tree.get('identity_mat'), tree.get('identity_mat')] aff_mov_scalar = [tree.get('T1_to_MNI_mat'), tree.get('T2_to_MNI_mat')] mask_ref_scalar = [img_ref_T1brain_mask_path, 'NULL'] mask_mov_scalar = ['NULL', 'NULL'] img_ref_tensor = [] img_mov_tensor = [] aff_ref_tensor = [] aff_mov_tensor = [] mask_tensor = [] elif mod['T1_head_key'] is not None: img_warp_space = img_ref_T1head_path img_ref_scalar = [img_ref_T1head_path] img_mov_scalar = [tree.get('T1_head_clamped')] aff_ref_scalar = [tree.get('identity_mat')] aff_mov_scalar = [tree.get('T1_to_MNI_mat')] mask_ref_scalar = [img_ref_T1brain_mask_path] mask_mov_scalar = [tree.get('data/lesion_mask_in_T1')] img_ref_tensor = [] img_mov_tensor = [] aff_ref_tensor = [] aff_mov_tensor = [] mask_tensor = [] mmorf_script, export_var = mmorfWrapper(mmorf_run_cmd, config_path, img_warp_space=img_warp_space, img_ref_scalar=img_ref_scalar, img_mov_scalar=img_mov_scalar, aff_ref_scalar=aff_ref_scalar, aff_mov_scalar=aff_mov_scalar, mask_ref_scalar=mask_ref_scalar, mask_mov_scalar=mask_mov_scalar, img_ref_tensor=img_ref_tensor, img_mov_tensor=img_mov_tensor, aff_ref_tensor=aff_ref_tensor, aff_mov_tensor=aff_mov_tensor, mask_ref_tensor=mask_tensor, mask_mov_tensor=mask_tensor, warp_out=tree.get('mmorf_warp', make_dir=True), jac_det_out=tree.get('mmorf_jac'), bias_out=tree.get('mmorf_bias')) f.write(mmorf_script) for key, value in export_var.items(): if i == 0: export_vars[key] = value else: export_vars[key] = export_vars[key] + value export_var_str = {} for key, value in export_vars.items(): common_path = os.path.commonpath(value) export_var_str[key] = '"' + key + '=' + ','.join([common_path]) + '"' # job_ids[28] = submitJob(tag+'_'+task_name, log_dir, script=script_path, queue=gpuq, # wait_for=list(itemgetter(*[21, 23, 24, 25, 26, 27, 28, 44, 45, 46, 47, 48, 50])(job_ids)), # coprocessor_class=None, coprocessor='cuda', # export_var=export_var_str['SINGULARITY_BIND'], debug=False) job_ids[28] = submitJob(script_path, tag + '_' + task_name, log_dir, queue=gpuq, wait_for=list(itemgetter(*[21, 23, 24, 25, 26, 27, 28, 44, 45, 46, 47, 48, 50])(job_ids)), array_task=True, coprocessor='cuda', export_var=[export_var_str['SINGULARITY_BIND']]) print('submitted: ' + task_name) # Averaging warps task_count += 1 task_name = '{:03d}_nlnT_average_warps'.format(task_count) warp_paths = [] for id in ls_ids: tree = tree.update(sub_id=id, step_id='{:02d}'.format(step), it_id='{:02d}'.format(it)) warp_paths.append(tree.get('mmorf_warp')) avg_warp_path = tree.get('avg_warp') jobcmd = func_to_cmd(averageImages, args=(warp_paths, avg_warp_path, 'average', False), tmp_dir=script_dir, kwargs=None, clean="never") # job_ids[29] = submitJob(tag+'_'+task_name, log_dir, command=jobcmd, queue=cpuq, # wait_for=[job_ids[28]]) job_ids[29] = submitJob(jobcmd, tag + '_' + task_name, log_dir, queue=cpuq,wait_for=[job_ids[28]], array_task=False) print('submitted: ' + task_name) # Inverse average warp task_count += 1 task_name = '{:03d}_nlnT_invert_average_warp'.format(task_count) avg_warp_path = tree.get('avg_warp') inv_avg_warp_path = tree.get('inv_avg_warp') cmd = invwarp(warp=avg_warp_path, ref=img_ref_T1head_path, out=inv_avg_warp_path, cmdonly=True) cmd = ' '.join(cmd) + '\n' # job_ids[30] = submitJob(tag+'_'+task_name, log_dir, command=cmd, queue=cpuq, # wait_for=[job_ids[29]]) job_ids[30] = submitJob(cmd, tag + '_' + task_name, log_dir, queue=cpuq,wait_for=[job_ids[29]], array_task=False) print('submitted: ' + task_name) # Create unbiased warps: (1) resample forward warp with inverse average warp and (2) add inverse average warp to resulting composition task_count += 1 task_name = '{:03d}_nlnT_resample_warps'.format(task_count) script_path = os.path.join(script_dir, task_name + '.sh') with open(script_path, 'w+') as f: for id in ls_ids: tree = tree.update(sub_id=id, step_id='{:02d}'.format(step), it_id='{:02d}'.format(it)) warp_path = tree.get('mmorf_warp') resampled_path = tree.get('mmorf_warp_resampled') inv_avg_warp_path = tree.get('inv_avg_warp') jobcmd = func_to_cmd(applyWarpWrapper, args=( warp_path, img_ref_T1head_path, resampled_path, inv_avg_warp_path, 'spline', False), tmp_dir=script_dir, kwargs=None, clean="never") f.write(jobcmd + '\n') # job_ids[31] = submitJob(tag+'_'+task_name, log_dir, script=script_path, queue=cpuq, # wait_for=[job_ids[30]]) job_ids[31] = submitJob(script_path, tag + '_' + task_name, log_dir, queue=cpuq, wait_for=[job_ids[30]], array_task=True) print('submitted: ' + task_name) task_count += 1 task_name = '{:03d}_nlnT_unbias_warps'.format(task_count) script_path = os.path.join(script_dir, task_name + '.sh') with open(script_path, 'w+') as f: for id in ls_ids: tree = tree.update(sub_id=id, step_id='{:02d}'.format(step), it_id='{:02d}'.format(it)) f.write('fslmaths ' + tree.get('mmorf_warp_resampled') + ' -add ' + tree.get( 'inv_avg_warp') + ' ' + tree.get('mmorf_warp_resampled_unbiased') + '\n') # job_ids[32] = submitJob(tag+'_'+task_name, log_dir, script=script_path, queue=cpuq, # wait_for=[job_ids[31]]) job_ids[32] = submitJob(script_path, tag + '_' + task_name, log_dir, queue=cpuq, wait_for=[job_ids[31]], array_task=True) print('submitted: ' + task_name) # Concatenate corresponding affine transforms and unbiased warps # T1 brain task_count += 1 task_name = '{:03d}_nlnT_concat_unbiased_warps_T1_brain'.format(task_count) script_path = os.path.join(script_dir, task_name + '.sh') with open(script_path, 'w+') as f: for id in ls_ids: tree = tree.update(sub_id=id, step_id='{:02d}'.format(step), it_id='{:02d}'.format(it)) full_resampled_path = tree.get('mmorf_warp_resampled_unbiased_full_T1brain') premat_path = tree.get('T1_to_MNI_mat') warp_path = tree.get('mmorf_warp_resampled_unbiased') cmd = convertwarp(out=full_resampled_path, ref=img_ref_T1head_path, premat=premat_path, warp1=warp_path, cmdonly=True) cmd = ' '.join(cmd) + '\n' f.write(cmd) # job_ids[33] = submitJob(tag+'_'+task_name, log_dir, script=script_path, queue=cpuq, # wait_for=[job_ids[32]]) job_ids[33] = submitJob(script_path, tag + '_' + task_name, log_dir, queue=cpuq, wait_for=[job_ids[32]], array_task=True) print('submitted: ' + task_name) # T1 head if mod['T1_head_key'] is not None: task_count += 1 task_name = '{:03d}_nlnT_concat_unbiased_warps_T1_head'.format(task_count) script_path = os.path.join(script_dir, task_name + '.sh') with open(script_path, 'w+') as f: for id in ls_ids: tree = tree.update(sub_id=id, step_id='{:02d}'.format(step), it_id='{:02d}'.format(it)) full_resampled_path = tree.get('mmorf_warp_resampled_unbiased_full_T1head') premat_path = tree.get('T1_to_MNI_mat') warp_path = tree.get('mmorf_warp_resampled_unbiased') cmd = convertwarp(out=full_resampled_path, ref=img_ref_T1head_path, premat=premat_path, warp1=warp_path, cmdonly=True) cmd = ' '.join(cmd) + '\n' f.write(cmd) # job_ids[34] = submitJob(tag+'_'+task_name, log_dir, script=script_path, queue=cpuq, # wait_for=[job_ids[32]]) job_ids[34] = submitJob(script_path, tag + '_' + task_name, log_dir, queue=cpuq, wait_for=[job_ids[32]], array_task=True) print('submitted: ' + task_name) # T2 head if mod['T2_head_key'] is not None: task_count += 1 task_name = '{:03d}_nlnT_concat_unbiased_warps_T2_head'.format(task_count) script_path = os.path.join(script_dir, task_name + '.sh') with open(script_path, 'w+') as f: for id in ls_ids: tree = tree.update(sub_id=id, step_id='{:02d}'.format(step), it_id='{:02d}'.format(it)) full_resampled_path = tree.get('mmorf_warp_resampled_unbiased_full_T2') premat_path = tree.get('T2_to_MNI_mat') warp_path = tree.get('mmorf_warp_resampled_unbiased') cmd = convertwarp(out=full_resampled_path, ref=img_ref_T1head_path, premat=premat_path, warp1=warp_path, cmdonly=True) cmd = ' '.join(cmd) + '\n' f.write(cmd) # job_ids[35] = submitJob(tag+'_'+task_name, log_dir, script=script_path, queue=cpuq, # wait_for=[job_ids[32]]) job_ids[35] = submitJob(script_path, tag + '_' + task_name, log_dir, queue=cpuq, wait_for=[job_ids[32]], array_task=True) print('submitted: ' + task_name) # DTI if mod['T2_head_key'] is not None and mod['DTI_tensor_key'] is not None: task_count += 1 task_name = '{:03d}_nlnT_concat_unbiased_warps_DTI'.format(task_count) script_path = os.path.join(script_dir, task_name + '.sh') with open(script_path, 'w+') as f: for id in ls_ids: tree = tree.update(sub_id=id, step_id='{:02d}'.format(step), it_id='{:02d}'.format(it)) full_resampled_path = tree.get('mmorf_warp_resampled_unbiased_full_DTI') premat_path = tree.get('DTI_to_MNI_mat') warp_path = tree.get('mmorf_warp_resampled_unbiased') cmd = convertwarp(out=full_resampled_path, ref=img_ref_T1head_path, premat=premat_path, warp1=warp_path, cmdonly=True) cmd = ' '.join(cmd) + '\n' f.write(cmd) # job_ids[36] = submitJob(tag+'_'+task_name, log_dir, script=script_path, queue=cpuq, # wait_for=[job_ids[32]]) job_ids[36] = submitJob(script_path, tag + '_' + task_name, log_dir, queue=cpuq, wait_for=[job_ids[32]], array_task=True) print('submitted: ' + task_name) # Apply warps to images # T1 brain if mod['T1_brain_key'] is not None: task_count += 1 task_name = '{:03d}_nlnT_warp_T1_brain'.format(task_count) script_path = os.path.join(script_dir, task_name + '.sh') with open(script_path, 'w+') as f: for id in ls_ids: tree = tree.update(sub_id=id, step_id='{:02d}'.format(step), it_id='{:02d}'.format(it)) img_path = tree.get(mod['T1_brain_key']) warped_path = tree.get('warped_T1brain') warp_path = tree.get('mmorf_warp_resampled_unbiased_full_T1brain') jobcmd = func_to_cmd(applyWarpWrapper, args=( img_path, img_ref_T1head_path, warped_path, warp_path, 'spline', False), tmp_dir=script_dir, kwargs=None, clean="never") f.write(jobcmd + '\n') # job_ids[37] = submitJob(tag+'_'+task_name, log_dir, script=script_path, queue=cpuq, # wait_for=[job_ids[33]]) job_ids[37] = submitJob(script_path, tag + '_' + task_name, log_dir, queue=cpuq, wait_for=[job_ids[33]], array_task=True) print('submitted: ' + task_name) # T1 brain mask task_count += 1 task_name = '{:03d}_nlnT_warp_T1_brain_mask'.format(task_count) script_path = os.path.join(script_dir, task_name + '.sh') with open(script_path, 'w+') as f: for id in ls_ids: tree = tree.update(sub_id=id, step_id='{:02d}'.format(step), it_id='{:02d}'.format(it)) img_path = tree.get(mod['T1_brain_mask_key']) warped_path = tree.get('warped_T1brain_mask') warp_path = tree.get('mmorf_warp_resampled_unbiased_full_T1brain') jobcmd = func_to_cmd(applyWarpWrapper, args=(img_path, img_ref_T1head_path, warped_path, warp_path, 'nn', False), tmp_dir=script_dir, kwargs=None, clean="never") f.write(jobcmd + '\n') # job_ids[38] = submitJob(tag+'_'+task_name, log_dir, script=script_path, queue=cpuq, # wait_for=list(itemgetter(*[33, 34])(job_ids))) job_ids[38] = submitJob(script_path, tag + '_' + task_name, log_dir, queue=cpuq, wait_for=list(itemgetter(*[33, 34])(job_ids)), array_task=True) print('submitted: ' + task_name) # T1 head if mod['T1_head_key'] is not None: task_count += 1 task_name = '{:03d}_nlnT_warp_T1_head'.format(task_count) script_path = os.path.join(script_dir, task_name + '.sh') if step == step_id[-1] and it == it_at_step_id[-1]: T1_head_key_temp = mod['T1_head_key'] else: T1_head_key_temp = 'T1_head_clamped' with open(script_path, 'w+') as f: for id in ls_ids: tree = tree.update(sub_id=id, step_id='{:02d}'.format(step), it_id='{:02d}'.format(it)) img_path = tree.get(T1_head_key_temp) warped_path = tree.get('warped_T1head') warp_path = tree.get('mmorf_warp_resampled_unbiased_full_T1head') jobcmd = func_to_cmd(applyWarpWrapper, args=( img_path, img_ref_T1head_path, warped_path, warp_path, 'spline', False), tmp_dir=script_dir, kwargs=None, clean="never") f.write(jobcmd + '\n') # job_ids[39] = submitJob(tag+'_'+task_name, log_dir, script=script_path, queue=cpuq, # wait_for=[job_ids[34]]) job_ids[39] = submitJob(script_path, tag + '_' + task_name, log_dir, queue=cpuq, wait_for=[job_ids[34]], array_task=True) print('submitted: ' + task_name) # T2 head if mod['T2_head_key'] is not None: task_count += 1 task_name = '{:03d}_nlnT_warp_T2_head'.format(task_count) script_path = os.path.join(script_dir, task_name + '.sh') with open(script_path, 'w+') as f: for id in ls_ids: tree = tree.update(sub_id=id, step_id='{:02d}'.format(step), it_id='{:02d}'.format(it)) img_path = tree.get(mod['T2_head_key']) warped_path = tree.get('warped_T2head') warp_path = tree.get('mmorf_warp_resampled_unbiased_full_T2') jobcmd = func_to_cmd(applyWarpWrapper, args=( img_path, img_ref_T1head_path, warped_path, warp_path, 'spline', False), tmp_dir=script_dir, kwargs=None, clean="never") f.write(jobcmd + '\n') # job_ids[40] = submitJob(tag+'_'+task_name, log_dir, script=script_path, queue=cpuq, # wait_for=[job_ids[35]]) job_ids[40] = submitJob(script_path, tag + '_' + task_name, log_dir, queue=cpuq, wait_for=[job_ids[35]], array_task=True) print('submitted: ' + task_name) # scalar DTI if mod['T2_head_key'] is not None and mod['DTI_scalar_key'] is not None: task_count += 1 task_name = '{:03d}_nlnT_warp_DTI_scalar'.format(task_count) script_path = os.path.join(script_dir, task_name + '.sh') with open(script_path, 'w+') as f: for id in ls_ids: tree = tree.update(sub_id=id, step_id='{:02d}'.format(step), it_id='{:02d}'.format(it)) img_path = tree.get(mod['DTI_scalar_key']) warped_path = tree.get('warped_DTIscalar') warp_path = tree.get('mmorf_warp_resampled_unbiased_full_DTI') jobcmd = func_to_cmd(applyWarpWrapper, args=( img_path, img_ref_T1head_path, warped_path, warp_path, 'spline', True), tmp_dir=script_dir, kwargs=None, clean="never") f.write(jobcmd + '\n') # job_ids[42] = submitJob(tag+'_'+task_name, log_dir, script=script_path, queue=cpuq, # wait_for=[job_ids[36]]) job_ids[42] = submitJob(script_path, tag + '_' + task_name, log_dir, queue=cpuq, wait_for=[job_ids[36]], array_task=True) print('submitted: ' + task_name) # DTI tensor if mod['T2_head_key'] is not None and mod['DTI_tensor_key'] is not None: task_count += 1 task_name = '{:03d}_nlnT_warp_DTI_tensor'.format(task_count) script_path = os.path.join(script_dir, task_name + '.sh') with open(script_path, 'w+') as f: for id in ls_ids: tree = tree.update(sub_id=id, step_id='{:02d}'.format(step), it_id='{:02d}'.format(it)) cmd = 'vecreg -i ' + tree.get('data/DTI_tensor') + \ ' -r ' + img_ref_T1head_path + \ ' -o ' + tree.get('warped_DTItensor') + \ ' -w ' + tree.get('mmorf_warp_resampled_unbiased_full_DTI') + \ ' --interp=spline \n' f.write(cmd) # job_ids[43] = submitJob(tag+'_'+task_name, log_dir, script=script_path, queue=cpuq, # wait_for=[job_ids[36]]) job_ids[43] = submitJob(script_path, tag + '_' + task_name, log_dir, queue=cpuq, wait_for=[job_ids[36]], array_task=True) print('submitted: ' + task_name) # Averaging transformed T1 brain images in template space if mod['T1_brain_key'] is not None: task_count += 1 task_name = '{:03d}_nlnT_average_T1_brain'.format(task_count) img_paths = [] for id in ls_ids: tree = tree.update(sub_id=id, step_id='{:02d}'.format(step), it_id='{:02d}'.format(it)) img_paths.append(tree.get('warped_T1brain')) nln_template_path = tree.get('T1_brain_nln_template') jobcmd = func_to_cmd(averageImages, args=(img_paths, nln_template_path, 'median', True), tmp_dir=script_dir, kwargs=None, clean="never") # job_ids[44] = submitJob(tag+'_'+task_name, log_dir, command=jobcmd, queue=cpuq, # wait_for=[job_ids[37]]) job_ids[44] = submitJob(jobcmd, tag + '_' + task_name, log_dir, queue=cpuq, wait_for=[job_ids[37]], array_task=False) print('submitted: ' + task_name) # Averaging transformed T1 whole-head images in template space if mod['T1_head_key'] is not None: task_count += 1 task_name = '{:03d}_nlnT_average_T1_head'.format(task_count) img_paths = [] for id in ls_ids: tree = tree.update(sub_id=id, step_id='{:02d}'.format(step), it_id='{:02d}'.format(it)) img_paths.append(tree.get('warped_T1head')) nln_template_path = tree.get('T1_head_nln_template') jobcmd = func_to_cmd(averageImages, args=(img_paths, nln_template_path, 'median', True), tmp_dir=script_dir, kwargs=None, clean="never") # job_ids[45] = submitJob(tag+'_'+task_name, log_dir, command=jobcmd, queue=cpuq, # wait_for=[job_ids[39]]) job_ids[45] = submitJob(jobcmd, tag + '_' + task_name, log_dir, queue=cpuq, wait_for=[job_ids[39]], array_task=False) print('submitted: ' + task_name) # Averaging transformed T2 whole-head images in template space if mod['T2_head_key'] is not None: task_count += 1 task_name = '{:03d}_nlnT_average_T2_head'.format(task_count) img_paths = [] for id in ls_ids: tree = tree.update(sub_id=id, step_id='{:02d}'.format(step), it_id='{:02d}'.format(it)) img_paths.append(tree.get('warped_T2head')) nln_template_path = tree.get('T2_head_nln_template') jobcmd = func_to_cmd(averageImages, args=(img_paths, nln_template_path, 'median', True), tmp_dir=script_dir, kwargs=None, clean="never") # job_ids[46] = submitJob(tag+'_'+task_name, log_dir, command=jobcmd, queue=cpuq, # wait_for=[job_ids[40]]) job_ids[46] = submitJob(jobcmd, tag + '_' + task_name, log_dir, queue=cpuq, wait_for=[job_ids[40]], array_task=False) print('submitted: ' + task_name) # Averaging transformed DTI images in template space if mod['T2_head_key'] is not None and mod['DTI_scalar_key'] is not None: task_count += 1 task_name = '{:03d}_nlnT_average_DTI_scalar'.format(task_count) img_paths = [] for id in ls_ids: tree = tree.update(sub_id=id, step_id='{:02d}'.format(step), it_id='{:02d}'.format(it)) img_paths.append(tree.get('warped_DTIscalar')) nln_template_path = tree.get('DTI_nln_template') jobcmd = func_to_cmd(averageImages, args=(img_paths, nln_template_path, 'median', True), tmp_dir=script_dir, kwargs=None, clean="never") # job_ids[47] = submitJob(tag+'_'+task_name, log_dir, command=jobcmd, queue=cpuq, # wait_for=[job_ids[42]]) job_ids[47] = submitJob(jobcmd, tag + '_' + task_name, log_dir, queue=cpuq, wait_for=[job_ids[42]], array_task=False) print('submitted: ' + task_name) # Averaging transformed DTI tensors in template space if mod['T2_head_key'] is not None and mod['DTI_tensor_key'] is not None: task_count += 1 task_name = '{:03d}_nlnT_average_DTI_tensor'.format(task_count) export_paths = [] cmd = mmorf_exec_cmd + ' tensor_average' + ' -i ' for id in ls_ids: tree = tree.update(sub_id=id, step_id='{:02d}'.format(step), it_id='{:02d}'.format(it)) cmd += tree.get('warped_DTItensor') + ' ' export_paths.append(tree.get('warped_DTItensor')) cmd += '-o ' + tree.get('DTI_tensor_nln_template') export_paths.append(tree.get('DTI_tensor_nln_template')) common_path = os.path.commonpath(export_paths) export_var_str = {'SINGULARITY_BIND': '"SINGULARITY_BIND=' + ','.join([common_path]) + '"'} # job_ids[48] = submitJob(tag+'_'+task_name, log_dir, command=cmd, queue=cpuq, # export_var=export_var_str['SINGULARITY_BIND'], wait_for=[job_ids[43]]) job_ids[48] = submitJob(cmd, tag + '_' + task_name, log_dir, queue=cpuq, wait_for=[job_ids[43]], array_task=False, export_var=[export_var_str['SINGULARITY_BIND']]) print('submitted: ' + task_name) # Averaging transformed T1 brain masks in template space task_count += 1 task_name = '{:03d}_nlnT_average_T1_brain_mask'.format(task_count) img_paths = [] for id in ls_ids: tree = tree.update(sub_id=id, step_id='{:02d}'.format(step), it_id='{:02d}'.format(it)) img_paths.append(tree.get('warped_T1brain_mask')) nln_template_path = tree.get('T1_brain_mask_nln_template') jobcmd = func_to_cmd(averageImages, args=(img_paths, nln_template_path, 'average', False), tmp_dir=script_dir, kwargs=None, clean="never") # job_ids[49] = submitJob(tag+'_'+task_name, log_dir, command=jobcmd, queue=cpuq, # wait_for=[job_ids[38]]) job_ids[49] = submitJob(jobcmd, tag + '_' + task_name, log_dir, queue=cpuq, wait_for=[job_ids[38]], array_task=False) print('submitted: ' + task_name) task_count += 1 task_name = '{:03d}_nlnT_average_T1_brain_mask_weighted'.format(task_count) jobcmd = 'fslmaths ' + nln_template_path + ' -bin -mul 7 -add 1 -inm 1 ' + tree.get( 'T1_brain_mask_weighted_nln_template') # job_ids[50] = submitJob(tag+'_'+task_name, log_dir, command=jobcmd, queue=cpuq, # wait_for=[job_ids[49]]) job_ids[50] = submitJob(jobcmd, tag + '_' + task_name, log_dir, queue=cpuq, wait_for=[job_ids[49]], array_task=False) print('submitted: ' + task_name) # Convergence monitoring # Difference in average warp between consecutive iterations if it_total > 1: task_count += 1 task_name = '{:03d}_nlnT_average_warp_diff'.format(task_count) tree = tree.update(step_id='{:02d}'.format(step), it_id='{:02d}'.format(it)) avgwarp_curr_it_path = tree.get('avg_warp') avgmask_curr_it_path = tree.get('T1_brain_mask_nln_template') jobcmd = func_to_cmd(RMSdifference, args=( avgwarp_curr_it_path, avgwarp_prev_it_path, avgmask_curr_it_path, avgmask_prev_it_path, tree.get('delta_avgwarp_output')), tmp_dir=script_dir, kwargs=None, clean="never") # job_ids[51] = submitJob(tag+'_'+task_name, log_dir, command=jobcmd, queue=cpuq, # wait_for=list(itemgetter(*[29, 49])(job_ids))) job_ids[51] = submitJob(jobcmd, tag + '_' + task_name, log_dir, queue=cpuq, wait_for=list(itemgetter(*[29, 49])(job_ids)), array_task=False) print('submitted: ' + task_name) # Difference in template intensity between consecutive iterations if it_total > 1: task_count += 1 task_name = '{:03d}_nlnT_temp_intensity_diff'.format(task_count) tree = tree.update(step_id='{:02d}'.format(step), it_id='{:02d}'.format(it)) nln_template_curr_it_path = tree.get('T1_brain_nln_template') avgmask_curr_it_path = tree.get('T1_brain_mask_nln_template') jobcmd = func_to_cmd(RMSdifference, args=( nln_template_curr_it_path, img_ref_T1brain_path, avgmask_curr_it_path, avgmask_prev_it_path, tree.get('delta_intensity_output')), tmp_dir=script_dir, kwargs=None, clean="never") # job_ids[52] = submitJob(tag+'_'+task_name, log_dir, command=jobcmd, queue=cpuq, # wait_for=list(itemgetter(*[44,49])(job_ids))) job_ids[52] = submitJob(jobcmd, tag + '_' + task_name, log_dir, queue=cpuq, wait_for=list(itemgetter(*[44, 49])(job_ids)), array_task=False) print('submitted: ' + task_name) # RMS of intensity standard deviation of individual images in template space task_count += 1 task_name = '{:03d}_nlnT_temp_intensity_sd'.format(task_count) img_paths = [] for i, id in enumerate(ls_ids): tree = tree.update(sub_id=id, step_id='{:02d}'.format(step), it_id='{:02d}'.format(it)) img_paths.append(tree.get('warped_T1brain')) jobcmd = func_to_cmd(RMSstandardDeviation, args=(img_paths, tree.get('T1_head_nln_template'), tree.get('T1_brain_mask_nln_template'), tree.get('T1_head_SD_nln_template'), tree.get('sd_intensity_output')), tmp_dir=script_dir, kwargs=None, clean="never") # job_ids[53] = submitJob(tag+'_'+task_name, log_dir, command=jobcmd, queue=cpuq, # wait_for=list(itemgetter(*[45, 49])(job_ids))) job_ids[53] = submitJob(jobcmd, tag + '_' + task_name, log_dir, queue=cpuq, wait_for=list(itemgetter(*[45, 49])(job_ids)), array_task=False) print('submitted: ' + task_name)