diff --git a/unit_tests/fdt/dtifit/feedsRun b/unit_tests/fdt/dtifit/feedsRun index 15f5528adf139c0e394c2bd8e1698b047e49de36..41565733b68f280a7131b8f71651efcc422ff812 100755 --- a/unit_tests/fdt/dtifit/feedsRun +++ b/unit_tests/fdt/dtifit/feedsRun @@ -18,7 +18,8 @@ def gen_data(): :yield: directories containing reference noiseless data with the eigen-vectors and eigen-values used to generate the data """ - directory = 'dti' + directory = [''] * 10 + directory[0] = 'dti' # eigen vectors vary along x-axis eigen_vectors = np.array([ @@ -36,9 +37,9 @@ def gen_data(): # eigen-values vary along y-axis # eigen-values can not be the same or eigen-vectors will be ill-defined eigen_values = np.array([ - [1.2, 1., 0.8], + [1.2, 1., 0.4], [1., 0.5, 0.], - [2., 1.9, 1.8], + [0.8, .6, .3], ])[None, :, None, :, None] * 1e-3 # S0 varies along the z-axis @@ -50,11 +51,11 @@ def gen_data(): diffusion_tensor = (eigen_values[..., None] * eigen_vectors[..., None, :] * eigen_vectors[..., :, None]).sum(-3) for multi_shell in (False, True): # we should get the same results for single or multi-shell data - directory += '_multi' if multi_shell else '_single' - for kurt in (0, ):#1, 2): # 0: no kurtosis; 1: single kurtosis; 2: parallel and perpendicular kurtosis + directory[1] = 'multi' if multi_shell else 'single' + for kurt in (0, 1, 2): # 0: no kurtosis; 1: single kurtosis; 2: parallel and perpendicular kurtosis if kurt != 0 and not multi_shell: continue - directory += ['', '_kurt', '_kurtdir'][kurt] + directory[2] = ['', 'kurt', 'kurtdir'][kurt] bvals = np.full(50, 1000.) if multi_shell: @@ -66,93 +67,154 @@ def gen_data(): assert bvecs.shape == (50, 3), f"GPS produced bvecs-file with shape {bvecs.shape} rather than the expected (50, 3)" testing.assert_allclose((bvecs ** 2).sum(-1), 1.) 
- kurt_val = np.array([ - [0, 0, 0.], - [1., 1., 1.], - [1., 0.5, 0.5], - ][kurt]) - data = S0[..., None] * np.exp(np.sum( - -(bvals[:, None] * eigen_values[..., None, :, 0] + - kurt_val * eigen_values[..., None, :, 0] ** 2 * bvals[:, None] ** 2) * - np.sum(bvecs[:, None, :] * eigen_vectors[..., None, :, :], -1) ** 2, axis=-1 - )) + if kurt == 2: + beig = bvals[:, None] * eigen_values[..., None, :, 0] + beig[..., 0] -= 0.1 * (eigen_values[..., None, 0, 0] * bvals) ** 2 / 6 + beig[..., 1:] -= 0.1 * (np.mean(eigen_values[..., None, 1:, 0], -1) * bvals)[..., None] ** 2 / 6 + data = S0[..., None] * np.exp(np.sum( + -beig * + np.sum(bvecs[:, None, :] * eigen_vectors[..., None, :, :], -1) ** 2, axis=-1 + )) + else: + data = S0[..., None] * np.exp(np.sum( + -bvals[:, None] * eigen_values[..., None, :, 0] * + np.sum(bvecs[:, None, :] * eigen_vectors[..., None, :, :], -1) ** 2, axis=-1 + )) * np.exp((0 if kurt == 0 else 0.1) * (np.mean(eigen_values[..., 0], -1)[..., None] * bvals) ** 2 / 6.) if (data / S0[..., None]).min() < 0.01: raise ValueError("dtifit rounds attenuations below 0.01 up to 0.01, " "so these low values should not be in the test data") for flipped in (False, True): - if flipped: - directory += '_flipped' + directory[3] = 'flipped' if flipped else '' + + fdir = '_'.join([d for d in directory if len(d) > 0]) - if not os.path.isdir(directory): - os.mkdir(directory) + if not os.path.isdir(fdir): + os.mkdir(fdir) if flipped: - np.savetxt(f'{directory}/bvals', bvals[:, None]) - np.savetxt(f'{directory}/bvecs', bvecs.T) + np.savetxt(f'{fdir}/bvals', bvals[:, None]) + np.savetxt(f'{fdir}/bvecs', bvecs.T) else: - np.savetxt(f'{directory}/bvals', bvals[None, :]) - np.savetxt(f'{directory}/bvecs', bvecs) + np.savetxt(f'{fdir}/bvals', bvals[None, :]) + np.savetxt(f'{fdir}/bvecs', bvecs) affine = np.eye(4) * 1.25 affine[-1, -1] = 1. 
for idx in range(3): - nib.Nifti1Image(eigen_vectors[..., idx, :], affine=affine).to_filename(f'{directory}/ref_V{idx + 1}.nii.gz') - nib.Nifti1Image(eigen_values[..., idx, 0], affine=affine).to_filename(f'{directory}/ref_L{idx + 1}.nii.gz') - nib.Nifti1Image(S0, affine=affine).to_filename(f'{directory}/ref_S0.nii.gz') - nib.Nifti1Image(data, affine=affine).to_filename(f'{directory}/ref_data.nii.gz') - nib.Nifti1Image(np.ones(data.shape[:3], dtype=int), affine=affine).to_filename(f'{directory}/nodif_brain_mask.nii.gz') + nib.Nifti1Image(eigen_vectors[..., idx, :], affine=affine).to_filename(f'{fdir}/ref_V{idx + 1}.nii.gz') + nib.Nifti1Image(eigen_values[..., idx, 0], affine=affine).to_filename(f'{fdir}/ref_L{idx + 1}.nii.gz') + MD = eigen_values[..., 0].mean(-1) + FA = np.sqrt(1.5 * ((eigen_values[..., 0] - MD[..., None]) ** 2).sum(-1) / (eigen_values[..., 0] ** 2).sum(-1)) + mode = 3 * np.sqrt(6) * np.prod(eigen_values[..., 0] - MD[..., None], -1) / ((eigen_values[..., 0] - MD[..., None]) ** 2).sum(-1) ** 1.5 + print('mode', mode[0, :, 0]) + nib.Nifti1Image(MD, affine=affine).to_filename(f'{fdir}/ref_MD.nii.gz') + nib.Nifti1Image(FA, affine=affine).to_filename(f'{fdir}/ref_FA.nii.gz') + nib.Nifti1Image(mode, affine=affine).to_filename(f'{fdir}/ref_MO.nii.gz') + + nib.Nifti1Image(S0, affine=affine).to_filename(f'{fdir}/ref_S0.nii.gz') + nib.Nifti1Image(data, affine=affine).to_filename(f'{fdir}/ref_data.nii.gz') + nib.Nifti1Image(np.ones(data.shape[:3], dtype=int), affine=affine).to_filename(f'{fdir}/nodif_brain_mask.nii.gz') tensor_components = diffusion_tensor[:, :, :, [0, 0, 0, 1, 1, 2], [0, 1, 2, 1, 2, 2]] - nib.Nifti1Image(tensor_components, affine=affine).to_filename(f'{directory}/ref_tensor.nii.gz') - yield directory, kurt - - -def fit_data(directory, kurt=0): - for wls in (False, True): # in this noise-free data the --wls flag should not matter - base_output = f'{directory}/dti{"_wls" if wls else ""}' - cmd = [ - 'dtifit', - '-k', 
f'{directory}/ref_data.nii.gz', - '-m', f'{directory}/nodif_brain_mask.nii.gz', - '-r', f'{directory}/bvecs', - '-b', f'{directory}/bvals', - '-o', base_output, - '--sse', - '--save_tensor', - ] - if wls: - cmd += ['--wls'] - if kurt == 1: - cmd += ['--kurt'] - if kurt == 2: - cmd += ['--kurt'] - run(cmd, check=True) - yield base_output - - -for directory, kurt in gen_data(): - for base_output in fit_data(directory, kurt): - def compare(name): - ref = nib.load(f'{directory}/ref_{name}.nii.gz').get_fdata() - fit = nib.load(f'{base_output}_{name}.nii.gz').get_fdata() - assert ref.shape == fit.shape, f'incorrect NIFTI image shape for {name}' - testing.assert_allclose(ref, fit, atol=1e-8, - err_msg=f'mismatch in {name}') - - compare('S0') - compare('tensor') - for idx in (3, 2, 1): - compare(f'L{idx}') - - ref = nib.load(f'{directory}/ref_V{idx}.nii.gz').get_fdata() - fit = nib.load(f'{base_output}_V{idx}.nii.gz').get_fdata() - assert ref.shape == fit.shape - inner = (ref * fit).sum(-1) - testing.assert_allclose(abs(inner), 1.) 
- - sse = nib.load(f'{base_output}_sse.nii.gz').get_fdata() - testing.assert_allclose(sse, 0., atol=1e-8) + nib.Nifti1Image(tensor_components, affine=affine).to_filename(f'{fdir}/ref_tensor.nii.gz') + yield fdir, multi_shell, kurt + + +def fit_data(directory): + for kurtdir in (False, True): + for main_kurt in (False, True): + for wls in (False, True): # in this noise-free data the --wls flag should not matter + cmd = [ + 'dtifit', + '-k', f'{directory}/ref_data.nii.gz', + '-m', f'{directory}/nodif_brain_mask.nii.gz', + '-r', f'{directory}/bvecs', + '-b', f'{directory}/bvals', + '--sse', + '--save_tensor', + ] + base_output = f'{directory}/dti' + if wls: + cmd += ['--wls'] + base_output += '_wls' + if main_kurt: + cmd += ['--kurt'] + base_output += '_kurt' + if kurtdir: + cmd += ['--kurtdir'] + base_output += '_kurtdir' + cmd.extend(['-o', base_output]) + run(cmd, check=True) + yield base_output, main_kurt, kurtdir + + +for directory, multi_shell, kurt in gen_data(): + for base_output, fkurt, fkurtdir in fit_data(directory): + print(base_output) + if ( + (kurt == 0 and (multi_shell or not (fkurt or fkurtdir))) or + kurt == 1 and (fkurt or fkurtdir) or + kurt == 2 and fkurtdir + ): + print('This fit should be valid') + def compare(name): + ref = nib.load(f'{directory}/ref_{name}.nii.gz').get_fdata() + fit = nib.load(f'{base_output}_{name}.nii.gz').get_fdata() + assert ref.shape == fit.shape, f'incorrect NIFTI image shape for {name}' + testing.assert_allclose(ref, fit, atol=1e-6, + err_msg=f'mismatch in {name}') + + for idx in (1, 2, 3): + compare(f'L{idx}') + + ref = nib.load(f'{directory}/ref_V{idx}.nii.gz').get_fdata() + fit = nib.load(f'{base_output}_V{idx}.nii.gz').get_fdata() + assert ref.shape == fit.shape + inner = (ref * fit).sum(-1) + testing.assert_allclose(abs(inner), 1.) 
+ + compare('S0') + compare('FA') + compare('MO') + compare('MD') + compare('tensor') + + sse = nib.load(f'{base_output}_sse.nii.gz').get_fdata() + testing.assert_allclose(sse, 0., atol=1e-8) + + if fkurt: + kurt_fit = nib.load(f'{base_output}_kurt.nii.gz').get_fdata() + if kurt != 2: + testing.assert_allclose(kurt_fit, 0. if kurt == 0 else 0.1, rtol=1e-5, atol=1e-6) + else: + assert not os.path.isfile(f'{base_output}_kurt.nii.gz') + if fkurtdir: + kurt_para = nib.load(f'{base_output}_kurt_para.nii.gz').get_fdata() + kurt_perp = nib.load(f'{base_output}_kurt_perp.nii.gz').get_fdata() + if kurt == 0: + testing.assert_allclose(kurt_para, 0., atol=1e-5) + testing.assert_allclose(kurt_perp, 0., atol=1e-5) + elif kurt == 1: + # k_para and k_perp = 1 is different from MK=1... + assert not np.allclose(kurt_para, 0.1, rtol=0.01) + assert not np.allclose(kurt_perp, 0.1, rtol=0.01) + elif kurt == 2: + testing.assert_allclose(kurt_para, 0.1, rtol=1e-5) + testing.assert_allclose(kurt_perp, 0.05, rtol=1e-5) + # for kurt == 1; kurt_para + else: + assert not os.path.isfile(f'{base_output}_kurt_para.nii.gz') + assert not os.path.isfile(f'{base_output}_kurt_perp.nii.gz') + else: + print('This fit should be invalid') + ref = nib.load(f'{directory}/ref_L1.nii.gz').get_fdata() + fit = nib.load(f'{base_output}_L1.nii.gz').get_fdata() + assert not np.allclose(ref, fit, rtol=0.01) + + +