@@ -17,7 +17,7 @@ import fsl.transform.affine as affine
import fsl.utils.image.resample as resample
import fsl.utils.cache as cache
from . import (testdir, make_random_mask)
from fsl.tests import (testdir, make_random_mask)
pytestmark = pytest.mark.fsltest
@@ -41,17 +41,16 @@ _atlases = cache.Cache()
def _get_atlas(atlasID, res, summary=False):
atlas = _atlases.get((atlasID, res, summary), default=None)
if atlas is None:
atlas = fslatlases.loadAtlas(atlasID,
loadSummary=summary,
resolution=res)
# We need some atlases to be loaded into memory,
# so we can use boolean-mask-based indexing
if summary or atlasID in ('talairach', 'striatum-structural',
'jhu-labels', 'smatt'):
kwargs = {}
else:
kwargs = {'loadData' : False,
'calcRange' : False}
atlas.data
atlas = fslatlases.loadAtlas(atlasID,
loadSummary=summary,
resolution=res,
**kwargs)
_atlases.put((atlasID, res, summary), atlas)
return atlas
@@ -85,7 +84,7 @@ def _get_zero_mask(aimg):
elif isinstance(aimg, fslatlases.ProbabilisticAtlas):
# Keep memory usage down
zmask = np.ones(aimg.shape[:3], dtype=np.bool)
zmask = np.ones(aimg.shape[:3], dtype=bool)
for vol in range(aimg.shape[-1]):
zmask = np.logical_and(zmask, aimg[..., vol] == 0)
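# The volume-by-volume logical_and above is a memory-friendly
# equivalent of the one-liner below, which would pull every volume
# into memory at once (what the 'Keep memory usage down' comment
# is avoiding):
#   zmask = np.all(aimg[...] == 0, axis=-1)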
@@ -284,7 +283,7 @@ def _eval_mask_query(atlas, query, qtype, qin):
rmask = resample.resample(
mask, atlas.shape[:3], dtype=np.float32, order=0)[0]
rmask = np.array(rmask, dtype=np.bool)
rmask = np.array(rmask, dtype=bool)
def evalLabel():
......
@@ -18,8 +18,11 @@ import fsl.utils.bids as fslbids
def test_parseFilename():
with pytest.raises(ValueError):
fslbids.parseFilename('bad.txt')
badtests = ['bad_file.txt']
for test in badtests:
with pytest.raises(ValueError):
fslbids.parseFilename(test)
tests = [
('sub-01_ses-01_t1w.nii.gz',
@@ -62,12 +65,12 @@ def test_isBIDSFile():
Path('sub-01_ses-01_t1w.nii'),
Path('sub-01_ses-01_t1w.json'),
Path('a-1_b-2_c-3_d-4_e.nii.gz'),
Path('sub-01_ses-01_t1w.txt'),
]
badfiles = [
Path('sub-01_ses-01.nii.gz'),
Path('sub-01_ses-01_t1w'),
Path('sub-01_ses-01_t1w.'),
Path('sub-01_ses-01_t1w.txt'),
Path('sub_ses-01_t1w.nii.gz'),
Path('sub-01_ses_t1w.nii.gz'),
]
@@ -105,3 +108,56 @@ def test_loadMetadata():
assert fslbids.loadMetadata(t1) == {**meta2, **meta1}
json4.write_text(json.dumps(meta4))
assert fslbids.loadMetadata(t1) == {**meta4, **meta2, **meta1}
def test_loadMetadata_control_characters():
dd = Path('dataset_description.json')
t1 = Path('sub-01/func/sub-01_task-stim_bold.nii.gz')
json1 = Path('sub-01/func/sub-01_task-stim_bold.json')
meta1 = {"a" : "1", "b" : "2\x19\x20"}
smeta1 = '{"a" : "1", "b" : "2\x19\x20"}'
with tempdir():
dd.touch()
Path(op.dirname(t1)).mkdir(parents=True)
t1.touch()
assert fslbids.loadMetadata(t1) == {}
json1.write_text(smeta1)
assert fslbids.loadMetadata(t1) == meta1
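# Context for the test above: strict JSON forbids raw control
# characters (codepoints below 0x20) inside strings, so plain
# json.loads raises on smeta1. loadMetadata evidently tolerates
# them -- presumably (an assumption, not verified here) by parsing
# with strict=False, as this sketch shows:

def _control_char_sketch():
    import json
    s = '{"a" : "1", "b" : "2\x19\x20"}'
    try:
        json.loads(s)   # JSONDecodeError: invalid control character
    except json.JSONDecodeError:
        pass
    assert json.loads(s, strict=False) == {'a': '1', 'b': '2\x19 '}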
def test_loadMetadata_symlinked():
ddreal = Path('a')
t1real = Path('b')
j1real = Path('c')
j2real = Path('d')
j3real = Path('e')
j4real = Path('f')
dd = Path('data/dataset_description.json')
t1 = Path('data/sub-01/func/sub-01_task-stim_bold.nii.gz')
json1 = Path('data/sub-01/func/sub-01_task-stim_bold.json')
json2 = Path('data/sub-01/sub-01_bold.json')
json3 = Path('data/sub-01_t1w.json')
json4 = Path('data/sub-01/task-stim_bold.json')
meta1 = {'a' : '1', 'b' : '2'}
meta2 = {'a' : '10', 'c' : '3'}
meta3 = {'a' : '109', 'b' : '99'}
meta4 = {'c' : '9', 'd' : '5'}
with tempdir():
ddreal.touch()
t1real.touch()
j1real.write_text(json.dumps(meta1))
j2real.write_text(json.dumps(meta2))
j3real.write_text(json.dumps(meta3))
j4real.write_text(json.dumps(meta4))
Path(op.dirname(t1)).mkdir(parents=True)
dd .symlink_to(op.join('..', ddreal))
t1 .symlink_to(op.join('..', '..', '..', t1real))
json1.symlink_to(op.join('..', '..', '..', j1real))
json2.symlink_to(op.join('..', '..', j2real))
json3.symlink_to(op.join('..', j3real))
json4.symlink_to(op.join('..', '..', j4real))
assert fslbids.loadMetadata(t1) == {**meta4, **meta2, **meta1}
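# The expected value above encodes the BIDS inheritance principle:
# sidecars closer to the data file override those further up the
# tree. With dict unpacking, later entries win, so meta1 (the
# deepest sidecar) takes precedence. A worked example:

def _inheritance_sketch():
    meta1 = {'a': '1',  'b': '2'}   # sub-01/func/..._bold.json (deepest)
    meta2 = {'a': '10', 'c': '3'}   # sub-01/sub-01_bold.json
    meta4 = {'c': '9',  'd': '5'}   # sub-01/task-stim_bold.json
    assert {**meta4, **meta2, **meta1} == \
           {'a': '1', 'b': '2', 'c': '3', 'd': '5'}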
File moved
@@ -146,3 +146,22 @@ def test_lru():
c[3]
c[4]
assert len(c) == 3
def test_accessors():
c = cache.Cache(maxsize=3)
c[0] = '0'
c[1] = '1'
c[2] = '2'
c[3] = '3'
assert list(c.keys()) == [ 1, 2, 3]
assert list(c.values()) == ['1', '2', '3']
assert list(c.items()) == [(1, '1'), (2, '2'), (3, '3')]
assert 0 not in c
assert 1 in c
assert 2 in c
assert 3 in c
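# A minimal sketch of the eviction behaviour test_accessors relies
# on, assuming Cache(maxsize=3) drops the oldest entry on overflow,
# like a size-capped insertion-ordered dict (the real Cache lives in
# fsl.utils.cache):

def _lru_sketch():
    from collections import OrderedDict

    class TinyCache(OrderedDict):
        def __init__(self, maxsize):
            super().__init__()
            self.maxsize = maxsize

        def __setitem__(self, key, value):
            super().__setitem__(key, value)
            if len(self) > self.maxsize:
                self.popitem(last=False)  # evict the oldest insertion

    c = TinyCache(3)
    for i in range(4):
        c[i] = str(i)
    assert list(c.keys()) == [1, 2, 3]    # 0 was evicted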
from fsl.data import cifti
import os.path as op
import numpy as np
import nibabel as nib
from numpy import testing
import fsl.tests as tests
from nibabel.cifti2 import cifti2_axes
def volumetric_brain_model():
mask = np.random.randint(2, size=(10, 10, 10)) > 0
return cifti2_axes.BrainModelAxis.from_mask(mask, affine=np.eye(4))
def surface_brain_model():
mask = np.random.randint(2, size=100) > 0
return cifti2_axes.BrainModelAxis.from_mask(mask, name='cortex')
def volumetric_parcels(return_mask=False):
mask = np.random.randint(5, size=(10, 10, 10))
axis = cifti2_axes.ParcelsAxis(
[f'vol_{idx}' for idx in range(1, 5)],
voxels=[np.stack(np.where(mask == idx), axis=-1) for idx in range(1, 5)],
vertices=[{} for _ in range(1, 5)],
volume_shape=mask.shape,
affine=np.eye(4),
)
if return_mask:
return axis, mask
else:
return axis
def surface_parcels(return_mask=False):
mask = np.random.randint(5, size=100)
axis = cifti2_axes.ParcelsAxis(
[f'surf_{idx}' for idx in range(1, 5)],
voxels=[np.zeros((0, 3), dtype=int) for _ in range(1, 5)],
vertices=[{'CIFTI_STRUCTURE_CORTEX': np.where(mask == idx)[0]} for idx in range(1, 5)],
nvertices={'CIFTI_STRUCTURE_CORTEX': 100},
)
if return_mask:
return axis, mask
else:
return axis
def gen_data(axes):
return np.random.randn(*(5 if ax is None else len(ax) for ax in axes))
def test_read_gifti():
testdir = op.join(op.dirname(__file__), 'testdata')
shapefile = op.join(testdir, 'example.shape.gii')
ref_data = nib.load(shapefile).darrays[0].data
data = cifti.load(shapefile)
assert isinstance(data, cifti.DenseCifti)
assert data.arr.shape == (642, )
testing.assert_equal(data.arr, ref_data)
testing.assert_equal(data.brain_model_axis.vertex, np.arange(642))
assert len(data.brain_model_axis.nvertices) == 1
assert data.brain_model_axis.nvertices['CIFTI_STRUCTURE_OTHER'] == 642
data = cifti.load(shapefile, mask_values=(ref_data[0], ))
assert isinstance(data, cifti.DenseCifti)
assert data.arr.shape == (np.sum(ref_data != ref_data[0]), )
testing.assert_equal(data.arr, ref_data[ref_data != ref_data[0]])
testing.assert_equal(data.brain_model_axis.vertex, np.where(ref_data != ref_data[0])[0])
assert len(data.brain_model_axis.nvertices) == 1
assert data.brain_model_axis.nvertices['CIFTI_STRUCTURE_OTHER'] == 642
cifti.load(op.join(testdir, 'example'))
def test_read_nifti():
mask = np.random.randint(2, size=(10, 10, 10)) > 0
values = np.random.randn(10, 10, 10)
for mask_val in (0, np.nan):
values[~mask] = mask_val
affine = np.concatenate((np.random.randn(3, 4), np.array([0, 0, 0, 1])[None, :]), axis=0)
with tests.testdir():
nib.Nifti1Image(values, affine).to_filename("masked_image.nii.gz")
data = cifti.load("masked_image")
assert isinstance(data, cifti.DenseCifti)
testing.assert_equal(data.arr, values[mask])
testing.assert_allclose(data.brain_model_axis.affine, affine)
assert len(data.brain_model_axis.nvertices) == 0
def check_io(data: cifti.Cifti, extension):
with tests.testdir():
data.save("test")
assert op.isfile(f'test.{extension}.nii')
loaded = cifti.load("test")
if data.arr.ndim == 1:
testing.assert_equal(data.arr, loaded.arr[0])
assert data.axes == loaded.axes[1:]
else:
testing.assert_equal(data.arr, loaded.arr)
assert data.axes == loaded.axes
def test_io_cifti():
for cifti_class, cifti_type, main_axis_options in (
(cifti.DenseCifti, 'd', (volumetric_brain_model(), surface_brain_model(),
volumetric_brain_model() + surface_brain_model())),
(cifti.ParcelCifti, 'p', (volumetric_parcels(), surface_parcels(),
volumetric_parcels() + surface_parcels())),
):
for main_axis in main_axis_options:
with tests.testdir():
data_1d = cifti_class(gen_data([main_axis]), [main_axis])
check_io(data_1d, f'{cifti_type}scalar')
connectome = cifti_class(gen_data([main_axis, main_axis]), (main_axis, main_axis))
check_io(connectome, f'{cifti_type}conn')
scalar_axis = cifti2_axes.ScalarAxis(['A', 'B', 'C'])
scalar = cifti_class(gen_data([scalar_axis, main_axis]), (scalar_axis, main_axis))
check_io(scalar, f'{cifti_type}scalar')
label_axis = cifti2_axes.LabelAxis(['A', 'B', 'C'], {1: ('some parcel', (1, 0, 0, 1))})
label = cifti_class(gen_data([label_axis, main_axis]), (label_axis, main_axis))
check_io(label, f'{cifti_type}label')
series_axis = cifti2_axes.SeriesAxis(10, 3, 50, unit='HERTZ')
series = cifti_class(gen_data([series_axis, main_axis]), (series_axis, main_axis))
check_io(series, f'{cifti_type}tseries')
if cifti_type == 'd':
parcel_axis = surface_parcels()
dpconn = cifti_class(gen_data([parcel_axis, main_axis]), (parcel_axis, main_axis))
check_io(dpconn, 'dpconn')
else:
dense_axis = surface_brain_model()
pdconn = cifti_class(gen_data([dense_axis, main_axis]), (dense_axis, main_axis))
check_io(pdconn, 'pdconn')
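# For reference, the extensions exercised above follow the CIFTI-2
# naming convention that check_io's f'test.{extension}.nii' filenames
# assume: a 'd' (dense) or 'p' (parcellated) prefix for the
# brain-model axis, plus a suffix for the other axis:
#
#   scalar  -> ScalarAxis   (dscalar.nii  / pscalar.nii)
#   label   -> LabelAxis    (dlabel.nii   / plabel.nii)
#   tseries -> SeriesAxis   (dtseries.nii / ptseries.nii)
#   conn    -> same axis on both dimensions (dconn.nii / pconn.nii)
#   dpconn / pdconn -> mixed dense/parcellated connectomes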
def test_extract_dense():
vol_bm = volumetric_brain_model()
surf_bm = surface_brain_model()
for bm in (vol_bm + surf_bm, surf_bm + vol_bm):
for ndim, no_other_axis in ((1, True), (2, False), (2, True)):
if ndim == 1:
data = cifti.DenseCifti(gen_data([bm]), [bm])
else:
scl = cifti2_axes.ScalarAxis(['A', 'B', 'C'])
data = cifti.DenseCifti(gen_data([scl, bm]),
[None if no_other_axis else scl, bm])
# extract volume
ref_arr = data.arr[..., data.brain_model_axis.volume_mask]
vol_image = data.to_image(fill=np.nan)
if ndim == 1:
assert vol_image.shape == data.brain_model_axis.volume_shape
else:
assert vol_image.shape == data.brain_model_axis.volume_shape + (3, )
assert np.isfinite(vol_image.data).sum() == len(vol_bm) * (3 if ndim == 2 else 1)
testing.assert_equal(vol_image.data[tuple(vol_bm.voxel.T)], ref_arr.T)
from_image = cifti.DenseCifti.from_image(vol_image)
assert from_image.brain_model_axis == vol_bm
testing.assert_equal(from_image.arr, ref_arr)
# extract surface
ref_arr = data.arr[..., data.brain_model_axis.surface_mask]
mask, surf_data = data.surface('cortex', partial=True)
assert surf_data.shape[-1] < 100
testing.assert_equal(ref_arr, surf_data)
testing.assert_equal(surf_bm.vertex, mask)
surf_data_full = data.surface('cortex', fill=np.nan)
assert surf_data_full.shape[-1] == 100
mask_full = np.isfinite(surf_data_full)
if ndim == 2:
assert (mask_full.any(0) == mask_full.all(0)).all()
mask_full = mask_full[0]
assert mask_full.sum() == len(surf_bm)
assert mask_full[..., mask].sum() == len(surf_bm)
testing.assert_equal(surf_data_full[..., mask_full], ref_arr)
def test_extract_parcel():
vol_parcel, vol_mask = volumetric_parcels(return_mask=True)
surf_parcel, surf_mask = surface_parcels(return_mask=True)
parcel = vol_parcel + surf_parcel
for ndim, no_other_axis in ((1, True), (2, False), (2, True)):
if ndim == 1:
data = cifti.ParcelCifti(gen_data([parcel]), [parcel])
else:
scl = cifti2_axes.ScalarAxis(['A', 'B', 'C'])
data = cifti.ParcelCifti(gen_data([scl, parcel]),
[None if no_other_axis else scl, parcel])
# extract volume
vol_image = data.to_image(fill=np.nan)
if ndim == 1:
assert vol_image.shape == data.parcel_axis.volume_shape
else:
assert vol_image.shape == data.parcel_axis.volume_shape + (3, )
assert np.isfinite(vol_image.data).sum() == np.sum(vol_mask != 0) * (3 if ndim == 2 else 1)
if ndim == 1:
testing.assert_equal(vol_mask != 0, np.isfinite(vol_image.data))
for idx in range(1, 5):
testing.assert_allclose(vol_image.data[vol_mask == idx], data.arr[..., idx - 1])
else:
for idx in range(3):
testing.assert_equal(vol_mask != 0, np.isfinite(vol_image.data[..., idx]))
for idx2 in range(1, 5):
testing.assert_allclose(vol_image.data[vol_mask == idx2, idx], data.arr[idx, idx2 - 1])
# extract surface
mask, surf_data = data.surface('cortex', partial=True)
assert surf_data.shape[-1] == (surf_mask != 0).sum()
assert (surf_mask[mask] != 0).all()
print(data.arr)
for idx in range(1, 5):
if ndim == 1:
testing.assert_equal(surf_data.T[surf_mask[mask] == idx], data.arr[idx + 3])
else:
for idx2 in range(3):
testing.assert_equal(surf_data.T[surf_mask[mask] == idx, idx2], data.arr[idx2, idx + 3])
surf_data_full = data.surface('cortex', partial=False)
assert surf_data_full.shape[-1] == 100
if ndim == 1:
testing.assert_equal(np.isfinite(surf_data_full), surf_mask != 0)
for idx in range(1, 5):
testing.assert_equal(surf_data_full.T[surf_mask == idx], data.arr[idx + 3])
else:
for idx2 in range(3):
testing.assert_equal(np.isfinite(surf_data_full)[idx2], (surf_mask != 0))
for idx in range(1, 5):
testing.assert_equal(surf_data_full.T[surf_mask == idx, idx2], data.arr[idx2, idx + 3])
def test_brainstructure():
for primary in ['cortex', 'cerebellum']:
for secondary in [None, 'white', 'pial']:
for gtype in [None, 'volume', 'surface']:
for orientation in ['left', 'right', 'both']:
bst = cifti.BrainStructure(primary, secondary, orientation, gtype)
print(bst.cifti)
assert bst.cifti == 'CIFTI_STRUCTURE_%s%s' % (primary.upper(), '' if orientation == 'both' else '_' + orientation.upper())
assert bst.gifti['AnatomicalStructurePrimary'][:len(primary)] == primary.capitalize()
assert len(bst.gifti) == (1 if secondary is None else 2)
if secondary is not None:
assert bst.gifti['AnatomicalStructureSecondary'] == secondary.capitalize()
assert bst == cifti.BrainStructure(primary, secondary, orientation, gtype)
assert bst == bst
assert bst != cifti.BrainStructure('Thalamus', secondary, orientation, gtype)
if secondary is None:
assert bst == cifti.BrainStructure(primary, 'midplane', orientation, gtype)
else:
assert bst != cifti.BrainStructure(primary, 'midplane', orientation, gtype)
if (gtype == 'volume' and primary == 'cortex') or (gtype == 'surface' and primary != 'cortex'):
assert cifti.BrainStructure.from_string(bst.cifti) != bst
else:
assert cifti.BrainStructure.from_string(bst.cifti) == bst
assert cifti.BrainStructure.from_string(bst.cifti).secondary is None
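# The naming rule asserted above, in isolation: a CIFTI structure
# name is the upper-cased primary structure, with a hemisphere
# suffix appended only when the orientation is not 'both':

def _cifti_name_sketch(primary, orientation):
    suffix = '' if orientation == 'both' else '_' + orientation.upper()
    return 'CIFTI_STRUCTURE_%s%s' % (primary.upper(), suffix)

assert _cifti_name_sketch('cortex', 'left') == 'CIFTI_STRUCTURE_CORTEX_LEFT'
assert _cifti_name_sketch('cerebellum', 'both') == 'CIFTI_STRUCTURE_CEREBELLUM'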
File moved
@@ -22,6 +22,9 @@ import fsl.data.dicom as fsldcm
import fsl.utils.tempdir as tempdir
from fsl.tests import mockFSLDIR, touch
datadir = op.join(op.dirname(__file__), 'testdata')
@@ -29,15 +32,16 @@ pytestmark = pytest.mark.dicomtest
@contextlib.contextmanager
def install_dcm2niix(version='1.0.20190902'):
def install_dcm2niix(version='1.0.20220720'):
filenames = {
'1.0.20201102' : 'v1.0.20201102/dcm2niix_lnx.zip',
'1.0.20190902' : 'v1.0.20190902/dcm2niix_lnx.zip',
'1.0.20190410' : 'v1.0.20190410/dcm2niix_11-Apr-2019_lnx.zip',
'1.0.20181125' : 'v1.0.20181125/dcm2niix_25-Nov-2018_lnx.zip',
'1.0.20171017' : 'v1.0.20171017/dcm2niix_18-Oct-2017_lnx.zip',
'1.0.20220720' : 'v1.0.20220720/dcm2niix_lnx.zip',
}
prefix = 'https://github.com/rordenlab/dcm2niix/releases/download/'
url = prefix + filenames[version]
url = prefix + filenames.get(version, f'v{version}/dcm2niix_lnx.zip')
with tempdir.tempdir() as td:
request.urlretrieve(url, 'dcm2niix.zip')
@@ -47,9 +51,9 @@ def install_dcm2niix(version='1.0.20190902'):
os.chmod(op.join(td, 'dcm2niix'), 0o755)
path = op.pathsep.join((op.abspath('.'), os.environ['PATH']))
path = op.abspath('dcm2niix')
with mock.patch.dict('os.environ', {'PATH' : path}):
with mock.patch('fsl.data.dicom.dcm2niix', return_value=path):
try:
yield
finally:
@@ -64,6 +68,31 @@ def test_disabled():
fsldcm.loadSeries({})
def test_dcm2niix():
"""
"""
env = os.environ.copy()
env.pop('FSLDIR', None)
with tempdir.tempdir() as td:
env['PATH'] = td
with mock.patch('os.environ', env):
assert fsldcm.dcm2niix() == 'dcm2niix'
bindir = op.join(td, 'bin')
dcm2niix = op.join(bindir, 'dcm2niix')
os.makedirs(bindir)
touch(dcm2niix)
os.chmod(dcm2niix, 0o755)
env['PATH'] = bindir
with mock.patch('os.environ', env):
assert fsldcm.dcm2niix() == dcm2niix
with mockFSLDIR(bin=['dcm2niix']) as fsldir:
env['FSLDIR'] = fsldir
dcm2niix = op.join(fsldir, 'bin', 'dcm2niix')
with mock.patch('os.environ', env):
assert fsldcm.dcm2niix() == dcm2niix
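# A sketch of the resolution order the assertions above encode --
# a hypothetical helper, not the fsl.data.dicom implementation:
# prefer $FSLDIR/bin/dcm2niix, then whatever is on $PATH, then fall
# back to the bare command name:

def _find_dcm2niix_sketch():
    import os
    import os.path as op
    import shutil
    fsldir = os.environ.get('FSLDIR')
    if fsldir is not None:
        candidate = op.join(fsldir, 'bin', 'dcm2niix')
        if op.isfile(candidate):
            return candidate
    return shutil.which('dcm2niix') or 'dcm2niix'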
def test_installedVersion():
tests = [
@@ -122,14 +151,16 @@ def test_scanDir():
datafile = op.join(datadir, 'example_dicom.tbz2')
with tarfile.open(datafile) as f:
f.extractall()
f.extractall(filter='data')
series = fsldcm.scanDir('.')
assert len(series) == 2
for s in series:
assert (s['PatientName'] == 'MCCARTHY_PAUL' or
s['PatientName'] == 'MCCARTHY_PAUL_2')
assert s['PatientName'] in ('MCCARTHY_PAUL',
'MCCARTHY^PAUL',
'MCCARTHY_PAUL_2',
'MCCARTHY^PAUL^2')
def test_sersiesCRC():
@@ -158,7 +189,7 @@ def test_sersiesCRC():
def test_loadSeries():
# test a pre-CRC and a post-CRC version
for version in ('1.0.20190410', '1.0.20190902'):
for version in ('1.0.20181125', '1.0.20201102'):
with install_dcm2niix(version):
@@ -170,23 +201,26 @@ def test_loadSeries():
dcmdir = os.getcwd()
series = fsldcm.scanDir(dcmdir)
expShape = (512, 512, 1)
explens = [1, 1]
for s, explen in zip(series, explens):
for s in series:
imgs = fsldcm.loadSeries(s)
assert len(imgs) == explen
for img in imgs:
assert img.dicomDir == dcmdir
assert img.shape == expShape
assert img[:].shape == expShape
assert img.getMeta('PatientName') == 'MCCARTHY_PAUL' or \
img.getMeta('PatientName') == 'MCCARTHY_PAUL_2'
assert img.getMeta('PatientName') in ('MCCARTHY_PAUL',
'MCCARTHY^PAUL',
'MCCARTHY_PAUL_2',
'MCCARTHY^PAUL^2')
assert 'PatientName' in img.metaKeys()
assert 'MCCARTHY_PAUL' in img.metaValues() or \
'MCCARTHY_PAUL_2' in img.metaValues()
'MCCARTHY^PAUL' in img.metaValues() or \
'MCCARTHY_PAUL_2' in img.metaValues() or \
'MCCARTHY^PAUL^2' in img.metaValues()
assert ('PatientName', 'MCCARTHY_PAUL') in img.metaItems() or \
('PatientName', 'MCCARTHY_PAUL_2') in img.metaItems()
('PatientName', 'MCCARTHY^PAUL') in img.metaItems() or \
('PatientName', 'MCCARTHY_PAUL_2') in img.metaItems() or \
('PatientName', 'MCCARTHY^PAUL^2') in img.metaItems()
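# The caret variants accepted above arise because DICOM Person Name
# (PN) values separate name components with '^' (e.g. MCCARTHY^PAUL);
# dcm2niix releases appear to differ in whether the caret is
# preserved or replaced in their JSON sidecars. A hypothetical
# normaliser that treats the accepted spellings as equivalent:

def _normalise_pn_sketch(name):
    return name.replace('^', '_')

assert _normalise_pn_sketch('MCCARTHY^PAUL') == 'MCCARTHY_PAUL'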
@@ -10,7 +10,7 @@ import numpy as np
import pytest
import tests
import fsl.tests as tests
import fsl.data.dtifit as dtifit
import fsl.data.image as fslimage
......
@@ -5,6 +5,7 @@
# Author: Paul McCarthy <pauldmccarthy@gmail.com>
#
import pathlib
import numpy as np
import nibabel as nib
@@ -12,7 +13,7 @@ import nibabel as nib
import fsl.utils.tempdir as tempdir
import fsl.utils.ensure as ensure
from . import make_random_image
from fsl.tests import make_random_image
def test_ensureIsImage():
@@ -22,8 +23,10 @@ def test_ensureIsImage():
assert ensure.ensureIsImage(img) is img
loaded = [ensure.ensureIsImage('image.nii'),
ensure.ensureIsImage('image')]
loaded = [ensure.ensureIsImage( 'image.nii'),
ensure.ensureIsImage( 'image'),
ensure.ensureIsImage(pathlib.Path('image')),
ensure.ensureIsImage(pathlib.Path('image.nii'))]
for l in loaded:
assert isinstance(l, nib.nifti1.Nifti1Image)
......
@@ -17,7 +17,7 @@ import numpy as np
import pytest
import tests
import fsl.tests as tests
import fsl.data.featanalysis as featanalysis
import fsl.data.featdesign as featdesign
import fsl.data.image as fslimage
@@ -59,7 +59,7 @@ def test_isFEATDir():
# it's not a feat directory
assert not featanalysis.isFEATDir('nonexistent.feat')
# If any of the above files are not
# If any of the above files are not
# present, it is not a FEAT directory
perms = it.chain(it.combinations(paths, 1),
it.combinations(paths, 2),
@@ -69,7 +69,7 @@ def test_isFEATDir():
assert not featanalysis.isFEATDir(
op.join(testdir, 'analysis.feat'))
def test_hasStats():
with tests.testdir(['analysis.feat/stats/zstat1.nii.gz']) as testdir:
@@ -78,7 +78,7 @@ def test_hasStats():
with tests.testdir(['analysis.feat/stats/zstat1.txt']) as testdir:
featdir = op.join(testdir, 'analysis.feat')
assert not featanalysis.hasStats(featdir)
assert not featanalysis.hasStats(featdir)
def test_hasMelodicDir():
@@ -94,7 +94,7 @@ def test_getAnalysisDir():
'analysis.feat/design.fsf',
'analysis.feat/design.mat',
'analysis.feat/design.con']
testpaths = ['analysis.feat/filtered_func_data.nii.gz',
'analysis.feat/stats/zstat1.nii.gz',
'analysis.feat/logs/feat4_post',
@@ -106,7 +106,7 @@ def test_getAnalysisDir():
t = op.join(testdir, t)
assert featanalysis.getAnalysisDir(t) == expected
def test_getTopLevelAnalysisDir():
testcases = [
('analysis.feat/filtered_func_data.ica/melodic_IC.nii.gz', 'analysis.feat'),
@@ -127,7 +127,7 @@ def test_getReportFile():
for paths, expected in testcases:
with tests.testdir(paths) as testdir:
featdir = op.join(testdir, 'analysis.feat')
if expected:
@@ -145,7 +145,7 @@ def test_loadContrasts():
/ContrastName1 c1
/ContrastName2 c2
/ContrastName3 c3
/NumContrasts 3
/NumContrasts 3
/Matrix
1 0 0
0 1 0
@@ -188,9 +188,9 @@ def test_loadContrasts():
0 1 1
""",
]
with pytest.raises(Exception):
featanalysis.loadContrasts('no file')
featanalysis.loadContrasts('no file')
with tests.testdir() as testdir:
featdir = op.join(testdir, 'analysis.feat')
@@ -210,6 +210,77 @@ def test_loadContrasts():
featanalysis.loadContrasts(featdir)
def test_loadFTests():
goodtests = [
("""
/NumWaves 4
/NumContrasts 3
/Matrix
0 1 0 1
0 0 1 1
1 1 1 1
""",
[[0, 1, 0, 1],
[0, 0, 1, 1],
[1, 1, 1, 1]]),
("""
/NumWaves 10
/NumContrasts 2
/Matrix
0 1 0 1 0 1 1 0 0 1
0 0 1 1 1 0 0 1 0 0
""",
[[0, 1, 0, 1, 0, 1, 1, 0, 0, 1],
[0, 0, 1, 1, 1, 0, 0, 1, 0, 0]]),
]
badtests = [
"""
/NumWaves 10
/NumContrasts 2
""",
"""
/NumContrasts 2
/Matrix
1 0
0 1
""",
"""
/NumWaves Burgers
/NumContrasts 2
/Matrix
1 0
0 1
""",
"""
/Matrix
1 0
0 1
""",
"""
/NumWaves 4
/NumContrasts 3
/Matrix
1 0 0 0 1 0 0
0 1 0 0 1 0 0
""",
]
with tests.testdir() as testdir:
featdir = op.join(testdir, 'analysis.feat')
for contents, expect in goodtests:
designcon = op.join(featdir, 'design.fts')
tests.make_dummy_file(designcon, textwrap.dedent(contents).strip())
assert featanalysis.loadFTests(featdir) == expect
for contents in badtests:
designcon = op.join(featdir, 'design.fts')
tests.make_dummy_file(designcon, textwrap.dedent(contents).strip())
with pytest.raises(Exception):
featanalysis.loadFTests(featdir)
def test_loadSettings():
contents = """
@@ -240,9 +311,10 @@ def test_loadSettings():
with tests.testdir() as testdir:
featdir = op.join(testdir, 'analysis.feat')
tests.make_dummy_file(op.join(featdir, 'design.fsf'), contents)
result = featanalysis.loadSettings(featdir)
assert result == expected
designfsf = op.join(featdir, 'design.fsf')
tests.make_dummy_file(designfsf, contents)
assert featanalysis.loadSettings(featdir) == expected
assert featanalysis.loadFsf(designfsf) == expected
def test_loadDesign():
@@ -274,7 +346,7 @@ def test_isFirstLevelAnalysis():
'2ndlevel_1.gfeat', '2ndlevel_2.gfeat']
for featdir in featdirs:
expected = featdir.startswith('1')
featdir = op.join(datadir, featdir)
settings = featanalysis.loadSettings(featdir)
@@ -288,7 +360,10 @@ def test_loadClusterResults():
'2ndlevel_1.gfeat/cope1.feat', '2ndlevel_1.gfeat/cope2.feat',
'2ndlevel_2.gfeat/cope1.feat', '2ndlevel_2.gfeat/cope2.feat']
ncontrasts = [2, 2, 2, 1, 1, 1, 1]
nclusters = [[1, 5], [2, 2], [3, 5], [7], [1], [10], [27]]
nftests = [0, 0, 1, 0, 0, 1, 1]
# nclusters = [contrastclusters] + [ftestclusters]
nclusters = [[1, 5], [2, 2], [3, 5, 3], [7], [1], [10, 8], [27, 21]]
with pytest.raises(Exception):
featanalysis.loadClusterResults('notafeatdir')
@@ -300,17 +375,18 @@ def test_loadClusterResults():
with tests.testdir() as testdir:
# For higher level analyses, the
# loadClusterResults function peeks
# at the FEAT input data file
# header, so we have to generate it.
# work from a copy of the test data directory
newfeatdir = op.join(testdir, 'analysis.feat')
shutil.copytree(op.join(datadir, featdir), newfeatdir)
featdir = newfeatdir
# For higher level analyses, the
# loadClusterResults function peeks
# at the FEAT input data file
# header, so we have to generate it.
if not firstlevel:
datafile = op.join(featdir, 'filtered_func_data.nii.gz')
data = np.random.randint(1, 10, (91, 109, 91))
data = np.random.randint(1, 10, (91, 109, 91), dtype=np.int32)
xform = np.array([[-2, 0, 0, 90],
[ 0, 2, 0, -126],
[ 0, 0, 2, -72],
@@ -318,20 +394,55 @@
fslimage.Image(data, xform=xform).save(datafile)
settings = featanalysis.loadSettings(featdir)
# contrasts
for c in range(ncontrasts[i]):
clusters = featanalysis.loadClusterResults(
featdir, settings, c)
assert len(clusters) == nclusters[i][c]
# f-tests
for c in range(nftests[i]):
clusters = featanalysis.loadClusterResults(
featdir, settings, c, ftest=True)
assert len(clusters) == nclusters[i][c + ncontrasts[i]]
# Test calling the function on a feat dir
# which doesn't have any cluster results
# (2ndlevel_2.gfeat)
if i == len(featdirs) - 1:
for clustfile in glob.glob(op.join(featdir, 'cluster*txt')):
os.remove(clustfile)
assert featanalysis.loadClusterResults(
featdir, settings, 0) is None
# The above loop just checks that the number of
# clusters loaded for each analysis was correct.
# Below we check that the cluster data was loaded
# correctly, just for one analysis
featdir = op.join(datadir, '1stlevel_1.feat')
settings = featanalysis.loadSettings(featdir)
cluster = featanalysis.loadClusterResults(featdir, settings, 0)[0]
expected = {
'index' : 1,
'nvoxels' : 296,
'p' : 1.79e-27,
'logp' : 26.7,
'zmax' : 6.03,
'zmaxx' : 34,
'zmaxy' : 10,
'zmaxz' : 1,
'zcogx' : 31.4,
'zcogy' : 12.3,
'zcogz' : 1.72,
'copemax' : 612,
'copemaxx' : 34,
'copemaxy' : 10,
'copemaxz' : 1,
'copemean' : 143
}
for k, v in expected.items():
assert np.isclose(v, getattr(cluster, k))
def test_getDataFile():
paths = ['analysis.feat/filtered_func_data.nii.gz',
@@ -392,9 +503,9 @@ def test_getResidualFile():
assert featanalysis.getResidualFile(featdir) == expect
else:
with pytest.raises(fslpath.PathError):
featanalysis.getResidualFile(featdir)
featanalysis.getResidualFile(featdir)
def test_getPEFile():
testcases = [
(['analysis.feat/stats/pe1.nii.gz',
@@ -416,7 +527,7 @@ def test_getPEFile():
assert featanalysis.getPEFile(featdir, pei) == expect
else:
with pytest.raises(fslpath.PathError):
featanalysis.getPEFile(featdir, pei)
featanalysis.getPEFile(featdir, pei)
def test_getCOPEFile():
@@ -440,8 +551,32 @@
assert featanalysis.getCOPEFile(featdir, ci) == expect
else:
with pytest.raises(fslpath.PathError):
featanalysis.getCOPEFile(featdir, ci)
featanalysis.getCOPEFile(featdir, ci)
def test_getZStatFile():
testcases = [
(['analysis.feat/stats/zstat1.nii.gz',
'analysis.feat/stats/zstat2.nii.gz'], True),
(['analysis.feat/stats/zstat1.nii.gz'], True),
(['analysis.feat/stats/zstat0.nii.gz'], False),
(['analysis.feat/stats/zstat1.txt'], False),
]
for paths, shouldPass in testcases:
with tests.testdir(paths) as testdir:
featdir = op.join(testdir, 'analysis.feat')
for zi in range(len(paths)):
expect = op.join(
featdir, 'stats', 'zstat{}.nii.gz'.format(zi + 1))
if shouldPass:
assert featanalysis.getZStatFile(featdir, zi) == expect
else:
with pytest.raises(fslpath.PathError):
featanalysis.getZStatFile(featdir, zi)
def test_getZStatFile():
testcases = [
@@ -464,8 +599,31 @@
assert featanalysis.getZStatFile(featdir, zi) == expect
else:
with pytest.raises(fslpath.PathError):
featanalysis.getZStatFile(featdir, zi)
featanalysis.getZStatFile(featdir, zi)
def test_getZFStatFile():
testcases = [
(['analysis.feat/stats/zfstat1.nii.gz',
'analysis.feat/stats/zfstat2.nii.gz'], True),
(['analysis.feat/stats/zfstat1.nii.gz'], True),
(['analysis.feat/stats/zfstat0.nii.gz'], False),
(['analysis.feat/stats/zfstat1.txt'], False),
]
for paths, shouldPass in testcases:
with tests.testdir(paths) as testdir:
featdir = op.join(testdir, 'analysis.feat')
for zi in range(len(paths)):
expect = op.join(
featdir, 'stats', 'zfstat{}.nii.gz'.format(zi + 1))
if shouldPass:
assert featanalysis.getZFStatFile(featdir, zi) == expect
else:
with pytest.raises(fslpath.PathError):
featanalysis.getZFStatFile(featdir, zi)
def test_getClusterMaskFile():
testcases = [
@@ -488,4 +646,28 @@
assert featanalysis.getClusterMaskFile(featdir, ci) == expect
else:
with pytest.raises(fslpath.PathError):
featanalysis.getClusterMaskFile(featdir, ci)
featanalysis.getClusterMaskFile(featdir, ci)
def test_getFClusterMaskFile():
testcases = [
(['analysis.feat/cluster_mask_zfstat1.nii.gz',
'analysis.feat/cluster_mask_zfstat2.nii.gz'], True),
(['analysis.feat/cluster_mask_zfstat1.nii.gz'], True),
(['analysis.feat/cluster_mask_zfstat0.nii.gz'], False),
(['analysis.feat/cluster_mask_zfstat1.txt'], False),
]
for paths, shouldPass in testcases:
with tests.testdir(paths) as testdir:
featdir = op.join(testdir, 'analysis.feat')
for ci in range(len(paths)):
expect = op.join(
featdir, 'cluster_mask_zfstat{}.nii.gz'.format(ci + 1))
if shouldPass:
assert featanalysis.getFClusterMaskFile(featdir, ci) == expect
else:
with pytest.raises(fslpath.PathError):
featanalysis.getFClusterMaskFile(featdir, ci)
@@ -103,12 +103,13 @@ with the following commands:
"""
import os.path as op
import numpy as np
import os.path as op
import textwrap as tw
import numpy as np
import pytest
import tests
import fsl.tests as tests
from fsl.utils.tempdir import tempdir
import fsl.data.featdesign as featdesign
import fsl.data.featanalysis as featanalysis
@@ -400,6 +401,45 @@ def test_loadDesignMat():
featdesign.loadDesignMat(badfile)
# fsl/fslpy!469
def test_loadFEATDesignFile():
with tempdir():
with open('design1.con', 'wt') as f:
f.write(tw.dedent("""
/ContrastName1 mycontrast
/NumWaves 2
/NumContrasts 1
/Matrix
10 20
""").strip())
with open('design2.con', 'wt') as f:
f.write(tw.dedent("""
/ContrastName1
/NumWaves 2
/NumContrasts 1
/Matrix
10 20
""").strip())
des1 = featanalysis.loadFEATDesignFile('design1.con')
exp1 = {'ContrastName1': 'mycontrast',
'NumWaves': '2',
'NumContrasts': '1',
'Matrix': '10 20'}
des2 = featanalysis.loadFEATDesignFile('design2.con')
exp2 = {'ContrastName1': '',
'NumWaves': '2',
'NumContrasts': '1',
'Matrix': '10 20'}
assert des1 == exp1
assert des2 == exp2
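# A minimal sketch of the parsing behaviour asserted above, assuming
# the simple '/Key value' header format followed by an optional
# /Matrix block (the real parser is
# fsl.data.featanalysis.loadFEATDesignFile):

def _parse_design_sketch(path):
    fields = {}
    with open(path) as f:
        lines = f.read().strip().split('\n')
    for i, line in enumerate(lines):
        if not line.startswith('/'):
            continue
        key, _, value = line[1:].partition(' ')
        if key == 'Matrix':
            value = '\n'.join(lines[i + 1:])
        fields[key] = value.strip()
    return fields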
def test_VoxelwiseEVs():
with tempdir():
img = tests.make_random_image('image.nii.gz', (10, 10, 10, 10))
......
@@ -16,7 +16,7 @@ import numpy as np
import pytest
import tests
import fsl.tests as tests
import fsl.data.featimage as featimage
import fsl.data.featdesign as featdesign
import fsl.data.featanalysis as featanalysis
@@ -88,7 +88,8 @@ def test_FEATImage_attributes():
copes=False,
zstats=False,
residuals=False,
clustMasks=False)
clustMasks=False,
zfstats=False)
else:
featdir = op.join(datadir, featdir)
@@ -100,6 +101,7 @@ def test_FEATImage_attributes():
design = featdesign.FEATFSFDesign(featdir, settings)
desmat = design.getDesign()
evnames = [ev.title for ev in design.getEVs()]
ftests = featanalysis.loadFTests(featdir)
contrastnames, contrasts = featanalysis.loadContrasts(featdir)
assert np.all(np.isclose(fi.shape, shape))
@@ -115,8 +117,10 @@ def test_FEATImage_attributes():
assert fi.numEVs() == desmat.shape[1]
assert fi.evNames() == evnames
assert fi.numContrasts() == len(contrasts)
assert fi.numFTests() == len(ftests)
assert fi.contrastNames() == contrastnames
assert fi.contrasts() == contrasts
assert fi.ftests() == ftests
assert np.all(np.isclose(fi.getDesign(), desmat))
assert fi.thresholds() == featanalysis.getThresholds(settings)
@@ -138,7 +142,7 @@ def test_FEATImage_imageAccessors():
shape = TEST_ANALYSES[featdir]['shape']
xform = TEST_ANALYSES[featdir]['xform']
with tests.testdir() as testdir:
if 'realdata' not in featdir:
@@ -153,9 +157,10 @@ def test_FEATImage_imageAccessors():
shape4D = shape
shape = shape4D[:3]
fi = featimage.FEATImage(featdir)
nevs = fi.numEVs()
ncons = fi.numContrasts()
fi = featimage.FEATImage(featdir)
nevs = fi.numEVs()
ncons = fi.numContrasts()
nftests = fi.numFTests()
# Testing the FEATImage internal cache
for i in range(2):
@@ -166,6 +171,9 @@ def test_FEATImage_imageAccessors():
assert fi.getCOPE( con).shape == shape
assert fi.getZStats( con).shape == shape
assert fi.getClusterMask(con).shape == shape
for ft in range(nftests):
assert fi.getZFStats( ft).shape == shape
assert fi.getFClusterMask(ft).shape == shape
del fi
fi = None
......