
Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Showing 1087 additions and 97 deletions
@@ -10,6 +10,7 @@
 import os
 import sys
 import glob
+import hashlib
 import shutil
 import fnmatch
 import logging
@@ -20,14 +21,12 @@ import os.path as op
 import numpy as np
 import nibabel as nib

-from six import StringIO
+from io import StringIO

-try:                from unittest import mock
-except ImportError: import mock
+from unittest import mock

 import fsl.data.image as fslimage
 from fsl.utils.tempdir import tempdir
 from fsl.utils.platform import platform as fslplatform
@@ -50,7 +49,10 @@ def mockFSLDIR(**kwargs):
         if not op.isdir(subdir):
             os.makedirs(subdir)
         for fname in files:
-            touch(op.join(subdir, fname))
+            fname = op.join(subdir, fname)
+            touch(fname)
+            if subdir == bindir:
+                os.chmod(fname, 0o755)
     fslplatform.fsldir    = fsldir
     fslplatform.fsldevdir = None
@@ -68,7 +70,7 @@ def touch(fname):
         pass

-class CaptureStdout(object):
+class CaptureStdout:
     """Context manager which captures stdout and stderr. """

     def __init__(self):
@@ -87,6 +89,7 @@ class CaptureStdout(object):
         sys.stdout = self.__mock_stdout
         sys.stderr = self.__mock_stderr
+        return self

     def __exit__(self, *args, **kwargs):
@@ -145,6 +148,8 @@ def testdir(contents=None, suffix=""):
             shutil.rmtree(self.testdir)
     return ctx(contents)

+testdir.__test__ = False

 def make_dummy_files(paths):
     """Creates dummy files for all of the given paths. """
@@ -284,7 +289,8 @@ def make_mock_feat_analysis(featdir,
                             copes=True,
                             zstats=True,
                             residuals=True,
-                            clustMasks=True):
+                            clustMasks=True,
+                            zfstats=True):

     if xform is None:
         xform = np.eye(4)
@@ -317,6 +323,7 @@ def make_mock_feat_analysis(featdir,
     data = np.ravel_multi_index(data, shape)
     data = data.reshape(list(shape) + [1]).repeat(timepoints, axis=3)
     data[..., :] += range(i, i + timepoints)
+    data = data.astype(np.int32)

     img = nib.nifti1.Nifti1Image(data, xform)
@@ -341,6 +348,11 @@ def make_mock_feat_analysis(featdir,
         otherFiles .extend(files)
         otherShapes.extend([shape] * len(files))

+    if zfstats:
+        files = glob.glob(op.join(featdir, 'stats', 'zfstat*nii.gz'))
+        otherFiles .extend(files)
+        otherShapes.extend([shape] * len(files))

     if residuals:
         files = glob.glob(op.join(featdir, 'stats', 'res4d.nii.gz'))
         otherFiles .extend(files)
@@ -428,3 +440,10 @@ def make_random_mask(filename, shape, xform, premask=None, minones=1):
     img.save(filename)
     return img

+def sha256(filename):
+    hashobj = hashlib.sha256()
+    with open(filename, 'rb') as f:
+        hashobj.update(f.read())
+    return hashobj.hexdigest()
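
The new sha256 helper reads each file in one go, which is fine for small test
files. hashlib also supports incremental updates, so a chunked variant is
possible if files get large; a minimal sketch, not part of this changeset:

import hashlib

def sha256_chunked(filename, blocksize=1 << 20):
    # Hash the file in 1 MiB blocks rather than reading
    # it into memory all at once.
    hashobj = hashlib.sha256()
    with open(filename, 'rb') as f:
        for block in iter(lambda: f.read(blocksize), b''):
            hashobj.update(block)
    return hashobj.hexdigest()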
@@ -14,8 +14,8 @@ import pytest
 import fsl.utils.assertions as assertions
 import fsl.utils.tempdir    as tempdir

-from . import make_random_image
-from . import testdir
+from fsl.tests import make_random_image
+from fsl.tests import testdir

 def test_assertFileExists():
@@ -160,14 +160,14 @@ def test_assertIsMelodicDir():
         ('analysis.ica', [                     'melodic_mix', 'melodic_FTmix'], False),
         ('analysis.ica', ['melodic_IC.nii.gz',                'melodic_FTmix'], False),
         ('analysis.ica', ['melodic_IC.nii.gz', 'melodic_mix'                 ], False),
-        ('analysis',     ['melodic_IC.nii.gz',  'melodic_mix', 'melodic_FTmix'], False),
+        ('analysis',     ['melodic_IC.nii.gz',  'melodic_mix', 'melodic_FTmix'], True),
-        ('analysis',     ['melodic_oIC.nii.gz', 'melodic_mix', 'melodic_FTmix'], False),
+        ('analysis',     [                      'melodic_mix', 'melodic_FTmix'], False),
     ]

     for dirname, paths, expected in tests:
         with testdir(paths, dirname):
             if expected:
-                assertions.assertIsMelodicDir(dirname)
+                assertions.assertIsMelodicDir('.')
             else:
                 with pytest.raises(AssertionError):
                     assertions.assertIsMelodicDir(dirname)
...
@@ -13,10 +13,11 @@ import os
 import os.path as op

 import numpy as np
-import mock

+from unittest import mock
 import pytest

-import tests
+import fsl.tests as tests
 import fsl.utils.image.resample as resample
 import fsl.data.atlases         as atlases
 import fsl.data.image           as fslimage
@@ -40,7 +41,8 @@ dummy_atlas_desc = """<?xml version="1.0" encoding="ISO-8859-1"?>
 <header>
   <name>{name}</name>
   <shortname>{shortname}</shortname>
-  <type>Label</type>
+  <type>{atlastype}</type>
+  {extraheader}
 <images>
   <imagefile>/{shortname}/{filename}</imagefile>
   <summaryimagefile>/{shortname}/My{filename}</summaryimagefile>
@@ -52,7 +54,8 @@ dummy_atlas_desc = """<?xml version="1.0" encoding="ISO-8859-1"?>
 </data>
 </atlas>
 """

-def _make_dummy_atlas(savedir, name, shortName, filename):
+def _make_dummy_atlas(
+        savedir, name, shortName, filename, atlastype='Label', extraheader=''):
     mladir     = op.join(savedir, shortName)
     mlaxmlfile = op.join(savedir, '{}.xml'.format(shortName))
     mlaimgfile = op.join(savedir, shortName, '{}.nii.gz'.format(filename))
@@ -70,7 +73,9 @@ def _make_dummy_atlas(savedir, name, shortName, filename):
     desc = dummy_atlas_desc.format(
         name=name,
         shortname=shortName,
-        filename=filename)
+        filename=filename,
+        atlastype=atlastype,
+        extraheader=extraheader)
     f.write(desc)
     return mlaxmlfile
@@ -142,6 +147,28 @@ def test_AtlasDescription():
         registry.getAtlasDescription('non-existent-atlas')
def test_StatisticHeader():
with tests.testdir() as testdir:
hdr = '<statistic>T</statistic>' \
'<units></units>' \
'<precision>3</precision>' \
'<upper>75</upper>'
xmlfile = _make_dummy_atlas(testdir,
'statlas',
'STA',
'StAtlas',
atlastype='Statistic',
extraheader=hdr)
desc = atlases.AtlasDescription(xmlfile, 'StAtlas')
assert desc.atlasType == 'statistic'
assert desc.statistic == 'T'
assert desc.units == ''
assert desc.precision == 3
assert desc.lower == 0
assert desc.upper == 75

def test_add_remove_atlas():
    with tests.testdir() as testdir:
@@ -225,8 +252,7 @@ def test_load_atlas():
     reg = atlases.registry
     reg.rescanAtlases()

-    probatlas    = reg.loadAtlas('harvardoxford-cortical',
-                                 calcRange=False, loadData=False)
+    probatlas    = reg.loadAtlas('harvardoxford-cortical')
     probsumatlas = reg.loadAtlas('harvardoxford-cortical', loadSummary=True)
     lblatlas     = reg.loadAtlas('talairach')
@@ -250,6 +276,9 @@ def test_get():
             assert (target == atlas.get(index=label.index).data).all()
             assert (target == atlas.get(value=label.value).data).all()
             assert (target == atlas.get(name=label.name) .data).all()
+            if atlas is lblatlas:
+                target = target * label.value
+                assert (target == atlas.get(value=label.value, binary=False).data).all()

 def test_find():
@@ -257,8 +286,7 @@ def test_find():
     reg = atlases.registry
     reg.rescanAtlases()

-    probatlas    = reg.loadAtlas('harvardoxford-cortical',
-                                 calcRange=False, loadData=False)
+    probatlas    = reg.loadAtlas('harvardoxford-cortical')
     probsumatlas = reg.loadAtlas('harvardoxford-cortical', loadSummary=True)
     lblatlas     = reg.loadAtlas('talairach')
@@ -305,8 +333,7 @@ def test_prepareMask():
     reg = atlases.registry
     reg.rescanAtlases()

-    probatlas    = reg.loadAtlas('harvardoxford-cortical',
-                                 loadData=False, calcRange=False)
+    probatlas    = reg.loadAtlas('harvardoxford-cortical')
     probsumatlas = reg.loadAtlas('harvardoxford-cortical', loadSummary=True)
     lblatlas     = reg.loadAtlas('talairach')
...
@@ -17,7 +17,7 @@ import fsl.transform.affine as affine
 import fsl.utils.image.resample as resample
 import fsl.utils.cache          as cache

-from . import (testdir, make_random_mask)
+from fsl.tests import (testdir, make_random_mask)

 pytestmark = pytest.mark.fsltest
@@ -41,17 +41,16 @@ _atlases = cache.Cache()
 def _get_atlas(atlasID, res, summary=False):
     atlas = _atlases.get((atlasID, res, summary), default=None)
     if atlas is None:
+        atlas = fslatlases.loadAtlas(atlasID,
+                                     loadSummary=summary,
+                                     resolution=res)
+
+        # We need some atlases to be loaded into memory,
+        # so we can use boolean-mask-based indexing
         if summary or atlasID in ('talairach', 'striatum-structural',
                                   'jhu-labels', 'smatt'):
-            kwargs = {}
-        else:
-            kwargs = {'loadData'  : False,
-                      'calcRange' : False}
-
-        atlas = fslatlases.loadAtlas(atlasID,
-                                     loadSummary=summary,
-                                     resolution=res,
-                                     **kwargs)
+            atlas.data

         _atlases.put((atlasID, res, summary), atlas)
     return atlas
@@ -85,7 +84,7 @@ def _get_zero_mask(aimg):
     elif isinstance(aimg, fslatlases.ProbabilisticAtlas):
         # Keep memory usage down
-        zmask = np.ones(aimg.shape[:3], dtype=np.bool)
+        zmask = np.ones(aimg.shape[:3], dtype=bool)

         for vol in range(aimg.shape[-1]):
             zmask = np.logical_and(zmask, aimg[..., vol] == 0)
@@ -218,8 +217,8 @@ def _eval_coord_voxel_query(atlas, query, qtype, qin):
         elif qin == 'out':
             expval = []

-        assert atlas.proportions(     query, voxel=voxel) == expval
-        assert atlas.coordProportions(query, voxel=voxel) == expval
+        assert atlas.values(     query, voxel=voxel) == expval
+        assert atlas.coordValues(query, voxel=voxel) == expval

     if   isinstance(atlas, fslatlases.LabelAtlas):         evalLabel()
     elif isinstance(atlas, fslatlases.ProbabilisticAtlas): evalProb()
@@ -284,7 +283,7 @@ def _eval_mask_query(atlas, query, qtype, qin):
     rmask = resample.resample(
         mask, atlas.shape[:3], dtype=np.float32, order=0)[0]

-    rmask = np.array(rmask, dtype=np.bool)
+    rmask = np.array(rmask, dtype=bool)

     def evalLabel():
@@ -343,13 +342,13 @@ def _eval_mask_query(atlas, query, qtype, qin):
         if qin == 'out':
             with pytest.raises(fslatlases.MaskError):
-                atlas.maskProportions(mask)
+                atlas.maskValues(mask)
             with pytest.raises(fslatlases.MaskError):
-                atlas.proportions(    mask)
+                atlas.values(        mask)
             return

-        props  = atlas.    proportions(mask)
-        props2 = atlas.maskProportions(mask)
+        props  = atlas.    values(mask)
+        props2 = atlas.maskValues(mask)

        assert np.all(np.isclose(props, props2))
...
#!/usr/bin/env python
#
# test_bids.py -
#
# Author: Paul McCarthy <pauldmccarthy@gmail.com>
#
import json
import os.path as op
import itertools as it
from pathlib import Path
import pytest
from fsl.utils.tempdir import tempdir
import fsl.utils.bids as fslbids
def test_parseFilename():
badtests = ['bad_file.txt']
for test in badtests:
with pytest.raises(ValueError):
fslbids.parseFilename(test)
tests = [
('sub-01_ses-01_t1w.nii.gz',
({'sub' : '01', 'ses' : '01'}, 't1w')),
('a-1_b-2_c-3_d-4_e.json',
({'a' : '1', 'b' : '2', 'c' : '3', 'd' : '4'}, 'e')),
]
for filename, expect in tests:
assert fslbids.parseFilename(filename) == expect
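
The expectations above imply a parser that splits the filename stem on
underscores into key-value entities plus a trailing suffix. A minimal sketch
that satisfies these test cases, not the actual fsl.utils.bids implementation:

def parse_filename_sketch(filename):
    # 'sub-01_ses-01_t1w.nii.gz' -> ({'sub': '01', 'ses': '01'}, 't1w')
    stem         = filename.split('.')[0]
    *kvs, suffix = stem.split('_')
    entities     = {}
    for kv in kvs:
        if '-' not in kv:
            raise ValueError(f'Not a BIDS-style filename: {filename}')
        key, value    = kv.split('-', 1)
        entities[key] = value
    if not entities:
        raise ValueError(f'Not a BIDS-style filename: {filename}')
    return entities, suffix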
def test_isBIDSDir():
with tempdir():
assert not fslbids.isBIDSDir('.')
with tempdir():
Path('dataset_description.json').touch()
assert fslbids.isBIDSDir('.')
def test_inBIDSDir():
with tempdir():
Path('a/b/c').mkdir(parents=True)
Path('dataset_description.json').touch()
assert fslbids.inBIDSDir(Path('.'))
assert fslbids.inBIDSDir(Path('a'))
assert fslbids.inBIDSDir(Path('a/b'))
assert fslbids.inBIDSDir(Path('a/b/c'))
with tempdir():
Path('a/b/c').mkdir(parents=True)
assert not fslbids.inBIDSDir(Path('.'))
assert not fslbids.inBIDSDir(Path('a'))
assert not fslbids.inBIDSDir(Path('a/b'))
assert not fslbids.inBIDSDir(Path('a/b/c'))
def test_isBIDSFile():
goodfiles = [
Path('sub-01_ses-01_t1w.nii.gz'),
Path('sub-01_ses-01_t1w.nii'),
Path('sub-01_ses-01_t1w.json'),
Path('a-1_b-2_c-3_d-4_e.nii.gz'),
Path('sub-01_ses-01_t1w.txt'),
]
badfiles = [
Path('sub-01_ses-01.nii.gz'),
Path('sub-01_ses-01_t1w'),
Path('sub-01_ses-01_t1w.'),
Path('sub_ses-01_t1w.nii.gz'),
Path('sub-01_ses_t1w.nii.gz'),
]
with tempdir():
Path('dataset_description.json').touch()
for f in goodfiles: assert fslbids.isBIDSFile(f)
for f in badfiles: assert not fslbids.isBIDSFile(f)
with tempdir():
for f in it.chain(goodfiles, badfiles):
assert not fslbids.isBIDSFile(f)
def test_loadMetadata():
dd = Path('dataset_description.json')
t1 = Path('sub-01/func/sub-01_task-stim_bold.nii.gz')
json1 = Path('sub-01/func/sub-01_task-stim_bold.json')
json2 = Path('sub-01/sub-01_bold.json')
json3 = Path('sub-01_t1w.json')
json4 = Path('sub-01/task-stim_bold.json')
meta1 = {'a' : '1', 'b' : '2'}
meta2 = {'a' : '10', 'c' : '3'}
meta3 = {'a' : '109', 'b' : '99'}
meta4 = {'c' : '9', 'd' : '5'}
with tempdir():
dd.touch()
Path(op.dirname(t1)).mkdir(parents=True)
t1.touch()
assert fslbids.loadMetadata(t1) == {}
json1.write_text(json.dumps(meta1))
assert fslbids.loadMetadata(t1) == meta1
json2.write_text(json.dumps(meta2))
assert fslbids.loadMetadata(t1) == {**meta2, **meta1}
json3.write_text(json.dumps(meta3))
assert fslbids.loadMetadata(t1) == {**meta2, **meta1}
json4.write_text(json.dumps(meta4))
assert fslbids.loadMetadata(t1) == {**meta4, **meta2, **meta1}
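
The assertions above encode the BIDS inheritance rule: sidecar JSON files are
gathered from the dataset root down towards the data file, and files closer
to the data file take precedence. The merge order in isolation (an
illustration of the rule, not the fsl.utils.bids source):

shallow = {'c': '9',  'd': '5'}   # e.g. sub-01/task-stim_bold.json
middle  = {'a': '10', 'c': '3'}   # e.g. sub-01/sub-01_bold.json
deep    = {'a': '1',  'b': '2'}   # e.g. sub-01/func/sub-01_task-stim_bold.json

# later (deeper) dicts win key collisions
assert {**shallow, **middle, **deep} == \
       {'a': '1', 'b': '2', 'c': '3', 'd': '5'}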
def test_loadMetadata_control_characters():
dd = Path('dataset_description.json')
t1 = Path('sub-01/func/sub-01_task-stim_bold.nii.gz')
json1 = Path('sub-01/func/sub-01_task-stim_bold.json')
meta1 = {"a" : "1", "b" : "2\x19\x20"}
smeta1 = '{"a" : "1", "b" : "2\x19\x20"}'
with tempdir():
dd.touch()
Path(op.dirname(t1)).mkdir(parents=True)
t1.touch()
assert fslbids.loadMetadata(t1) == {}
json1.write_text(smeta1)
assert fslbids.loadMetadata(t1) == meta1
def test_loadMetadata_symlinked():
ddreal = Path('a')
t1real = Path('b')
j1real = Path('c')
j2real = Path('d')
j3real = Path('e')
j4real = Path('f')
dd = Path('data/dataset_description.json')
t1 = Path('data/sub-01/func/sub-01_task-stim_bold.nii.gz')
json1 = Path('data/sub-01/func/sub-01_task-stim_bold.json')
json2 = Path('data/sub-01/sub-01_bold.json')
json3 = Path('data/sub-01_t1w.json')
json4 = Path('data/sub-01/task-stim_bold.json')
meta1 = {'a' : '1', 'b' : '2'}
meta2 = {'a' : '10', 'c' : '3'}
meta3 = {'a' : '109', 'b' : '99'}
meta4 = {'c' : '9', 'd' : '5'}
with tempdir():
ddreal.touch()
t1real.touch()
j1real.write_text(json.dumps(meta1))
j2real.write_text(json.dumps(meta2))
j3real.write_text(json.dumps(meta3))
j4real.write_text(json.dumps(meta4))
Path(op.dirname(t1)).mkdir(parents=True)
dd .symlink_to(op.join('..', ddreal))
t1 .symlink_to(op.join('..', '..', '..', t1real))
json1.symlink_to(op.join('..', '..', '..', j1real))
json2.symlink_to(op.join('..', '..', j2real))
json3.symlink_to(op.join('..', j3real))
json4.symlink_to(op.join('..', '..', j4real))
assert fslbids.loadMetadata(t1) == {**meta4, **meta2, **meta1}
File moved
@@ -113,7 +113,55 @@ def test_expiry():
         with pytest.raises(cache.Expired):
             c.get(0)

-        with pytest.raises(cache.Expired):
-            c.get(1)
+        assert c.get(1, default='default') == 'default'

         # And that the cache is empty
         assert len(c) == 0
def test_lru():
c = cache.Cache(maxsize=3, lru=True)
c[0] = '0'
c[1] = '1'
c[2] = '2'
c[3] = '3'
# normal behaviour - first inserted
# is dropped
with pytest.raises(KeyError):
assert c.get(0)
# lru behaviour - oldest accessed is
# dropped
c[1]
c[4] = '4'
with pytest.raises(KeyError):
c[2]
c[1]
c[3]
c[4]
assert len(c) == 3
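
The eviction pattern exercised above matches a standard LRU built on an
ordered mapping: every access refreshes a key's recency, and the least
recently used key is evicted first. A minimal sketch of that behaviour, not
the actual fsl.utils.cache.Cache implementation:

from collections import OrderedDict

class LRUSketch:
    def __init__(self, maxsize):
        self.maxsize = maxsize
        self.data    = OrderedDict()

    def __getitem__(self, key):
        # Accessing a key makes it the most recently used;
        # missing keys raise KeyError, as in the test above.
        self.data.move_to_end(key)
        return self.data[key]

    def __setitem__(self, key, value):
        self.data[key] = value
        self.data.move_to_end(key)
        if len(self.data) > self.maxsize:
            # evict the least recently used entry
            self.data.popitem(last=False)

c = LRUSketch(3)
for k in range(4):
    c[k] = str(k)        # 0 is evicted (least recently used)
c[1]                     # refresh 1
c[4] = '4'               # now 2 is evicted, not 1
assert sorted(c.data) == [1, 3, 4]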
def test_accessors():
c = cache.Cache(maxsize=3)
c[0] = '0'
c[1] = '1'
c[2] = '2'
c[3] = '3'
assert list(c.keys()) == [ 1, 2, 3]
assert list(c.values()) == ['1', '2', '3']
assert list(c.items()) == [(1, '1'), (2, '2'), (3, '3')]
assert 0 not in c
assert 1 in c
assert 2 in c
assert 3 in c
from fsl.data import cifti
import os.path as op
import numpy as np
import nibabel as nib
from numpy import testing
import fsl.tests as tests
from nibabel.cifti2 import cifti2_axes
def volumetric_brain_model():
mask = np.random.randint(2, size=(10, 10, 10)) > 0
return cifti2_axes.BrainModelAxis.from_mask(mask, affine=np.eye(4))
def surface_brain_model():
mask = np.random.randint(2, size=100) > 0
return cifti2_axes.BrainModelAxis.from_mask(mask, name='cortex')
def volumetric_parcels(return_mask=False):
mask = np.random.randint(5, size=(10, 10, 10))
axis = cifti2_axes.ParcelsAxis(
[f'vol_{idx}' for idx in range(1, 5)],
voxels=[np.stack(np.where(mask == idx), axis=-1) for idx in range(1, 5)],
vertices=[{} for _ in range(1, 5)],
volume_shape=mask.shape,
affine=np.eye(4),
)
if return_mask:
return axis, mask
else:
return axis
def surface_parcels(return_mask=False):
mask = np.random.randint(5, size=100)
axis = cifti2_axes.ParcelsAxis(
[f'surf_{idx}' for idx in range(1, 5)],
voxels=[np.zeros((0, 3), dtype=int) for _ in range(1, 5)],
vertices=[{'CIFTI_STRUCTURE_CORTEX': np.where(mask == idx)[0]} for idx in range(1, 5)],
nvertices={'CIFTI_STRUCTURE_CORTEX': 100},
)
if return_mask:
return axis, mask
else:
return axis
def gen_data(axes):
return np.random.randn(*(5 if ax is None else len(ax) for ax in axes))
def test_read_gifti():
testdir = op.join(op.dirname(__file__), 'testdata')
shapefile = op.join(testdir, 'example.shape.gii')
ref_data = nib.load(shapefile).darrays[0].data
data = cifti.load(shapefile)
assert isinstance(data, cifti.DenseCifti)
assert data.arr.shape == (642, )
testing.assert_equal(data.arr, ref_data)
testing.assert_equal(data.brain_model_axis.vertex, np.arange(642))
assert len(data.brain_model_axis.nvertices) == 1
assert data.brain_model_axis.nvertices['CIFTI_STRUCTURE_OTHER'] == 642
data = cifti.load(shapefile, mask_values=(ref_data[0], ))
assert isinstance(data, cifti.DenseCifti)
assert data.arr.shape == (np.sum(ref_data != ref_data[0]), )
testing.assert_equal(data.arr, ref_data[ref_data != ref_data[0]])
testing.assert_equal(data.brain_model_axis.vertex, np.where(ref_data != ref_data[0])[0])
assert len(data.brain_model_axis.nvertices) == 1
assert data.brain_model_axis.nvertices['CIFTI_STRUCTURE_OTHER'] == 642
cifti.load(op.join(testdir, 'example'))
def test_read_nifti():
mask = np.random.randint(2, size=(10, 10, 10)) > 0
values = np.random.randn(10, 10, 10)
for mask_val in (0, np.nan):
values[~mask] = mask_val
affine = np.concatenate((np.random.randn(3, 4), np.array([0, 0, 0, 1])[None, :]), axis=0)
with tests.testdir():
nib.Nifti1Image(values, affine).to_filename("masked_image.nii.gz")
data = cifti.load("masked_image")
assert isinstance(data, cifti.DenseCifti)
testing.assert_equal(data.arr, values[mask])
testing.assert_allclose(data.brain_model_axis.affine, affine)
assert len(data.brain_model_axis.nvertices) == 0
def check_io(data: cifti.Cifti, extension):
with tests.testdir():
data.save("test")
assert op.isfile(f'test.{extension}.nii')
loaded = cifti.load("test")
if data.arr.ndim == 1:
testing.assert_equal(data.arr, loaded.arr[0])
assert data.axes == loaded.axes[1:]
else:
testing.assert_equal(data.arr, loaded.arr)
assert data.axes == loaded.axes
def test_io_cifti():
for cifti_class, cifti_type, main_axis_options in (
(cifti.DenseCifti, 'd', (volumetric_brain_model(), surface_brain_model(),
volumetric_brain_model() + surface_brain_model())),
(cifti.ParcelCifti, 'p', (volumetric_parcels(), surface_parcels(),
volumetric_parcels() + surface_parcels())),
):
for main_axis in main_axis_options:
with tests.testdir():
data_1d = cifti_class(gen_data([main_axis]), [main_axis])
check_io(data_1d, f'{cifti_type}scalar')
connectome = cifti_class(gen_data([main_axis, main_axis]), (main_axis, main_axis))
check_io(connectome, f'{cifti_type}conn')
scalar_axis = cifti2_axes.ScalarAxis(['A', 'B', 'C'])
scalar = cifti_class(gen_data([scalar_axis, main_axis]), (scalar_axis, main_axis))
check_io(scalar, f'{cifti_type}scalar')
label_axis = cifti2_axes.LabelAxis(['A', 'B', 'C'], {1: ('some parcel', (1, 0, 0, 1))})
label = cifti_class(gen_data([label_axis, main_axis]), (label_axis, main_axis))
check_io(label, f'{cifti_type}label')
series_axis = cifti2_axes.SeriesAxis(10, 3, 50, unit='HERTZ')
series = cifti_class(gen_data([series_axis, main_axis]), (series_axis, main_axis))
check_io(series, f'{cifti_type}tseries')
if cifti_type == 'd':
parcel_axis = surface_parcels()
dpconn = cifti_class(gen_data([parcel_axis, main_axis]), (parcel_axis, main_axis))
check_io(dpconn, 'dpconn')
else:
dense_axis = surface_brain_model()
pdconn = cifti_class(gen_data([dense_axis, main_axis]), (dense_axis, main_axis))
check_io(pdconn, 'pdconn')
def test_extract_dense():
vol_bm = volumetric_brain_model()
surf_bm = surface_brain_model()
for bm in (vol_bm + surf_bm, surf_bm + vol_bm):
for ndim, no_other_axis in ((1, True), (2, False), (2, True)):
if ndim == 1:
data = cifti.DenseCifti(gen_data([bm]), [bm])
else:
scl = cifti2_axes.ScalarAxis(['A', 'B', 'C'])
data = cifti.DenseCifti(gen_data([scl, bm]),
[None if no_other_axis else scl, bm])
# extract volume
ref_arr = data.arr[..., data.brain_model_axis.volume_mask]
vol_image = data.to_image(fill=np.nan)
if ndim == 1:
assert vol_image.shape == data.brain_model_axis.volume_shape
else:
assert vol_image.shape == data.brain_model_axis.volume_shape + (3, )
assert np.isfinite(vol_image.data).sum() == len(vol_bm) * (3 if ndim == 2 else 1)
testing.assert_equal(vol_image.data[tuple(vol_bm.voxel.T)], ref_arr.T)
from_image = cifti.DenseCifti.from_image(vol_image)
assert from_image.brain_model_axis == vol_bm
testing.assert_equal(from_image.arr, ref_arr)
# extract surface
ref_arr = data.arr[..., data.brain_model_axis.surface_mask]
mask, surf_data = data.surface('cortex', partial=True)
assert surf_data.shape[-1] < 100
testing.assert_equal(ref_arr, surf_data)
testing.assert_equal(surf_bm.vertex, mask)
surf_data_full = data.surface('cortex', fill=np.nan)
assert surf_data_full.shape[-1] == 100
mask_full = np.isfinite(surf_data_full)
if ndim == 2:
assert (mask_full.any(0) == mask_full.all(0)).all()
mask_full = mask_full[0]
assert mask_full.sum() == len(surf_bm)
assert mask_full[..., mask].sum() == len(surf_bm)
testing.assert_equal(surf_data_full[..., mask_full], ref_arr)
def test_extract_parcel():
vol_parcel, vol_mask = volumetric_parcels(return_mask=True)
surf_parcel, surf_mask = surface_parcels(return_mask=True)
parcel = vol_parcel + surf_parcel
for ndim, no_other_axis in ((1, True), (2, False), (2, True)):
if ndim == 1:
data = cifti.ParcelCifti(gen_data([parcel]), [parcel])
else:
scl = cifti2_axes.ScalarAxis(['A', 'B', 'C'])
data = cifti.ParcelCifti(gen_data([scl, parcel]),
[None if no_other_axis else scl, parcel])
# extract volume
vol_image = data.to_image(fill=np.nan)
if ndim == 1:
assert vol_image.shape == data.parcel_axis.volume_shape
else:
assert vol_image.shape == data.parcel_axis.volume_shape + (3, )
assert np.isfinite(vol_image.data).sum() == np.sum(vol_mask != 0) * (3 if ndim == 2 else 1)
if ndim == 1:
testing.assert_equal(vol_mask != 0, np.isfinite(vol_image.data))
for idx in range(1, 5):
testing.assert_allclose(vol_image.data[vol_mask == idx], data.arr[..., idx - 1])
else:
for idx in range(3):
testing.assert_equal(vol_mask != 0, np.isfinite(vol_image.data[..., idx]))
for idx2 in range(1, 5):
testing.assert_allclose(vol_image.data[vol_mask == idx2, idx], data.arr[idx, idx2 - 1])
# extract surface
mask, surf_data = data.surface('cortex', partial=True)
assert surf_data.shape[-1] == (surf_mask != 0).sum()
assert (surf_mask[mask] != 0).all()
print(data.arr)
for idx in range(1, 5):
if ndim == 1:
testing.assert_equal(surf_data.T[surf_mask[mask] == idx], data.arr[idx + 3])
else:
for idx2 in range(3):
testing.assert_equal(surf_data.T[surf_mask[mask] == idx, idx2], data.arr[idx2, idx + 3])
surf_data_full = data.surface('cortex', partial=False)
assert surf_data_full.shape[-1] == 100
if ndim == 1:
testing.assert_equal(np.isfinite(surf_data_full), surf_mask != 0)
for idx in range(1, 5):
testing.assert_equal(surf_data_full.T[surf_mask == idx], data.arr[idx + 3])
else:
for idx2 in range(3):
testing.assert_equal(np.isfinite(surf_data_full)[idx2], (surf_mask != 0))
for idx in range(1, 5):
testing.assert_equal(surf_data_full.T[surf_mask == idx, idx2], data.arr[idx2, idx + 3])
def test_brainstructure():
for primary in ['cortex', 'cerebellum']:
for secondary in [None, 'white', 'pial']:
for gtype in [None, 'volume', 'surface']:
for orientation in ['left', 'right', 'both']:
bst = cifti.BrainStructure(primary, secondary, orientation, gtype)
print(bst.cifti)
assert bst.cifti == 'CIFTI_STRUCTURE_%s%s' % (primary.upper(), '' if orientation == 'both' else '_' + orientation.upper())
assert bst.gifti['AnatomicalStructurePrimary'][:len(primary)] == primary.capitalize()
assert len(bst.gifti) == (1 if secondary is None else 2)
if secondary is not None:
assert bst.gifti['AnatomicalStructureSecondary'] == secondary.capitalize()
assert bst == cifti.BrainStructure(primary, secondary, orientation, gtype)
assert bst == bst
assert bst != cifti.BrainStructure('Thalamus', secondary, orientation, gtype)
if secondary is None:
assert bst == cifti.BrainStructure(primary, 'midplane', orientation, gtype)
else:
assert bst != cifti.BrainStructure(primary, 'midplane', orientation, gtype)
if (gtype == 'volume' and primary == 'cortex') or (gtype == 'surface' and primary != 'cortex'):
assert cifti.BrainStructure.from_string(bst.cifti) != bst
else:
assert cifti.BrainStructure.from_string(bst.cifti) == bst
assert cifti.BrainStructure.from_string(bst.cifti).secondary is None
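
The naming rules asserted above can be seen with one concrete value, an
illustration built only from the test's own assertions, not from the cifti
module source:

bst = cifti.BrainStructure('cortex', None, 'left', 'surface')
assert bst.cifti == 'CIFTI_STRUCTURE_CORTEX_LEFT'
# round-trips, since this is a surface cortex structure
assert cifti.BrainStructure.from_string(bst.cifti) == bst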
@@ -7,12 +7,22 @@ import pytest
 import fsl.utils.deprecated as deprecated

-# the line number of the warning is hard coded in
-# the unit tests below. Don't change the line number!
+# these get updated in the relevant functions
+WARNING_LINE_NUMBER    = None
+DEPRECATED_LINE_NUMBER = None

+def _linenum(pattern):
+    with open(__file__, 'rt') as f:
+        for i, line in enumerate(f.readlines(), 1):
+            if pattern in line:
+                return i
+    return -1

 def emit_warning():
     deprecated.warn('blag', vin='1.0.0', rin='2.0.0', msg='yo')
-WARNING_LINE_NUMBER = 13
+    global WARNING_LINE_NUMBER
+    WARNING_LINE_NUMBER = _linenum('deprecated.warn(\'blag\'')

 @deprecated.deprecated(vin='1.0.0', rin='2.0.0', msg='yo')
@@ -20,9 +30,9 @@ def depfunc():
     pass

 def call_dep_func():
-    depfunc()
+    depfunc() # mark
-DEPRECATED_LINE_NUMBER = 23
+    global DEPRECATED_LINE_NUMBER
+    DEPRECATED_LINE_NUMBER = _linenum('depfunc() # mark')

 def _check_warning(w, name, lineno):
...
#!/usr/bin/env python
#
# These tests require an internet connection, and will only work on linux.
#
import os.path as op
import os
import functools as ft
import subprocess as sp
import tarfile
import zipfile
import random
import string
import binascii
import contextlib
import urllib.request as request
from unittest import mock
import pytest
import fsl.data.dicom as fsldcm
import fsl.utils.tempdir as tempdir
from fsl.tests import mockFSLDIR, touch
datadir = op.join(op.dirname(__file__), 'testdata')
pytestmark = pytest.mark.dicomtest
@contextlib.contextmanager
def install_dcm2niix(version='1.0.20220720'):
filenames = {
'1.0.20201102' : 'v1.0.20201102/dcm2niix_lnx.zip',
'1.0.20190902' : 'v1.0.20190902/dcm2niix_lnx.zip',
'1.0.20181125' : 'v1.0.20181125/dcm2niix_25-Nov-2018_lnx.zip',
'1.0.20171017' : 'v1.0.20171017/dcm2niix_18-Oct-2017_lnx.zip',
'1.0.20220720' : 'v1.0.20220720/dcm2niix_lnx.zip',
}
prefix = 'https://github.com/rordenlab/dcm2niix/releases/download/'
url = prefix + filenames.get(version, f'v{version}/dcm2niix_lnx.zip')
with tempdir.tempdir() as td:
request.urlretrieve(url, 'dcm2niix.zip')
with zipfile.ZipFile('dcm2niix.zip', 'r') as f:
f.extractall('.')
os.chmod(op.join(td, 'dcm2niix'), 0o755)
path = op.abspath('dcm2niix')
with mock.patch('fsl.data.dicom.dcm2niix', return_value=path):
try:
yield
finally:
fsldcm.installedVersion.invalidate()
def test_disabled():
with mock.patch('fsl.data.dicom.enabled', return_value=False):
with pytest.raises(RuntimeError):
fsldcm.scanDir('.')
with pytest.raises(RuntimeError):
fsldcm.loadSeries({})
def test_dcm2niix():
"""
"""
env = os.environ.copy()
env.pop('FSLDIR', None)
with tempdir.tempdir() as td:
env['PATH'] = td
with mock.patch('os.environ', env):
assert fsldcm.dcm2niix() == 'dcm2niix'
bindir = op.join(td, 'bin')
dcm2niix = op.join(bindir, 'dcm2niix')
os.makedirs(bindir)
touch(dcm2niix)
os.chmod(dcm2niix, 0o755)
env['PATH'] = bindir
with mock.patch('os.environ', env):
assert fsldcm.dcm2niix() == dcm2niix
with mockFSLDIR(bin=['dcm2niix']) as fsldir:
env['FSLDIR'] = fsldir
dcm2niix = op.join(fsldir, 'bin', 'dcm2niix')
with mock.patch('os.environ', env):
assert fsldcm.dcm2niix() == dcm2niix
def test_installedVersion():
tests = [
('1.0.20190902', (1, 0, 2019, 9, 2)),
('1.0.20181125', (1, 0, 2018, 11, 25)),
('1.0.20171017', (1, 0, 2017, 10, 17))]
for version, expect in tests:
fsldcm.installedVersion.invalidate()
with install_dcm2niix(version):
got = fsldcm.installedVersion()
assert got == expect
def test_enabled():
try:
with install_dcm2niix('1.0.20190902'):
fsldcm.installedVersion.invalidate()
assert fsldcm.enabled()
# test dcm2niix not present
with mock.patch('subprocess.check_output',
side_effect=Exception()):
fsldcm.installedVersion.invalidate()
assert not fsldcm.enabled()
# test presence of different versions
tests = [(b'version v2.1.20191212', True),
(b'version v1.0.20190902', True),
(b'version v1.0.20171216', True),
(b'version v1.0.20171215', True),
(b'version v1.0.20171214', False),
(b'version v1.0.20160930', False),
(b'version v1.0.20160929', False),
(b'version v0.0.00000000', False),
(b'version blurgh', False)]
for verstr, expected in tests:
fsldcm.installedVersion.invalidate()
with mock.patch('subprocess.check_output', return_value=verstr):
assert fsldcm.enabled() == expected
finally:
fsldcm.installedVersion.invalidate()
def test_scanDir():
with install_dcm2niix():
series = fsldcm.scanDir('.')
assert len(series) == 0
datafile = op.join(datadir, 'example_dicom.tbz2')
with tarfile.open(datafile) as f:
f.extractall(filter='data')
series = fsldcm.scanDir('.')
assert len(series) == 2
for s in series:
assert s['PatientName'] in ('MCCARTHY_PAUL',
'MCCARTHY^PAUL',
'MCCARTHY_PAUL_2',
'MCCARTHY^PAUL^2')
def test_seriesCRC():
RANDOM = object()
tests = [
({'SeriesInstanceUID' : 'hello-world'}, '2983461467'),
({'SeriesInstanceUID' : RANDOM, 'EchoNumber' : 0}, RANDOM),
({'SeriesInstanceUID' : RANDOM, 'EchoNumber' : 1}, RANDOM),
({'SeriesInstanceUID' : RANDOM, 'EchoNumber' : 2}, RANDOM),
({'SeriesInstanceUID' : RANDOM, 'EchoNumber' : 3}, RANDOM),
]
for series, expect in tests:
series = dict(series)
if expect is RANDOM:
expect = ''.join([random.choice(string.ascii_letters + string.digits)
for i in range(30)])
series['SeriesInstanceUID'] = expect
expect = str(binascii.crc32(expect.encode()))
echo = series.get('EchoNumber', 0)
if echo > 1:
expect += '.{}'.format(echo)
assert fsldcm.seriesCRC(series) == expect
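
The expected values above pin down what seriesCRC presumably computes: a
CRC32 checksum of the SeriesInstanceUID, with the echo number appended when
it is greater than 1. In isolation, mirroring the test's own arithmetic
rather than the fsl.data.dicom source:

import binascii

def series_crc_sketch(series):
    uid  = series['SeriesInstanceUID']
    crc  = str(binascii.crc32(uid.encode()))
    echo = series.get('EchoNumber', 0)
    if echo > 1:
        crc += '.{}'.format(echo)
    return crc

assert series_crc_sketch({'SeriesInstanceUID': 'hello-world'}) == '2983461467'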
def test_loadSeries():
# test a pre-CRC and a post-CRC version
for version in ('1.0.20181125', '1.0.20201102'):
with install_dcm2niix(version):
datafile = op.join(datadir, 'example_dicom.tbz2')
with tarfile.open(datafile) as f:
f.extractall()
dcmdir = os.getcwd()
series = fsldcm.scanDir(dcmdir)
expShape = (512, 512, 1)
for s in series:
imgs = fsldcm.loadSeries(s)
for img in imgs:
assert img.dicomDir == dcmdir
assert img.shape == expShape
assert img[:].shape == expShape
assert img.getMeta('PatientName') in ('MCCARTHY_PAUL',
'MCCARTHY^PAUL',
'MCCARTHY_PAUL_2',
'MCCARTHY^PAUL^2')
assert 'PatientName' in img.metaKeys()
assert 'MCCARTHY_PAUL' in img.metaValues() or \
'MCCARTHY^PAUL' in img.metaValues() or \
'MCCARTHY_PAUL_2' in img.metaValues() or \
'MCCARTHY^PAUL^2' in img.metaValues()
assert ('PatientName', 'MCCARTHY_PAUL') in img.metaItems() or \
('PatientName', 'MCCARTHY^PAUL') in img.metaItems() or \
('PatientName', 'MCCARTHY_PAUL_2') in img.metaItems() or \
('PatientName', 'MCCARTHY^PAUL^2') in img.metaItems()
@@ -10,7 +10,7 @@ import numpy as np
 import pytest

-import tests
+import fsl.tests as tests
 import fsl.data.dtifit as dtifit
 import fsl.data.image  as fslimage
@@ -169,12 +169,12 @@ def test_DTIFitTensor():
         l2file = op.join(testdir, 'dti_L2.nii')
         l3file = op.join(testdir, 'dti_L3.nii')

-        v1 = tests.make_random_image(v1file, (5, 5, 5, 3)).get_data()
-        v2 = tests.make_random_image(v2file, (5, 5, 5, 3)).get_data()
-        v3 = tests.make_random_image(v3file, (5, 5, 5, 3)).get_data()
-        l1 = tests.make_random_image(l1file, (5, 5, 5))   .get_data()
-        l2 = tests.make_random_image(l2file, (5, 5, 5))   .get_data()
-        l3 = tests.make_random_image(l3file, (5, 5, 5))   .get_data()
+        v1 = np.asanyarray(tests.make_random_image(v1file, (5, 5, 5, 3)).dataobj)
+        v2 = np.asanyarray(tests.make_random_image(v2file, (5, 5, 5, 3)).dataobj)
+        v3 = np.asanyarray(tests.make_random_image(v3file, (5, 5, 5, 3)).dataobj)
+        l1 = np.asanyarray(tests.make_random_image(l1file, (5, 5, 5))   .dataobj)
+        l2 = np.asanyarray(tests.make_random_image(l2file, (5, 5, 5))   .dataobj)
+        l3 = np.asanyarray(tests.make_random_image(l3file, (5, 5, 5))   .dataobj)

         dtiobj = dtifit.DTIFitTensor(testdir)
...
@@ -5,6 +5,7 @@
 # Author: Paul McCarthy <pauldmccarthy@gmail.com>
 #

+import pathlib

 import numpy as np
 import nibabel as nib
@@ -12,7 +13,7 @@ import nibabel as nib
 import fsl.utils.tempdir as tempdir
 import fsl.utils.ensure  as ensure

-from . import make_random_image
+from fsl.tests import make_random_image

 def test_ensureIsImage():
@@ -22,12 +23,14 @@ def test_ensureIsImage():
         assert ensure.ensureIsImage(img) is img

-        loaded = [ensure.ensureIsImage('image.nii'),
-                  ensure.ensureIsImage('image')]
+        loaded = [ensure.ensureIsImage( 'image.nii'),
+                  ensure.ensureIsImage( 'image'),
+                  ensure.ensureIsImage(pathlib.Path('image')),
+                  ensure.ensureIsImage(pathlib.Path('image.nii'))]

         for l in loaded:
             assert isinstance(l, nib.nifti1.Nifti1Image)
-            assert np.all(img.get_data() == l.get_data())
+            assert np.all(np.asanyarray(img.dataobj) == np.asanyarray(l.dataobj))

         l      = None
         loaded = None
...
@@ -17,7 +17,7 @@ import numpy as np
 import pytest

-import tests
+import fsl.tests as tests
 import fsl.data.featanalysis as featanalysis
 import fsl.data.featdesign   as featdesign
 import fsl.data.image        as fslimage
@@ -59,7 +59,7 @@ def test_isFEATDir():
     # it's not a feat directory
     assert not featanalysis.isFEATDir('nonexistent.feat')

     # If any of the above files are not
     # present, it is not a FEAT directory
     perms = it.chain(it.combinations(paths, 1),
                      it.combinations(paths, 2),
@@ -69,7 +69,7 @@ def test_isFEATDir():
             assert not featanalysis.isFEATDir(
                 op.join(testdir, 'analysis.feat'))

 def test_hasStats():

     with tests.testdir(['analysis.feat/stats/zstat1.nii.gz']) as testdir:
@@ -78,7 +78,7 @@ def test_hasStats():
     with tests.testdir(['analysis.feat/stats/zstat1.txt']) as testdir:
         featdir = op.join(testdir, 'analysis.feat')
         assert not featanalysis.hasStats(featdir)

 def test_hasMelodicDir():
@@ -94,7 +94,7 @@ def test_getAnalysisDir():
              'analysis.feat/design.fsf',
              'analysis.feat/design.mat',
              'analysis.feat/design.con']

     testpaths = ['analysis.feat/filtered_func_data.nii.gz',
                  'analysis.feat/stats/zstat1.nii.gz',
                  'analysis.feat/logs/feat4_post',
@@ -106,7 +106,7 @@ def test_getAnalysisDir():
             t = op.join(testdir, t)
             assert featanalysis.getAnalysisDir(t) == expected

 def test_getTopLevelAnalysisDir():
     testcases = [
         ('analysis.feat/filtered_func_data.ica/melodic_IC.nii.gz', 'analysis.feat'),
@@ -127,7 +127,7 @@ def test_getReportFile():
     for paths, expected in testcases:
         with tests.testdir(paths) as testdir:
             featdir = op.join(testdir, 'analysis.feat')

             if expected:
@@ -145,7 +145,7 @@ def test_loadContrasts():
         /ContrastName1 c1
         /ContrastName2 c2
         /ContrastName3 c3
         /NumContrasts  3
         /Matrix
         1 0 0
         0 1 0
@@ -188,9 +188,9 @@ def test_loadContrasts():
         0 1 1
         """,
     ]

     with pytest.raises(Exception):
         featanalysis.loadContrasts('no file')

     with tests.testdir() as testdir:
         featdir = op.join(testdir, 'analysis.feat')
@@ -210,6 +210,77 @@ def test_loadContrasts():
                 featanalysis.loadContrasts(featdir)
def test_loadFTests():
goodtests = [
("""
/NumWaves 4
/NumContrasts 3
/Matrix
0 1 0 1
0 0 1 1
1 1 1 1
""",
[[0, 1, 0, 1],
[0, 0, 1, 1],
[1, 1, 1, 1]]),
("""
/NumWaves 10
/NumContrasts 2
/Matrix
0 1 0 1 0 1 1 0 0 1
0 0 1 1 1 0 0 1 0 0
""",
[[0, 1, 0, 1, 0, 1, 1, 0, 0, 1],
[0, 0, 1, 1, 1, 0, 0, 1, 0, 0]]),
]
badtests = [
"""
/NumWaves 10
/NumContrasts 2
""",
"""
/NumContrasts 2
/Matrix
1 0
0 1
""",
"""
/NumWaves Burgers
/NumContrasts 2
/Matrix
1 0
0 1
""",
"""
/Matrix
1 0
0 1
""",
"""
/NumWaves 4
/NumContrasts 3
/Matrix
1 0 0 0 1 0 0
0 1 0 0 1 0 0
""",
]
with tests.testdir() as testdir:
featdir = op.join(testdir, 'analysis.feat')
for contents, expect in goodtests:
designcon = op.join(featdir, 'design.fts')
tests.make_dummy_file(designcon, textwrap.dedent(contents).strip())
assert featanalysis.loadFTests(featdir) == expect
for contents in badtests:
designcon = op.join(featdir, 'design.fts')
tests.make_dummy_file(designcon, textwrap.dedent(contents).strip())
with pytest.raises(Exception):
featanalysis.loadFTests(featdir)

 def test_loadSettings():

     contents = """
@@ -240,9 +311,10 @@ def test_loadSettings():
     with tests.testdir() as testdir:
         featdir = op.join(testdir, 'analysis.feat')

-        tests.make_dummy_file(op.join(featdir, 'design.fsf'), contents)
-        result = featanalysis.loadSettings(featdir)
-        assert result == expected
+        designfsf = op.join(featdir, 'design.fsf')
+        tests.make_dummy_file(designfsf, contents)
+        assert featanalysis.loadSettings(featdir) == expected
+        assert featanalysis.loadFsf(designfsf)    == expected

 def test_loadDesign():
@@ -274,7 +346,7 @@ def test_isFirstLevelAnalysis():
                 '2ndlevel_1.gfeat', '2ndlevel_2.gfeat']

     for featdir in featdirs:
         expected = featdir.startswith('1')
         featdir  = op.join(datadir, featdir)
         settings = featanalysis.loadSettings(featdir)
@@ -288,7 +360,10 @@ def test_loadClusterResults():
                 '2ndlevel_1.gfeat/cope1.feat', '2ndlevel_1.gfeat/cope2.feat',
                 '2ndlevel_2.gfeat/cope1.feat', '2ndlevel_2.gfeat/cope2.feat']
     ncontrasts = [2, 2, 2, 1, 1, 1, 1]
-    nclusters  = [[1, 5], [2, 2], [3, 5], [7], [1], [10], [27]]
+    nftests    = [0, 0, 1, 0, 0, 1, 1]
+
+    # nclusters = [contrastclusters] + [ftestclusters]
+    nclusters  = [[1, 5], [2, 2], [3, 5, 3], [7], [1], [10, 8], [27, 21]]

     with pytest.raises(Exception):
         featanalysis.loadClusterResults('notafeatdir')
@@ -300,17 +375,18 @@ def test_loadClusterResults():
         with tests.testdir() as testdir:

-            # For higher level analyses, the
-            # loadClusterResults function peeks
-            # at the FEAT input data file
-            # header, so we have to generate it.
+            # work from a copy of the test data directory
             newfeatdir = op.join(testdir, 'analysis.feat')
             shutil.copytree(op.join(datadir, featdir), newfeatdir)
             featdir = newfeatdir

+            # For higher level analyses, the
+            # loadClusterResults function peeks
+            # at the FEAT input data file
+            # header, so we have to generate it.
             if not firstlevel:
                 datafile = op.join(featdir, 'filtered_func_data.nii.gz')
-                data     = np.random.randint(1, 10, (91, 109, 91))
+                data     = np.random.randint(1, 10, (91, 109, 91), dtype=np.int32)
                 xform    = np.array([[-2, 0, 0,   90],
                                      [ 0, 2, 0, -126],
                                      [ 0, 0, 2,  -72],
@@ -318,20 +394,55 @@
                 fslimage.Image(data, xform=xform).save(datafile)

             settings = featanalysis.loadSettings(featdir)

+            # contrasts
             for c in range(ncontrasts[i]):
                 clusters = featanalysis.loadClusterResults(
                     featdir, settings, c)
                 assert len(clusters) == nclusters[i][c]

+            # f-tests
+            for c in range(nftests[i]):
+                clusters = featanalysis.loadClusterResults(
+                    featdir, settings, c, ftest=True)
+                assert len(clusters) == nclusters[i][c + ncontrasts[i]]

             # Test calling the function on a feat dir
             # which doesn't have any cluster results
+            # (2ndlevel_2.gfeat)
             if i == len(featdirs) - 1:
                 for clustfile in glob.glob(op.join(featdir, 'cluster*txt')):
                     os.remove(clustfile)
                 assert featanalysis.loadClusterResults(
                     featdir, settings, 0) is None
# The above loop just checks that the number of
# clusters loaded for each analysis was correct.
# Below we check that the cluster data was loaded
# correctly, just for one analysis
featdir = op.join(datadir, '1stlevel_1.feat')
settings = featanalysis.loadSettings(featdir)
cluster = featanalysis.loadClusterResults(featdir, settings, 0)[0]
expected = {
'index' : 1,
'nvoxels' : 296,
'p' : 1.79e-27,
'logp' : 26.7,
'zmax' : 6.03,
'zmaxx' : 34,
'zmaxy' : 10,
'zmaxz' : 1,
'zcogx' : 31.4,
'zcogy' : 12.3,
'zcogz' : 1.72,
'copemax' : 612,
'copemaxx' : 34,
'copemaxy' : 10,
'copemaxz' : 1,
'copemean' : 143
}
for k, v in expected.items():
assert np.isclose(v, getattr(cluster, k))

 def test_getDataFile():
     paths = ['analysis.feat/filtered_func_data.nii.gz',
@@ -392,9 +503,9 @@ def test_getResidualFile():
                 assert featanalysis.getResidualFile(featdir) == expect
             else:
                 with pytest.raises(fslpath.PathError):
                     featanalysis.getResidualFile(featdir)

 def test_getPEFile():
     testcases = [
         (['analysis.feat/stats/pe1.nii.gz',
@@ -416,7 +527,7 @@ def test_getPEFile():
                 assert featanalysis.getPEFile(featdir, pei) == expect
             else:
                 with pytest.raises(fslpath.PathError):
                     featanalysis.getPEFile(featdir, pei)

 def test_getCOPEFile():
@@ -440,8 +551,32 @@ def test_getCOPEFile():
                 assert featanalysis.getCOPEFile(featdir, ci) == expect
             else:
                 with pytest.raises(fslpath.PathError):
                     featanalysis.getCOPEFile(featdir, ci)
def test_getZStatFile():
testcases = [
(['analysis.feat/stats/zstat1.nii.gz',
'analysis.feat/stats/zstat2.nii.gz'], True),
(['analysis.feat/stats/zstat1.nii.gz'], True),
(['analysis.feat/stats/zstat0.nii.gz'], False),
(['analysis.feat/stats/zstat1.txt'], False),
]
for paths, shouldPass in testcases:
with tests.testdir(paths) as testdir:
featdir = op.join(testdir, 'analysis.feat')
for zi in range(len(paths)):
expect = op.join(
featdir, 'stats', 'zstat{}.nii.gz'.format(zi + 1))
if shouldPass:
assert featanalysis.getZStatFile(featdir, zi) == expect
else:
with pytest.raises(fslpath.PathError):
featanalysis.getZStatFile(featdir, zi)

 def test_getZStatFile():
     testcases = [
@@ -464,8 +599,31 @@ def test_getZStatFile():
                 assert featanalysis.getZStatFile(featdir, zi) == expect
             else:
                 with pytest.raises(fslpath.PathError):
                     featanalysis.getZStatFile(featdir, zi)
def test_getZFStatFile():
testcases = [
(['analysis.feat/stats/zfstat1.nii.gz',
'analysis.feat/stats/zfstat2.nii.gz'], True),
(['analysis.feat/stats/zfstat1.nii.gz'], True),
(['analysis.feat/stats/zfstat0.nii.gz'], False),
(['analysis.feat/stats/zfstat1.txt'], False),
]
for paths, shouldPass in testcases:
with tests.testdir(paths) as testdir:
featdir = op.join(testdir, 'analysis.feat')
for zi in range(len(paths)):
expect = op.join(
featdir, 'stats', 'zfstat{}.nii.gz'.format(zi + 1))
if shouldPass:
assert featanalysis.getZFStatFile(featdir, zi) == expect
else:
with pytest.raises(fslpath.PathError):
featanalysis.getZFStatFile(featdir, zi)

 def test_getClusterMaskFile():
     testcases = [
@@ -488,4 +646,28 @@ def test_getClusterMaskFile():
                 assert featanalysis.getClusterMaskFile(featdir, ci) == expect
             else:
                 with pytest.raises(fslpath.PathError):
                     featanalysis.getClusterMaskFile(featdir, ci)
def test_getFClusterMaskFile():
testcases = [
(['analysis.feat/cluster_mask_zfstat1.nii.gz',
'analysis.feat/cluster_mask_zfstat2.nii.gz'], True),
(['analysis.feat/cluster_mask_zfstat1.nii.gz'], True),
(['analysis.feat/cluster_mask_zfstat0.nii.gz'], False),
(['analysis.feat/cluster_mask_zfstat1.txt'], False),
]
for paths, shouldPass in testcases:
with tests.testdir(paths) as testdir:
featdir = op.join(testdir, 'analysis.feat')
for ci in range(len(paths)):
expect = op.join(
featdir, 'cluster_mask_zfstat{}.nii.gz'.format(ci + 1))
if shouldPass:
assert featanalysis.getFClusterMaskFile(featdir, ci) == expect
else:
with pytest.raises(fslpath.PathError):
featanalysis.getFClusterMaskFile(featdir, ci)
@@ -103,12 +103,13 @@ with the following commands:
 """

 import os.path as op

+import textwrap as tw
 import numpy as np
 import pytest

-import tests
+import fsl.tests as tests
 from fsl.utils.tempdir import tempdir
 import fsl.data.featdesign   as featdesign
 import fsl.data.featanalysis as featanalysis
@@ -400,6 +401,45 @@ def test_loadDesignMat():
         featdesign.loadDesignMat(badfile)
# fsl/fslpy!469
def test_loadFEATDesignFile():
with tempdir():
with open('design1.con', 'wt') as f:
f.write(tw.dedent("""
/ContrastName1 mycontrast
/NumWaves 2
/NumContrasts 1
/Matrix
10 20
""").strip())
with open('design2.con', 'wt') as f:
f.write(tw.dedent("""
/ContrastName1
/NumWaves 2
/NumContrasts 1
/Matrix
10 20
""").strip())
des1 = featanalysis.loadFEATDesignFile('design1.con')
exp1 = {'ContrastName1': 'mycontrast',
'NumWaves': '2',
'NumContrasts': '1',
'Matrix': '10 20'}
des2 = featanalysis.loadFEATDesignFile('design2.con')
exp2 = {'ContrastName1': '',
'NumWaves': '2',
'NumContrasts': '1',
'Matrix': '10 20'}
assert des1 == exp1
assert des2 == exp2
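
The expected dictionaries show the file format that loadFEATDesignFile
handles: header lines of the form /Name value, followed by a /Matrix block.
A minimal parser consistent with those expectations, not the fslpy
implementation itself:

def load_design_file_sketch(path):
    fields = {}
    with open(path, 'rt') as f:
        lines = [line.strip() for line in f if line.strip() != '']
    for i, line in enumerate(lines):
        if line.startswith('/Matrix'):
            # everything after /Matrix is the matrix data
            fields['Matrix'] = '\n'.join(lines[i + 1:])
            break
        if line.startswith('/'):
            parts = line[1:].split(None, 1)
            # a bare '/ContrastName1' maps to an empty string
            fields[parts[0]] = parts[1] if len(parts) > 1 else ''
    return fields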

 def test_VoxelwiseEVs():
     with tempdir():
         img = tests.make_random_image('image.nii.gz', (10, 10, 10, 10))
...
@@ -16,7 +16,7 @@ import numpy as np
 import pytest

-import tests
+import fsl.tests as tests
 import fsl.data.featimage    as featimage
 import fsl.data.featdesign   as featdesign
 import fsl.data.featanalysis as featanalysis
@@ -88,7 +88,8 @@ def test_FEATImage_attributes():
                 copes=False,
                 zstats=False,
                 residuals=False,
-                clustMasks=False)
+                clustMasks=False,
+                zfstats=False)
         else:
             featdir = op.join(datadir, featdir)
@@ -100,6 +101,7 @@ def test_FEATImage_attributes():
         design   = featdesign.FEATFSFDesign(featdir, settings)
         desmat   = design.getDesign()
         evnames  = [ev.title for ev in design.getEVs()]
+        ftests   = featanalysis.loadFTests(featdir)
         contrastnames, contrasts = featanalysis.loadContrasts(featdir)

         assert np.all(np.isclose(fi.shape, shape))
@@ -115,8 +117,10 @@ def test_FEATImage_attributes():
         assert fi.numEVs()        == desmat.shape[1]
         assert fi.evNames()       == evnames
         assert fi.numContrasts()  == len(contrasts)
+        assert fi.numFTests()     == len(ftests)
         assert fi.contrastNames() == contrastnames
         assert fi.contrasts()     == contrasts
+        assert fi.ftests()        == ftests
         assert np.all(np.isclose(fi.getDesign(), desmat))

         assert fi.thresholds() == featanalysis.getThresholds(settings)
@@ -138,7 +142,7 @@ def test_FEATImage_imageAccessors():
         shape = TEST_ANALYSES[featdir]['shape']
         xform = TEST_ANALYSES[featdir]['xform']

         with tests.testdir() as testdir:

             if 'realdata' not in featdir:
@@ -153,9 +157,10 @@ def test_FEATImage_imageAccessors():
                 shape4D = shape
                 shape   = shape4D[:3]

             fi      = featimage.FEATImage(featdir)
             nevs    = fi.numEVs()
             ncons   = fi.numContrasts()
+            nftests = fi.numFTests()

             # Testing the FEATImage internal cache
             for i in range(2):
@@ -166,6 +171,9 @@ def test_FEATImage_imageAccessors():
                     assert fi.getCOPE(       con).shape == shape
                     assert fi.getZStats(     con).shape == shape
                     assert fi.getClusterMask(con).shape == shape
+                for ft in range(nftests):
+                    assert fi.getZFStats(     ft).shape == shape
+                    assert fi.getFClusterMask(ft).shape == shape

         del fi
         fi = None
...
-format
+format (format)
\ No newline at end of file