
Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Showing with 1142 additions and 97 deletions
@@ -9,8 +9,6 @@ time series from a MELODIC ``.ica`` directory.
"""
from __future__ import print_function
import os.path as op
import sys
import argparse
@@ -18,12 +16,8 @@ import warnings
import numpy as np
# See atlasq.py for explanation
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
import fsl.data.fixlabels as fixlabels
import fsl.data.melodicanalysis as melanalysis
import fsl.data.fixlabels as fixlabels
import fsl.data.melodicanalysis as melanalysis
DTYPE = np.float64
......
@@ -12,19 +12,13 @@ The :func:`main` function is essentially a wrapper around the
"""
from __future__ import print_function
import os.path as op
import sys
import warnings
import logging
import fsl.utils.path as fslpath
# See atlasq.py for explanation
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
import fsl.utils.imcp as imcp
import fsl.data.image as fslimage
import fsl.utils.imcp as imcp
import fsl.data.image as fslimage
usage = """Usage:
@@ -59,6 +53,11 @@ def main(argv=None):
print(usage)
return 1
# When converting to NIFTI2, nibabel
# emits an annoying message via log.warning:
# sizeof_hdr should be 540; set sizeof_hdr to 540
logging.getLogger('nibabel').setLevel(logging.ERROR)
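# Note: this silences nibabel globally for the remainder of the process.
# A scoped alternative (a sketch, not what this changeset does) would
# restore the previous level afterwards:
#
#     nblog    = logging.getLogger('nibabel')
#     oldlevel = nblog.level
#     nblog.setLevel(logging.ERROR)
#     try:
#         pass  # perform the copy/conversion here
#     finally:
#         nblog.setLevel(oldlevel)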
try:
srcs = [fslimage.fixExt(s) for s in srcs]
srcs = fslpath.removeDuplicates(
......
@@ -9,17 +9,11 @@ NIFTI/ANALYZE image files.
"""
from __future__ import print_function
import itertools as it
import glob
import sys
import warnings
import fsl.utils.path as fslpath
# See atlasq.py for explanation
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
import fsl.data.image as fslimage
usage = """
Usage: imglob [-extension/extensions] <list of names>
@@ -27,8 +21,17 @@ Usage: imglob [-extension/extensions] <list of names>
-extensions for image list with full extensions
""".strip()
exts = fslimage.ALLOWED_EXTENSIONS
groups = fslimage.FILE_GROUPS
# The lists below are defined in the
# fsl.data.image class, but are duplicated
# here for performance (to avoid import of
# nibabel/numpy/etc).
exts = ['.nii.gz', '.nii', '.img', '.hdr', '.img.gz', '.hdr.gz']
"""List of supported image file extensions. """
groups = [('.hdr', '.img'), ('.hdr.gz', '.img.gz')]
"""List of known image file groups (image/header file pairs). """
def imglob(paths, output=None):
@@ -61,12 +64,38 @@ def imglob(paths, output=None):
imgfiles = []
# Expand any wildcard paths if provided.
# Depending on the way that imglob is
# invoked, this may not get done by the
# calling shell.
#
# We also have to handle incomplete
# wildcards, e.g. if the user provides
# "img_??", we need to add possible
# file suffixes before it can be
# expanded.
expanded = []
for path in paths:
if any(c in path for c in '*?[]'):
if fslpath.hasExt(path, exts):
globs = [path]
else:
globs = [f'{path}{ext}' for ext in exts]
globs = [glob.glob(g) for g in globs]
expanded.extend(it.chain(*globs))
else:
expanded.append(path)
paths = expanded
# Build a list of all image files (both
# hdr and img and otherwise) that match
for path in paths:
try:
path = fslimage.removeExt(path)
imgfiles.extend(fslimage.addExt(path, unambiguous=False))
path = fslpath.removeExt(path, allowedExts=exts)
imgfiles.extend(fslpath.addExt(path,
allowedExts=exts,
unambiguous=False))
except fslpath.PathError:
continue
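# Example (a sketch of the behaviour above): with img_01.nii.gz and
# img_02.nii.gz on disk, the incomplete wildcard 'img_??' gains each
# suffix in exts before globbing, so
#
#     imglob(['img_??'])
#
# matches both files, whereas 'img_??.nii.gz' already has an extension
# and is globbed as-is.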
......
#!/usr/bin/env python
#
# imln.py - Create symbolic links to image files.
#
# Author: Paul McCarthy <pauldmccarthy@gmail.com>
#
"""This module defines the ``imln`` application, for creating sym-links
to NIFTI image files.
.. note:: When creating links to relative paths, ln requires that the path is
relative to the link location, rather than the invocation
location. This is *not* currently supported by imln, and possibly
never will be.
"""
import os.path as op
import os
import sys
import fsl.utils.path as fslpath
# The lists below are defined in the
# fsl.data.image class, but are duplicated
# here for performance (to avoid import of
# nibabel/numpy/etc).
exts = ['.nii.gz', '.nii',
'.img', '.hdr',
'.img.gz', '.hdr.gz',
'.mnc', '.mnc.gz']
"""List of file extensions that are supported by ``imtest``.
"""
groups = [('.hdr', '.img'), ('.hdr.gz', '.img.gz')]
"""List of known image file groups (image/header file pairs). """
usage = """
Usage: imln <file1> <file2>
Makes a link (called file2) to file1
NB: filenames can be basenames or include an extension
""".strip()
def main(argv=None):
"""``imln`` - create sym-links to images. """
if argv is None:
argv = sys.argv[1:]
if len(argv) != 2:
print(usage)
return 1
target, linkbase = argv
target = fslpath.removeExt(target, exts)
linkbase = fslpath.removeExt(linkbase, exts)
# Target must exist, so we can
# infer the correct extension(s).
# Error on incomplete file groups
# (e.g. a.img without a.hdr).
try:
targets = fslpath.getFileGroup(target,
allowedExts=exts,
fileGroups=groups,
unambiguous=True)
except Exception as e:
print(f'Error: {e}')
return 1
for target in targets:
if not op.exists(target):
continue
ext = fslpath.getExt(target, exts)
link = f'{linkbase}{ext}'
try:
# emulate old imln behaviour - if
# link already exists, it is removed
if op.exists(link):
os.remove(link)
os.symlink(target, link)
except Exception as e:
print(f'Error: {e}')
return 1
return 0
if __name__ == '__main__':
sys.exit(main())
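# Example usage (sketch): given image.nii.gz,
#
#     imln image link
#
# infers the .nii.gz extension from the existing target and creates
# link.nii.gz -> image.nii.gz; for a .hdr/.img pair, each file in the
# group is linked.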
@@ -13,19 +13,13 @@ The :func:`main` function is essentially a wrapper around the
"""
from __future__ import print_function
import os.path as op
import sys
import warnings
import logging
import fsl.utils.path as fslpath
# See atlasq.py for explanation
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
import fsl.utils.imcp as imcp
import fsl.data.image as fslimage
import fsl.utils.imcp as imcp
import fsl.data.image as fslimage
usage = """Usage:
@@ -60,6 +54,11 @@ def main(argv=None):
print(usage)
return 1
# When converting to NIFTI2, nibabel
# emits an annoying message via log.warning:
# sizeof_hdr should be 540; set sizeof_hdr to 540
logging.getLogger('nibabel').setLevel(logging.ERROR)
try:
srcs = [fslimage.fixExt(s) for s in srcs]
srcs = fslpath.removeDuplicates(
......
#!/usr/bin/env python
#
# imrm.py - Remove image files.
#
# Author: Paul McCarthy <paulmc@fmrib.ox.ac.uk>
#
"""This module defines the ``imrm`` application, for removing NIFTI image
files.
"""
import os.path as op
import os
import sys
import fsl.scripts.imglob as imglob
usage = """Usage: imrm <list of image names to remove>
NB: filenames can be basenames or include an extension
""".strip()
def main(argv=None):
"""Removes all images which are specified on the command line. """
if argv is None:
argv = sys.argv[1:]
if len(argv) < 1:
print(usage)
return 1
paths = imglob.imglob(argv, 'all')
for path in paths:
if op.exists(path):
os.remove(path)
return 0
if __name__ == '__main__':
sys.exit(main())
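# Example usage (sketch): 'imrm a b.nii.gz' expands each name via
# imglob (output mode 'all', so header/image pairs are included) and
# removes every existing match, e.g. a.nii.gz, or a.hdr plus a.img.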
#!/usr/bin/env python
#
# imtest.py - Test whether an image file exists or not.
#
# Author: Paul McCarthy <pauldmccarthy@gmail.com>
#
"""The ``imtest`` script can be used to test whether an image file exists or
not, without having to know the file suffix (.nii, .nii.gz, etc).
"""
import os.path as op
import sys
import fsl.utils.path as fslpath
# The lists below are defined in the
# fsl.data.image class, but are duplicated
# here for performance (to avoid import of
# nibabel/numpy/etc).
exts = ['.nii.gz', '.nii',
'.img', '.hdr',
'.img.gz', '.hdr.gz',
'.mnc', '.mnc.gz']
"""List of file extensions that are supported by ``imtest``.
"""
groups = [('.hdr', '.img'), ('.hdr.gz', '.img.gz')]
"""List of known image file groups (image/header file pairs). """
def imtest(path):
"""Returns ``True`` if the given image path exists, False otherwise. """
path = fslpath.removeExt(path, exts)
path = op.realpath(path)
# getFileGroup will raise an error
# if the image (including all
# components - i.e. header and
# image) does not exist
try:
fslpath.getFileGroup(path,
allowedExts=exts,
fileGroups=groups,
unambiguous=True)
return True
except fslpath.PathError:
return False
def main(argv=None):
"""Test if an image path exists, and prints ``'1'`` if it does or ``'0'``
if it doesn't.
"""
if argv is None:
argv = sys.argv[1:]
# emulate old fslio/imtest - always return 0
if len(argv) != 1:
print('0')
return 0
if imtest(argv[0]):
print('1')
else:
print('0')
return 0
if __name__ == '__main__':
sys.exit(main())
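# Example usage (sketch): 'imtest image' prints '1' when image.nii.gz,
# image.img + image.hdr, image.mnc, etc. exists as a complete file
# group, and '0' otherwise - so shell scripts can branch on the output
# without knowing the file suffix.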
#!/usr/bin/env python
#
# remove_ext.py - Remove file extensions from NIFTI image paths
#
# Author: Paul McCarthy <pauldmccarthy@gmail.com>
#
import sys
import fsl.utils.path as fslpath
usage = """Usage: remove_ext <list of image paths to remove extension from>
""".strip()
# This list is defined in the
# fsl.data.image class, but is duplicated
# here for performance (to avoid import of
# nibabel/numpy/etc).
exts = ['.nii.gz', '.nii',
'.img', '.hdr',
'.img.gz', '.hdr.gz',
'.mnc', '.mnc.gz']
"""List of file extensions that are removed by ``remove_ext``. """
def main(argv=None):
"""Removes file extensions from all paths which are specified on the
command line.
"""
if argv is None:
argv = sys.argv[1:]
if len(argv) < 1:
print(usage)
return 1
removed = []
for path in argv:
removed.append(fslpath.removeExt(path, exts))
print(' '.join(removed))
return 0
if __name__ == '__main__':
sys.exit(main())
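# Example usage (sketch): 'remove_ext a.nii.gz b.img c' prints 'a b c' -
# recognised extensions are stripped, and paths without one pass through
# unchanged.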
@@ -10,6 +10,7 @@
import os
import sys
import glob
import hashlib
import shutil
import fnmatch
import logging
@@ -20,14 +21,12 @@ import os.path as op
import numpy as np
import nibabel as nib
from six import StringIO
from io import StringIO
try: from unittest import mock
except ImportError: import mock
from unittest import mock
import fsl.data.image as fslimage
from fsl.utils.tempdir import tempdir
from fsl.utils.tempdir import tempdir
from fsl.utils.platform import platform as fslplatform
@@ -50,7 +49,10 @@ def mockFSLDIR(**kwargs):
if not op.isdir(subdir):
os.makedirs(subdir)
for fname in files:
touch(op.join(subdir, fname))
fname = op.join(subdir, fname)
touch(fname)
if subdir == bindir:
os.chmod(fname, 0o755)
fslplatform.fsldir = fsldir
fslplatform.fsldevdir = None
@@ -68,7 +70,7 @@ def touch(fname):
pass
class CaptureStdout(object):
class CaptureStdout:
"""Context manager which captures stdout and stderr. """
def __init__(self):
@@ -87,6 +89,7 @@ class CaptureStdout(object):
sys.stdout = self.__mock_stdout
sys.stderr = self.__mock_stderr
return self
def __exit__(self, *args, **kwargs):
@@ -145,6 +148,8 @@ def testdir(contents=None, suffix=""):
shutil.rmtree(self.testdir)
return ctx(contents)
testdir.__test__ = False
def make_dummy_files(paths):
"""Creates dummy files for all of the given paths. """
@@ -284,7 +289,8 @@ def make_mock_feat_analysis(featdir,
copes=True,
zstats=True,
residuals=True,
clustMasks=True):
clustMasks=True,
zfstats=True):
if xform is None:
xform = np.eye(4)
@@ -317,6 +323,7 @@
data = np.ravel_multi_index(data, shape)
data = data.reshape(list(shape) + [1]).repeat(timepoints, axis=3)
data[..., :] += range(i, i + timepoints)
data = data.astype(np.int32)
img = nib.nifti1.Nifti1Image(data, xform)
@@ -341,6 +348,11 @@
otherFiles .extend(files)
otherShapes.extend([shape] * len(files))
if zfstats:
files = glob.glob(op.join(featdir, 'stats', 'zfstat*nii.gz'))
otherFiles .extend(files)
otherShapes.extend([shape] * len(files))
if residuals:
files = glob.glob(op.join(featdir, 'stats', 'res4d.nii.gz'))
otherFiles .extend(files)
@@ -428,3 +440,10 @@ def make_random_mask(filename, shape, xform, premask=None, minones=1):
img.save(filename)
return img
def sha256(filename):
hashobj = hashlib.sha256()
with open(filename, 'rb') as f:
hashobj.update(f.read())
return hashobj.hexdigest()
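# Reading the whole file at once is fine for small test files; a chunked
# variant (a sketch, not used in this changeset) would bound memory use
# on large inputs:
#
#     def sha256_chunked(filename, blocksize=1048576):
#         hashobj = hashlib.sha256()
#         with open(filename, 'rb') as f:
#             for block in iter(lambda: f.read(blocksize), b''):
#                 hashobj.update(block)
#         return hashobj.hexdigest()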
@@ -14,8 +14,8 @@ import pytest
import fsl.utils.assertions as assertions
import fsl.utils.tempdir as tempdir
from . import make_random_image
from . import testdir
from fsl.tests import make_random_image
from fsl.tests import testdir
def test_assertFileExists():
@@ -160,14 +160,14 @@ def test_assertIsMelodicDir():
('analysis.ica', [ 'melodic_mix', 'melodic_FTmix'], False),
('analysis.ica', ['melodic_IC.nii.gz', 'melodic_FTmix'], False),
('analysis.ica', ['melodic_IC.nii.gz', 'melodic_mix'], False),
('analysis', ['melodic_IC.nii.gz', 'melodic_mix', 'melodic_FTmix'], False),
('analysis', ['melodic_oIC.nii.gz', 'melodic_mix', 'melodic_FTmix'], False),
('analysis', ['melodic_IC.nii.gz', 'melodic_mix', 'melodic_FTmix'], True),
('analysis', [ 'melodic_mix', 'melodic_FTmix'], False),
]
for dirname, paths, expected in tests:
with testdir(paths, dirname):
if expected:
assertions.assertIsMelodicDir(dirname)
assertions.assertIsMelodicDir('.')
else:
with pytest.raises(AssertionError):
assertions.assertIsMelodicDir(dirname)
......
@@ -13,10 +13,11 @@ import os
import os.path as op
import numpy as np
import mock
from unittest import mock
import pytest
import tests
import fsl.tests as tests
import fsl.utils.image.resample as resample
import fsl.data.atlases as atlases
import fsl.data.image as fslimage
@@ -40,7 +41,8 @@ dummy_atlas_desc = """<?xml version="1.0" encoding="ISO-8859-1"?>
<header>
<name>{name}</name>
<shortname>{shortname}</shortname>
<type>Label</type>
<type>{atlastype}</type>
{extraheader}
<images>
<imagefile>/{shortname}/{filename}</imagefile>
<summaryimagefile>/{shortname}/My{filename}</summaryimagefile>
@@ -52,7 +54,8 @@ dummy_atlas_desc = """<?xml version="1.0" encoding="ISO-8859-1"?>
</data>
</atlas>
"""
def _make_dummy_atlas(savedir, name, shortName, filename):
def _make_dummy_atlas(
savedir, name, shortName, filename, atlastype='Label', extraheader=''):
mladir = op.join(savedir, shortName)
mlaxmlfile = op.join(savedir, '{}.xml'.format(shortName))
mlaimgfile = op.join(savedir, shortName, '{}.nii.gz'.format(filename))
@@ -70,7 +73,9 @@ def _make_dummy_atlas(savedir, name, shortName, filename):
desc = dummy_atlas_desc.format(
name=name,
shortname=shortName,
filename=filename)
filename=filename,
atlastype=atlastype,
extraheader=extraheader)
f.write(desc)
return mlaxmlfile
@@ -142,6 +147,28 @@ def test_AtlasDescription():
registry.getAtlasDescription('non-existent-atlas')
def test_StatisticHeader():
with tests.testdir() as testdir:
hdr = '<statistic>T</statistic>' \
'<units></units>' \
'<precision>3</precision>' \
'<upper>75</upper>'
xmlfile = _make_dummy_atlas(testdir,
'statlas',
'STA',
'StAtlas',
atlastype='Statistic',
extraheader=hdr)
desc = atlases.AtlasDescription(xmlfile, 'StAtlas')
assert desc.atlasType == 'statistic'
assert desc.statistic == 'T'
assert desc.units == ''
assert desc.precision == 3
assert desc.lower == 0
assert desc.upper == 75
def test_add_remove_atlas():
with tests.testdir() as testdir:
@@ -225,8 +252,7 @@ def test_load_atlas():
reg = atlases.registry
reg.rescanAtlases()
probatlas = reg.loadAtlas('harvardoxford-cortical',
calcRange=False, loadData=False)
probatlas = reg.loadAtlas('harvardoxford-cortical')
probsumatlas = reg.loadAtlas('harvardoxford-cortical', loadSummary=True)
lblatlas = reg.loadAtlas('talairach')
@@ -250,6 +276,9 @@ def test_get():
assert (target == atlas.get(index=label.index).data).all()
assert (target == atlas.get(value=label.value).data).all()
assert (target == atlas.get(name=label.name).data).all()
if atlas is lblatlas:
target = target * label.value
assert (target == atlas.get(value=label.value, binary=False).data).all()
def test_find():
@@ -257,8 +286,7 @@ def test_find():
reg = atlases.registry
reg.rescanAtlases()
probatlas = reg.loadAtlas('harvardoxford-cortical',
calcRange=False, loadData=False)
probatlas = reg.loadAtlas('harvardoxford-cortical')
probsumatlas = reg.loadAtlas('harvardoxford-cortical', loadSummary=True)
lblatlas = reg.loadAtlas('talairach')
@@ -305,8 +333,7 @@ def test_prepareMask():
reg = atlases.registry
reg.rescanAtlases()
probatlas = reg.loadAtlas('harvardoxford-cortical',
loadData=False, calcRange=False)
probatlas = reg.loadAtlas('harvardoxford-cortical')
probsumatlas = reg.loadAtlas('harvardoxford-cortical', loadSummary=True)
lblatlas = reg.loadAtlas('talairach')
......
@@ -17,7 +17,7 @@ import fsl.transform.affine as affine
import fsl.utils.image.resample as resample
import fsl.utils.cache as cache
from . import (testdir, make_random_mask)
from fsl.tests import (testdir, make_random_mask)
pytestmark = pytest.mark.fsltest
@@ -41,17 +41,16 @@ _atlases = cache.Cache()
def _get_atlas(atlasID, res, summary=False):
atlas = _atlases.get((atlasID, res, summary), default=None)
if atlas is None:
atlas = fslatlases.loadAtlas(atlasID,
loadSummary=summary,
resolution=res)
# We need some atlases to be loaded into memory,
# so we can use boolean-mask-based indexing
if summary or atlasID in ('talairach', 'striatum-structural',
'jhu-labels', 'smatt'):
kwargs = {}
else:
kwargs = {'loadData' : False,
'calcRange' : False}
atlas.data
atlas = fslatlases.loadAtlas(atlasID,
loadSummary=summary,
resolution=res,
**kwargs)
_atlases.put((atlasID, res, summary), atlas)
return atlas
@@ -85,7 +84,7 @@ def _get_zero_mask(aimg):
elif isinstance(aimg, fslatlases.ProbabilisticAtlas):
# Keep memory usage down
zmask = np.ones(aimg.shape[:3], dtype=np.bool)
zmask = np.ones(aimg.shape[:3], dtype=bool)
for vol in range(aimg.shape[-1]):
zmask = np.logical_and(zmask, aimg[..., vol] == 0)
@@ -218,8 +217,8 @@ def _eval_coord_voxel_query(atlas, query, qtype, qin):
elif qin == 'out':
expval = []
assert atlas.proportions( query, voxel=voxel) == expval
assert atlas.coordProportions(query, voxel=voxel) == expval
assert atlas.values( query, voxel=voxel) == expval
assert atlas.coordValues(query, voxel=voxel) == expval
if isinstance(atlas, fslatlases.LabelAtlas): evalLabel()
elif isinstance(atlas, fslatlases.ProbabilisticAtlas): evalProb()
@@ -284,7 +283,7 @@ def _eval_mask_query(atlas, query, qtype, qin):
rmask = resample.resample(
mask, atlas.shape[:3], dtype=np.float32, order=0)[0]
rmask = np.array(rmask, dtype=np.bool)
rmask = np.array(rmask, dtype=bool)
def evalLabel():
@@ -343,13 +342,13 @@ def _eval_mask_query(atlas, query, qtype, qin):
if qin == 'out':
with pytest.raises(fslatlases.MaskError):
atlas.maskProportions(mask)
atlas.maskValues(mask)
with pytest.raises(fslatlases.MaskError):
atlas.proportions( mask)
atlas.values( mask)
return
props = atlas. proportions(mask)
props2 = atlas.maskProportions(mask)
props = atlas. values(mask)
props2 = atlas.maskValues(mask)
assert np.all(np.isclose(props, props2))
......
#!/usr/bin/env python
#
# test_bids.py -
#
# Author: Paul McCarthy <pauldmccarthy@gmail.com>
#
import json
import os.path as op
import itertools as it
from pathlib import Path
import pytest
from fsl.utils.tempdir import tempdir
import fsl.utils.bids as fslbids
def test_parseFilename():
badtests = ['bad_file.txt']
for test in badtests:
with pytest.raises(ValueError):
fslbids.parseFilename(test)
tests = [
('sub-01_ses-01_t1w.nii.gz',
({'sub' : '01', 'ses' : '01'}, 't1w')),
('a-1_b-2_c-3_d-4_e.json',
({'a' : '1', 'b' : '2', 'c' : '3', 'd' : '4'}, 'e')),
]
for filename, expect in tests:
assert fslbids.parseFilename(filename) == expect
def test_isBIDSDir():
with tempdir():
assert not fslbids.isBIDSDir('.')
with tempdir():
Path('dataset_description.json').touch()
assert fslbids.isBIDSDir('.')
def test_inBIDSDir():
with tempdir():
Path('a/b/c').mkdir(parents=True)
Path('dataset_description.json').touch()
assert fslbids.inBIDSDir(Path('.'))
assert fslbids.inBIDSDir(Path('a'))
assert fslbids.inBIDSDir(Path('a/b'))
assert fslbids.inBIDSDir(Path('a/b/c'))
with tempdir():
Path('a/b/c').mkdir(parents=True)
assert not fslbids.inBIDSDir(Path('.'))
assert not fslbids.inBIDSDir(Path('a'))
assert not fslbids.inBIDSDir(Path('a/b'))
assert not fslbids.inBIDSDir(Path('a/b/c'))
def test_isBIDSFile():
goodfiles = [
Path('sub-01_ses-01_t1w.nii.gz'),
Path('sub-01_ses-01_t1w.nii'),
Path('sub-01_ses-01_t1w.json'),
Path('a-1_b-2_c-3_d-4_e.nii.gz'),
Path('sub-01_ses-01_t1w.txt'),
]
badfiles = [
Path('sub-01_ses-01.nii.gz'),
Path('sub-01_ses-01_t1w'),
Path('sub-01_ses-01_t1w.'),
Path('sub_ses-01_t1w.nii.gz'),
Path('sub-01_ses_t1w.nii.gz'),
]
with tempdir():
Path('dataset_description.json').touch()
for f in goodfiles: assert fslbids.isBIDSFile(f)
for f in badfiles: assert not fslbids.isBIDSFile(f)
with tempdir():
for f in it.chain(goodfiles, badfiles):
assert not fslbids.isBIDSFile(f)
def test_loadMetadata():
dd = Path('dataset_description.json')
t1 = Path('sub-01/func/sub-01_task-stim_bold.nii.gz')
json1 = Path('sub-01/func/sub-01_task-stim_bold.json')
json2 = Path('sub-01/sub-01_bold.json')
json3 = Path('sub-01_t1w.json')
json4 = Path('sub-01/task-stim_bold.json')
meta1 = {'a' : '1', 'b' : '2'}
meta2 = {'a' : '10', 'c' : '3'}
meta3 = {'a' : '109', 'b' : '99'}
meta4 = {'c' : '9', 'd' : '5'}
with tempdir():
dd.touch()
Path(op.dirname(t1)).mkdir(parents=True)
t1.touch()
assert fslbids.loadMetadata(t1) == {}
json1.write_text(json.dumps(meta1))
assert fslbids.loadMetadata(t1) == meta1
json2.write_text(json.dumps(meta2))
assert fslbids.loadMetadata(t1) == {**meta2, **meta1}
json3.write_text(json.dumps(meta3))
assert fslbids.loadMetadata(t1) == {**meta2, **meta1}
json4.write_text(json.dumps(meta4))
assert fslbids.loadMetadata(t1) == {**meta4, **meta2, **meta1}
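# The merge order encodes the inheritance rule being tested: in
# {**meta4, **meta2, **meta1}, later entries win, so sidecar files
# closer to the image (json1) override those further up the tree
# (json2, then json4).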
def test_loadMetadata_control_characters():
dd = Path('dataset_description.json')
t1 = Path('sub-01/func/sub-01_task-stim_bold.nii.gz')
json1 = Path('sub-01/func/sub-01_task-stim_bold.json')
meta1 = {"a" : "1", "b" : "2\x19\x20"}
smeta1 = '{"a" : "1", "b" : "2\x19\x20"}'
with tempdir():
dd.touch()
Path(op.dirname(t1)).mkdir(parents=True)
t1.touch()
assert fslbids.loadMetadata(t1) == {}
json1.write_text(smeta1)
assert fslbids.loadMetadata(t1) == meta1
def test_loadMetadata_symlinked():
ddreal = Path('a')
t1real = Path('b')
j1real = Path('c')
j2real = Path('d')
j3real = Path('e')
j4real = Path('f')
dd = Path('data/dataset_description.json')
t1 = Path('data/sub-01/func/sub-01_task-stim_bold.nii.gz')
json1 = Path('data/sub-01/func/sub-01_task-stim_bold.json')
json2 = Path('data/sub-01/sub-01_bold.json')
json3 = Path('data/sub-01_t1w.json')
json4 = Path('data/sub-01/task-stim_bold.json')
meta1 = {'a' : '1', 'b' : '2'}
meta2 = {'a' : '10', 'c' : '3'}
meta3 = {'a' : '109', 'b' : '99'}
meta4 = {'c' : '9', 'd' : '5'}
with tempdir():
ddreal.touch()
t1real.touch()
j1real.write_text(json.dumps(meta1))
j2real.write_text(json.dumps(meta2))
j3real.write_text(json.dumps(meta3))
j4real.write_text(json.dumps(meta4))
Path(op.dirname(t1)).mkdir(parents=True)
dd .symlink_to(op.join('..', ddreal))
t1 .symlink_to(op.join('..', '..', '..', t1real))
json1.symlink_to(op.join('..', '..', '..', j1real))
json2.symlink_to(op.join('..', '..', j2real))
json3.symlink_to(op.join('..', j3real))
json4.symlink_to(op.join('..', '..', j4real))
assert fslbids.loadMetadata(t1) == {**meta4, **meta2, **meta1}
File moved
@@ -113,7 +113,55 @@ def test_expiry():
with pytest.raises(cache.Expired):
c.get(0)
with pytest.raises(cache.Expired):
c.get(1)
assert c.get(1, default='default') == 'default'
# And that the cache is empty
assert len(c) == 0
def test_lru():
c = cache.Cache(maxsize=3, lru=True)
c[0] = '0'
c[1] = '1'
c[2] = '2'
c[3] = '3'
# normal behaviour - first inserted
# is dropped
with pytest.raises(KeyError):
assert c.get(0)
# lru behaviour - oldest accessed is
# dropped
c[1]
c[4] = '4'
with pytest.raises(KeyError):
c[2]
c[1]
c[3]
c[4]
assert len(c) == 3
def test_accessors():
c = cache.Cache(maxsize=3)
c[0] = '0'
c[1] = '1'
c[2] = '2'
c[3] = '3'
assert list(c.keys()) == [ 1, 2, 3]
assert list(c.values()) == ['1', '2', '3']
assert list(c.items()) == [(1, '1'), (2, '2'), (3, '3')]
assert 0 not in c
assert 1 in c
assert 2 in c
assert 3 in c
from fsl.data import cifti
import os.path as op
import numpy as np
import nibabel as nib
from numpy import testing
import fsl.tests as tests
from nibabel.cifti2 import cifti2_axes
def volumetric_brain_model():
mask = np.random.randint(2, size=(10, 10, 10)) > 0
return cifti2_axes.BrainModelAxis.from_mask(mask, affine=np.eye(4))
def surface_brain_model():
mask = np.random.randint(2, size=100) > 0
return cifti2_axes.BrainModelAxis.from_mask(mask, name='cortex')
def volumetric_parcels(return_mask=False):
mask = np.random.randint(5, size=(10, 10, 10))
axis = cifti2_axes.ParcelsAxis(
[f'vol_{idx}' for idx in range(1, 5)],
voxels=[np.stack(np.where(mask == idx), axis=-1) for idx in range(1, 5)],
vertices=[{} for _ in range(1, 5)],
volume_shape=mask.shape,
affine=np.eye(4),
)
if return_mask:
return axis, mask
else:
return axis
def surface_parcels(return_mask=False):
mask = np.random.randint(5, size=100)
axis = cifti2_axes.ParcelsAxis(
[f'surf_{idx}' for idx in range(1, 5)],
voxels=[np.zeros((0, 3), dtype=int) for _ in range(1, 5)],
vertices=[{'CIFTI_STRUCTURE_CORTEX': np.where(mask == idx)[0]} for idx in range(1, 5)],
nvertices={'CIFTI_STRUCTURE_CORTEX': 100},
)
if return_mask:
return axis, mask
else:
return axis
def gen_data(axes):
return np.random.randn(*(5 if ax is None else len(ax) for ax in axes))
def test_read_gifti():
testdir = op.join(op.dirname(__file__), 'testdata')
shapefile = op.join(testdir, 'example.shape.gii')
ref_data = nib.load(shapefile).darrays[0].data
data = cifti.load(shapefile)
assert isinstance(data, cifti.DenseCifti)
assert data.arr.shape == (642, )
testing.assert_equal(data.arr, ref_data)
testing.assert_equal(data.brain_model_axis.vertex, np.arange(642))
assert len(data.brain_model_axis.nvertices) == 1
assert data.brain_model_axis.nvertices['CIFTI_STRUCTURE_OTHER'] == 642
data = cifti.load(shapefile, mask_values=(ref_data[0], ))
assert isinstance(data, cifti.DenseCifti)
assert data.arr.shape == (np.sum(ref_data != ref_data[0]), )
testing.assert_equal(data.arr, ref_data[ref_data != ref_data[0]])
testing.assert_equal(data.brain_model_axis.vertex, np.where(ref_data != ref_data[0])[0])
assert len(data.brain_model_axis.nvertices) == 1
assert data.brain_model_axis.nvertices['CIFTI_STRUCTURE_OTHER'] == 642
cifti.load(op.join(testdir, 'example'))
def test_read_nifti():
mask = np.random.randint(2, size=(10, 10, 10)) > 0
values = np.random.randn(10, 10, 10)
for mask_val in (0, np.nan):
values[~mask] = mask_val
affine = np.concatenate((np.random.randn(3, 4), np.array([0, 0, 0, 1])[None, :]), axis=0)
with tests.testdir():
nib.Nifti1Image(values, affine).to_filename("masked_image.nii.gz")
data = cifti.load("masked_image")
assert isinstance(data, cifti.DenseCifti)
testing.assert_equal(data.arr, values[mask])
testing.assert_allclose(data.brain_model_axis.affine, affine)
assert len(data.brain_model_axis.nvertices) == 0
def check_io(data: cifti.Cifti, extension):
with tests.testdir():
data.save("test")
assert op.isfile(f'test.{extension}.nii')
loaded = cifti.load("test")
if data.arr.ndim == 1:
testing.assert_equal(data.arr, loaded.arr[0])
assert data.axes == loaded.axes[1:]
else:
testing.assert_equal(data.arr, loaded.arr)
assert data.axes == loaded.axes
def test_io_cifti():
for cifti_class, cifti_type, main_axis_options in (
(cifti.DenseCifti, 'd', (volumetric_brain_model(), surface_brain_model(),
volumetric_brain_model() + surface_brain_model())),
(cifti.ParcelCifti, 'p', (volumetric_parcels(), surface_parcels(),
volumetric_parcels() + surface_parcels())),
):
for main_axis in main_axis_options:
with tests.testdir():
data_1d = cifti_class(gen_data([main_axis]), [main_axis])
check_io(data_1d, f'{cifti_type}scalar')
connectome = cifti_class(gen_data([main_axis, main_axis]), (main_axis, main_axis))
check_io(connectome, f'{cifti_type}conn')
scalar_axis = cifti2_axes.ScalarAxis(['A', 'B', 'C'])
scalar = cifti_class(gen_data([scalar_axis, main_axis]), (scalar_axis, main_axis))
check_io(scalar, f'{cifti_type}scalar')
label_axis = cifti2_axes.LabelAxis(['A', 'B', 'C'], {1: ('some parcel', (1, 0, 0, 1))})
label = cifti_class(gen_data([label_axis, main_axis]), (label_axis, main_axis))
check_io(label, f'{cifti_type}label')
series_axis = cifti2_axes.SeriesAxis(10, 3, 50, unit='HERTZ')
series = cifti_class(gen_data([series_axis, main_axis]), (series_axis, main_axis))
check_io(series, f'{cifti_type}tseries')
if cifti_type == 'd':
parcel_axis = surface_parcels()
dpconn = cifti_class(gen_data([parcel_axis, main_axis]), (parcel_axis, main_axis))
check_io(dpconn, 'dpconn')
else:
dense_axis = surface_brain_model()
pdconn = cifti_class(gen_data([dense_axis, main_axis]), (dense_axis, main_axis))
check_io(pdconn, 'pdconn')
def test_extract_dense():
vol_bm = volumetric_brain_model()
surf_bm = surface_brain_model()
for bm in (vol_bm + surf_bm, surf_bm + vol_bm):
for ndim, no_other_axis in ((1, True), (2, False), (2, True)):
if ndim == 1:
data = cifti.DenseCifti(gen_data([bm]), [bm])
else:
scl = cifti2_axes.ScalarAxis(['A', 'B', 'C'])
data = cifti.DenseCifti(gen_data([scl, bm]),
[None if no_other_axis else scl, bm])
# extract volume
ref_arr = data.arr[..., data.brain_model_axis.volume_mask]
vol_image = data.to_image(fill=np.nan)
if ndim == 1:
assert vol_image.shape == data.brain_model_axis.volume_shape
else:
assert vol_image.shape == data.brain_model_axis.volume_shape + (3, )
assert np.isfinite(vol_image.data).sum() == len(vol_bm) * (3 if ndim == 2 else 1)
testing.assert_equal(vol_image.data[tuple(vol_bm.voxel.T)], ref_arr.T)
from_image = cifti.DenseCifti.from_image(vol_image)
assert from_image.brain_model_axis == vol_bm
testing.assert_equal(from_image.arr, ref_arr)
# extract surface
ref_arr = data.arr[..., data.brain_model_axis.surface_mask]
mask, surf_data = data.surface('cortex', partial=True)
assert surf_data.shape[-1] < 100
testing.assert_equal(ref_arr, surf_data)
testing.assert_equal(surf_bm.vertex, mask)
surf_data_full = data.surface('cortex', fill=np.nan)
assert surf_data_full.shape[-1] == 100
mask_full = np.isfinite(surf_data_full)
if ndim == 2:
assert (mask_full.any(0) == mask_full.all(0)).all()
mask_full = mask_full[0]
assert mask_full.sum() == len(surf_bm)
assert mask_full[..., mask].sum() == len(surf_bm)
testing.assert_equal(surf_data_full[..., mask_full], ref_arr)
def test_extract_parcel():
vol_parcel, vol_mask = volumetric_parcels(return_mask=True)
surf_parcel, surf_mask = surface_parcels(return_mask=True)
parcel = vol_parcel + surf_parcel
for ndim, no_other_axis in ((1, True), (2, False), (2, True)):
if ndim == 1:
data = cifti.ParcelCifti(gen_data([parcel]), [parcel])
else:
scl = cifti2_axes.ScalarAxis(['A', 'B', 'C'])
data = cifti.ParcelCifti(gen_data([scl, parcel]),
[None if no_other_axis else scl, parcel])
# extract volume
vol_image = data.to_image(fill=np.nan)
if ndim == 1:
assert vol_image.shape == data.parcel_axis.volume_shape
else:
assert vol_image.shape == data.parcel_axis.volume_shape + (3, )
assert np.isfinite(vol_image.data).sum() == np.sum(vol_mask != 0) * (3 if ndim == 2 else 1)
if ndim == 1:
testing.assert_equal(vol_mask != 0, np.isfinite(vol_image.data))
for idx in range(1, 5):
testing.assert_allclose(vol_image.data[vol_mask == idx], data.arr[..., idx - 1])
else:
for idx in range(3):
testing.assert_equal(vol_mask != 0, np.isfinite(vol_image.data[..., idx]))
for idx2 in range(1, 5):
testing.assert_allclose(vol_image.data[vol_mask == idx2, idx], data.arr[idx, idx2 - 1])
# extract surface
mask, surf_data = data.surface('cortex', partial=True)
assert surf_data.shape[-1] == (surf_mask != 0).sum()
assert (surf_mask[mask] != 0).all()
print(data.arr)
for idx in range(1, 5):
if ndim == 1:
testing.assert_equal(surf_data.T[surf_mask[mask] == idx], data.arr[idx + 3])
else:
for idx2 in range(3):
testing.assert_equal(surf_data.T[surf_mask[mask] == idx, idx2], data.arr[idx2, idx + 3])
surf_data_full = data.surface('cortex', partial=False)
assert surf_data_full.shape[-1] == 100
if ndim == 1:
testing.assert_equal(np.isfinite(surf_data_full), surf_mask != 0)
for idx in range(1, 5):
testing.assert_equal(surf_data_full.T[surf_mask == idx], data.arr[idx + 3])
else:
for idx2 in range(3):
testing.assert_equal(np.isfinite(surf_data_full)[idx2], (surf_mask != 0))
for idx in range(1, 5):
testing.assert_equal(surf_data_full.T[surf_mask == idx, idx2], data.arr[idx2, idx + 3])
def test_brainstructure():
for primary in ['cortex', 'cerebellum']:
for secondary in [None, 'white', 'pial']:
for gtype in [None, 'volume', 'surface']:
for orientation in ['left', 'right', 'both']:
bst = cifti.BrainStructure(primary, secondary, orientation, gtype)
print(bst.cifti)
assert bst.cifti == 'CIFTI_STRUCTURE_%s%s' % (primary.upper(), '' if orientation == 'both' else '_' + orientation.upper())
assert bst.gifti['AnatomicalStructurePrimary'][:len(primary)] == primary.capitalize()
assert len(bst.gifti) == (1 if secondary is None else 2)
if secondary is not None:
assert bst.gifti['AnatomicalStructureSecondary'] == secondary.capitalize()
assert bst == cifti.BrainStructure(primary, secondary, orientation, gtype)
assert bst == bst
assert bst != cifti.BrainStructure('Thalamus', secondary, orientation, gtype)
if secondary is None:
assert bst == cifti.BrainStructure(primary, 'midplane', orientation, gtype)
else:
assert bst != cifti.BrainStructure(primary, 'midplane', orientation, gtype)
if (gtype == 'volume' and primary == 'cortex') or (gtype == 'surface' and primary != 'cortex'):
assert cifti.BrainStructure.from_string(bst.cifti) != bst
else:
assert cifti.BrainStructure.from_string(bst.cifti) == bst
assert cifti.BrainStructure.from_string(bst.cifti).secondary is None
@@ -7,12 +7,22 @@ import pytest
import fsl.utils.deprecated as deprecated
# the line number of the warning is hard coded in
# the unit tests below. Don't change the line number!
# these get updated in the relevant functions
WARNING_LINE_NUMBER = None
DEPRECATED_LINE_NUMBER = None
def _linenum(pattern):
with open(__file__, 'rt') as f:
for i, line in enumerate(f.readlines(), 1):
if pattern in line:
return i
return -1
def emit_warning():
deprecated.warn('blag', vin='1.0.0', rin='2.0.0', msg='yo')
WARNING_LINE_NUMBER = 13
global WARNING_LINE_NUMBER
WARNING_LINE_NUMBER = _linenum('deprecated.warn(\'blag\'')
@deprecated.deprecated(vin='1.0.0', rin='2.0.0', msg='yo')
@@ -20,9 +30,9 @@ def depfunc():
pass
def call_dep_func():
depfunc()
DEPRECATED_LINE_NUMBER = 23
depfunc() # mark
global DEPRECATED_LINE_NUMBER
DEPRECATED_LINE_NUMBER = _linenum('depfunc() # mark')
def _check_warning(w, name, lineno):
......
#!/usr/bin/env python
#
# These tests require an internet connection, and will only work on Linux.
#
import os.path as op
import os
import functools as ft
import subprocess as sp
import tarfile
import zipfile
import random
import string
import binascii
import contextlib
import urllib.request as request
from unittest import mock
import pytest
import fsl.data.dicom as fsldcm
import fsl.utils.tempdir as tempdir
from fsl.tests import mockFSLDIR, touch
datadir = op.join(op.dirname(__file__), 'testdata')
pytestmark = pytest.mark.dicomtest
@contextlib.contextmanager
def install_dcm2niix(version='1.0.20220720'):
filenames = {
'1.0.20201102' : 'v1.0.20201102/dcm2niix_lnx.zip',
'1.0.20190902' : 'v1.0.20190902/dcm2niix_lnx.zip',
'1.0.20181125' : 'v1.0.20181125/dcm2niix_25-Nov-2018_lnx.zip',
'1.0.20171017' : 'v1.0.20171017/dcm2niix_18-Oct-2017_lnx.zip',
'1.0.20220720' : 'v1.0.20220720/dcm2niix_lnx.zip',
}
prefix = 'https://github.com/rordenlab/dcm2niix/releases/download/'
url = prefix + filenames.get(version, f'v{version}/dcm2niix_lnx.zip')
with tempdir.tempdir() as td:
request.urlretrieve(url, 'dcm2niix.zip')
with zipfile.ZipFile('dcm2niix.zip', 'r') as f:
f.extractall('.')
os.chmod(op.join(td, 'dcm2niix'), 0o755)
path = op.abspath('dcm2niix')
with mock.patch('fsl.data.dicom.dcm2niix', return_value=path):
try:
yield
finally:
fsldcm.installedVersion.invalidate()
def test_disabled():
with mock.patch('fsl.data.dicom.enabled', return_value=False):
with pytest.raises(RuntimeError):
fsldcm.scanDir('.')
with pytest.raises(RuntimeError):
fsldcm.loadSeries({})
def test_dcm2niix():
"""
"""
env = os.environ.copy()
env.pop('FSLDIR', None)
with tempdir.tempdir() as td:
env['PATH'] = td
with mock.patch('os.environ', env):
assert fsldcm.dcm2niix() == 'dcm2niix'
bindir = op.join(td, 'bin')
dcm2niix = op.join(bindir, 'dcm2niix')
os.makedirs(bindir)
touch(dcm2niix)
os.chmod(dcm2niix, 0o755)
env['PATH'] = bindir
with mock.patch('os.environ', env):
assert fsldcm.dcm2niix() == dcm2niix
with mockFSLDIR(bin=['dcm2niix']) as fsldir:
env['FSLDIR'] = fsldir
dcm2niix = op.join(fsldir, 'bin', 'dcm2niix')
with mock.patch('os.environ', env):
assert fsldcm.dcm2niix() == dcm2niix
def test_installedVersion():
tests = [
('1.0.20190902', (1, 0, 2019, 9, 2)),
('1.0.20181125', (1, 0, 2018, 11, 25)),
('1.0.20171017', (1, 0, 2017, 10, 17))]
for version, expect in tests:
fsldcm.installedVersion.invalidate()
with install_dcm2niix(version):
got = fsldcm.installedVersion()
assert got == expect
def test_enabled():
try:
with install_dcm2niix('1.0.20190902'):
fsldcm.installedVersion.invalidate()
assert fsldcm.enabled()
# test dcm2niix not present
with mock.patch('subprocess.check_output',
side_effect=Exception()):
fsldcm.installedVersion.invalidate()
assert not fsldcm.enabled()
# test presence of different versions
tests = [(b'version v2.1.20191212', True),
(b'version v1.0.20190902', True),
(b'version v1.0.20171216', True),
(b'version v1.0.20171215', True),
(b'version v1.0.20171214', False),
(b'version v1.0.20160930', False),
(b'version v1.0.20160929', False),
(b'version v0.0.00000000', False),
(b'version blurgh', False)]
for verstr, expected in tests:
fsldcm.installedVersion.invalidate()
with mock.patch('subprocess.check_output', return_value=verstr):
assert fsldcm.enabled() == expected
finally:
fsldcm.installedVersion.invalidate()
def test_scanDir():
with install_dcm2niix():
series = fsldcm.scanDir('.')
assert len(series) == 0
datafile = op.join(datadir, 'example_dicom.tbz2')
with tarfile.open(datafile) as f:
f.extractall(filter='data')
series = fsldcm.scanDir('.')
assert len(series) == 2
for s in series:
assert s['PatientName'] in ('MCCARTHY_PAUL',
'MCCARTHY^PAUL',
'MCCARTHY_PAUL_2',
'MCCARTHY^PAUL^2')
def test_seriesCRC():
RANDOM = object()
tests = [
({'SeriesInstanceUID' : 'hello-world'}, '2983461467'),
({'SeriesInstanceUID' : RANDOM, 'EchoNumber' : 0}, RANDOM),
({'SeriesInstanceUID' : RANDOM, 'EchoNumber' : 1}, RANDOM),
({'SeriesInstanceUID' : RANDOM, 'EchoNumber' : 2}, RANDOM),
({'SeriesInstanceUID' : RANDOM, 'EchoNumber' : 3}, RANDOM),
]
for series, expect in tests:
series = dict(series)
if expect is RANDOM:
expect = ''.join([random.choice(string.ascii_letters + string.digits)
for i in range(30)])
series['SeriesInstanceUID'] = expect
expect = str(binascii.crc32(expect.encode()))
echo = series.get('EchoNumber', 0)
if echo > 1:
expect += '.{}'.format(echo)
assert fsldcm.seriesCRC(series) == expect
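# The expectation built above (a sketch of the behaviour under test, not
# the implementation): seriesCRC returns str(binascii.crc32(uid.encode()))
# for the series' SeriesInstanceUID, with '.<echo>' appended when
# EchoNumber is greater than 1.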
def test_loadSeries():
# test a pre-CRC and a post-CRC version
for version in ('1.0.20181125', '1.0.20201102'):
with install_dcm2niix(version):
datafile = op.join(datadir, 'example_dicom.tbz2')
with tarfile.open(datafile) as f:
f.extractall()
dcmdir = os.getcwd()
series = fsldcm.scanDir(dcmdir)
expShape = (512, 512, 1)
for s in series:
imgs = fsldcm.loadSeries(s)
for img in imgs:
assert img.dicomDir == dcmdir
assert img.shape == expShape
assert img[:].shape == expShape
assert img.getMeta('PatientName') in ('MCCARTHY_PAUL',
'MCCARTHY^PAUL',
'MCCARTHY_PAUL_2',
'MCCARTHY^PAUL^2')
assert 'PatientName' in img.metaKeys()
assert 'MCCARTHY_PAUL' in img.metaValues() or \
'MCCARTHY^PAUL' in img.metaValues() or \
'MCCARTHY_PAUL_2' in img.metaValues() or \
'MCCARTHY^PAUL^2' in img.metaValues()
assert ('PatientName', 'MCCARTHY_PAUL') in img.metaItems() or \
('PatientName', 'MCCARTHY^PAUL') in img.metaItems() or \
('PatientName', 'MCCARTHY_PAUL_2') in img.metaItems() or \
('PatientName', 'MCCARTHY^PAUL^2') in img.metaItems()
@@ -10,7 +10,7 @@ import numpy as np
import pytest
import tests
import fsl.tests as tests
import fsl.data.dtifit as dtifit
import fsl.data.image as fslimage
@@ -169,12 +169,12 @@ def test_DTIFitTensor():
l2file = op.join(testdir, 'dti_L2.nii')
l3file = op.join(testdir, 'dti_L3.nii')
v1 = tests.make_random_image(v1file, (5, 5, 5, 3)).get_data()
v2 = tests.make_random_image(v2file, (5, 5, 5, 3)).get_data()
v3 = tests.make_random_image(v3file, (5, 5, 5, 3)).get_data()
l1 = tests.make_random_image(l1file, (5, 5, 5)) .get_data()
l2 = tests.make_random_image(l2file, (5, 5, 5)) .get_data()
l3 = tests.make_random_image(l3file, (5, 5, 5)) .get_data()
v1 = np.asanyarray(tests.make_random_image(v1file, (5, 5, 5, 3)).dataobj)
v2 = np.asanyarray(tests.make_random_image(v2file, (5, 5, 5, 3)).dataobj)
v3 = np.asanyarray(tests.make_random_image(v3file, (5, 5, 5, 3)).dataobj)
l1 = np.asanyarray(tests.make_random_image(l1file, (5, 5, 5)) .dataobj)
l2 = np.asanyarray(tests.make_random_image(l2file, (5, 5, 5)) .dataobj)
l3 = np.asanyarray(tests.make_random_image(l3file, (5, 5, 5)) .dataobj)
dtiobj = dtifit.DTIFitTensor(testdir)
......
@@ -5,6 +5,7 @@
# Author: Paul McCarthy <pauldmccarthy@gmail.com>
#
import pathlib
import numpy as np
import nibabel as nib
@@ -12,7 +13,7 @@ import nibabel as nib
import fsl.utils.tempdir as tempdir
import fsl.utils.ensure as ensure
from . import make_random_image
from fsl.tests import make_random_image
def test_ensureIsImage():
@@ -22,12 +23,14 @@ def test_ensureIsImage():
assert ensure.ensureIsImage(img) is img
loaded = [ensure.ensureIsImage('image.nii'),
ensure.ensureIsImage('image')]
loaded = [ensure.ensureIsImage( 'image.nii'),
ensure.ensureIsImage( 'image'),
ensure.ensureIsImage(pathlib.Path('image')),
ensure.ensureIsImage(pathlib.Path('image.nii'))]
for l in loaded:
assert isinstance(l, nib.nifti1.Nifti1Image)
assert np.all(img.get_data() == l.get_data())
assert np.all(np.asanyarray(img.dataobj) == np.asanyarray(l.dataobj))
l = None
loaded = None
......