Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Showing 3103 additions and 1129 deletions
File added: doc/images/nonlinear_registration_process.png (66.3 KiB)
......@@ -4,12 +4,57 @@
The ``fslpy`` package is a collection of utilities and data abstractions used
by |fsleyes_apidoc|_.
within `FSL <https://fsl.fmrib.ox.ac.uk/fsl/fslwiki>`_ and by
|fsleyes_apidoc|_.
The top-level Python package for ``fslpy`` is called :mod:`fsl`. It is
broadly split into the following sub-packages:
+----------------------+-----------------------------------------------------+
| :mod:`fsl.data` | contains data abstractions and I/O routines for a |
| | range of FSL and neuroimaging file types. Most I/O |
| | routines use `nibabel <https://nipy.org/nibabel/>`_ |
| | extensively. |
+----------------------+-----------------------------------------------------+
| :mod:`fsl.utils` | contains a range of miscellaneous utilities, |
| | including :mod:`fsl.utils.path`, |
| | :mod:`fsl.utils.run`, and :mod:`fsl.utils.bids` |
+----------------------+-----------------------------------------------------+
| :mod:`fsl.scripts` | contains a range of scripts which are installed as |
| | FSL commands. |
+----------------------+-----------------------------------------------------+
| :mod:`fsl.transform` | contains functions and classes for working with |
| | FSL-style linear and non-linear transformations. |
+----------------------+-----------------------------------------------------+
| :mod:`fsl.version` | simply contains the ``fslpy`` version number. |
+----------------------+-----------------------------------------------------+
| :mod:`fsl.wrappers` | contains Python functions which can be used to |
| | invoke FSL commands. |
+----------------------+-----------------------------------------------------+
The :mod:`fsl` package provides the top-level Python package namespace for
``fslpy``, and for other FSL Python libraries. It is a `native namespace
package <https://packaging.python.org/guides/packaging-namespace-packages/>`_,
which means that there is no ``fsl/__init__.py`` file.
Other libraries can use the ``fsl`` package namespace simply by also omitting a
``fsl/__init__.py`` file, and by ensuring that there are no naming conflicts
with any sub-packages of ``fslpy`` or any other projects which use the ``fsl``
package namespace.
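For example, a hypothetical third-party project could add its own sub-package
to the ``fsl`` namespace with a layout like the following (all names here are
illustrative)::

    myproject/
        setup.py
        fsl/              <- no __init__.py file here
            myplugin/
                __init__.py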
.. toctree::
:hidden:
self
fsl
fsl.data
fsl.scripts
fsl.transform
fsl.utils
fsl.wrappers
fsl.version
contributing
changelog
deprecation
dill
h5py
nibabel
nibabel.cifti2
nibabel.fileslice
nibabel.freesurfer
numpy
numpy.linalg
scipy
scipy.ndimage
scipy.ndimage.interpolation
six
#!/usr/bin/env python
#
# __init__.py - The fslpy library.
#
# Author: Paul McCarthy <pauldmccarthy@gmail.com>
#
"""The :mod:`fsl` package is a library which contains convenience classes
and functions for use by FSL python tools. It is broadly split into the
following sub-packages:
.. autosummary::
fsl.data
fsl.utils
fsl.scripts
fsl.version
fsl.wrappers
.. note:: The ``fsl`` namespace is a ``pkgutil``-style *namespace package* -
it can be used across different projects - see
https://packaging.python.org/guides/packaging-namespace-packages/
for details.
"""
__path__ = __import__('pkgutil').extend_path(__path__, __name__)
......@@ -36,26 +36,28 @@ load an atlas image, which will be one of the following atlas-specific
:nosignatures:
LabelAtlas
StatisticAtlas
ProbabilisticAtlas
"""
from __future__ import division
import xml.etree.ElementTree as et
import os.path as op
import glob
import bisect
import logging
import xml.etree.ElementTree as et
import os.path as op
import glob
import bisect
import logging
import numpy as np
import numpy as np
import fsl.data.image as fslimage
import fsl.data.constants as constants
from fsl.utils.platform import platform as platform
import fsl.utils.transform as transform
import fsl.utils.notifier as notifier
import fsl.utils.settings as fslsettings
import fsl.data.image as fslimage
import fsl.data.constants as constants
from fsl.utils.platform import platform
import fsl.utils.image.resample as resample
import fsl.transform.affine as affine
import fsl.utils.notifier as notifier
import fsl.utils.settings as fslsettings
log = logging.getLogger(__name__)
......@@ -321,9 +323,9 @@ class AtlasLabel(object):
========= ================================================================
``name`` Region name
``index`` The index of this label into the list of all labels in the
``AtlasDescription`` that owns it. For probabilistic atlases,
this is also the index into the 4D atlas image of the volume
that corresponds to this region.
``AtlasDescription`` that owns it. For statistic/probabilistic
atlases, this is also the index into the 4D atlas image of the
volume that corresponds to this region.
``value`` For label atlases and summary images, the value of voxels that
are in this region.
``x`` X coordinate of the region in world space
......@@ -365,8 +367,17 @@ class AtlasLabel(object):
"""
return self.index < other.index
def __repr__(self):
"""
Represents AtlasLabel as string
"""
return '{}({}, index={}, value={})'.format(
self.__class__.__name__, self.name,
self.index, self.value,
)
class AtlasDescription(object):
class AtlasDescription:
"""An ``AtlasDescription`` instance parses and stores the information
stored in the FSL XML file that describes a single FSL atlas. An XML
atlas specification file is assumed to have a structure that looks like
......@@ -376,8 +387,13 @@ class AtlasDescription(object):
<atlas>
<header>
<name></name> # Atlas name
<type></type> # 'Probabilistic' or 'Label'
<name></name> # Atlas name
<type></type> # 'Statistic', 'Probabilistic' or 'Label'
<statistic></statistic> # Optional. Type of statistic
<units></units> # Optional. Units of measurement
<precision></precision> # Optional. Decimal precision to report
<upper></upper> # Optional. Upper threshold
<lower></lower> # Optional. Lower threshold
<images>
<imagefile>
</imagefile> # If type is Probabilistic, path
......@@ -402,11 +418,12 @@ class AtlasDescription(object):
</header>
<data>
# index - For probabilistic atlases, index of corresponding volume in
# 4D image file. For label images, the value of voxels which
# are in the corresponding region. For probabilistic atlases,
# it is assumed that the value for each region in the summary
# image(s) are equal to ``index + 1``.
# index - For statistic/probabilistic atlases, index of corresponding
# volume in 4D image file. For label images, the value of
# voxels which are in the corresponding region. For
# statistic/probabilistic atlases, it is assumed that the
# value for each region in the summary image(s) are equal to
# ``index + 1``.
#
#
# x |
......@@ -442,7 +459,18 @@ class AtlasDescription(object):
``specPath`` Path to the atlas XML specification file.
``atlasType`` Atlas type - either *probabilistic* or *label*.
``atlasType`` Atlas type - either *statistic*, *probabilistic* or
*label*.
``statistic`` Type of statistic, for statistic atlases.
``units`` Unit of measurement, for statistic atlases.
``precision`` Reporting precision, for statistic atlases.
``upper`` Upper threshold, for statistic atlases.
``lower`` Lower threshold, for statistic atlases.
``images`` A list of images available for this atlas - usually
:math:`1mm^3` and :math:`2mm^3` images are present.
......@@ -490,6 +518,29 @@ class AtlasDescription(object):
# Normalise the (mis)spelling 'probabalistic',
# which appears in some atlas specification files
if self.atlasType == 'probabalistic':
self.atlasType = 'probabilistic'
if self.atlasType == 'statistic':
fields = ['statistic', 'units', 'lower', 'upper', 'precision']
values = {}
for field in fields:
elem = header.find(field)
if elem is not None and elem.text is not None:
values[field] = elem.text.strip()
self.statistic = values.get('statistic', '')
self.units = values.get('units', '')
self.lower = float(values.get('lower', 0))
self.upper = float(values.get('upper', 100))
self.precision = int( values.get('precision', 2))
elif self.atlasType == 'probabilistic':
self.statistic = ''
self.units = '%'
self.lower = 5
self.upper = 100
self.precision = 0
images = header.findall('images')
self.images = []
self.summaryImages = []
......@@ -509,7 +560,7 @@ class AtlasDescription(object):
imagefile = op.normpath(atlasDir + imagefile)
summaryimagefile = op.normpath(atlasDir + summaryimagefile)
i = fslimage.Image(imagefile, loadData=False, calcRange=False)
i = fslimage.Image(imagefile)
self.images .append(imagefile)
self.summaryImages.append(summaryimagefile)
......@@ -562,7 +613,7 @@ class AtlasDescription(object):
# Load the appropriate transformation matrix
# and transform all those voxel coordinates
# into world coordinates
coords = transform.transform(coords, self.xforms[0])
coords = affine.transform(coords, self.xforms[0])
# Update the coordinates
# in our label objects
......@@ -573,11 +624,11 @@ class AtlasDescription(object):
self.labels = list(sorted(self.labels))
def find(self, index=None, value=None):
def find(self, index=None, value=None, name=None):
"""Find an :class:`.AtlasLabel` either by ``index``, or by ``value``.
Exactly one of ``index`` or ``value`` may be specified - a
``ValueError`` is raised otherwise. If an invalid ``index`` or
Exactly one of ``index``, ``value``, or ``name`` may be specified - a
``ValueError`` is raised otherwise. If an invalid ``index``, ``name``, or
``value`` is specified, an ``IndexError`` or ``KeyError`` will be
raised.
......@@ -585,12 +636,25 @@ class AtlasDescription(object):
labels, and a 3D ``LabelAtlas`` may have more values
than labels.
"""
if (index is None and value is None) or \
(index is not None and value is not None):
raise ValueError('Only one of index or value may be specified')
if ((index is not None) + (value is not None) + (name is not None)) != 1:
raise ValueError('Only one of index, value, or name may be specified')
if index is not None: return self.labels[ index]
elif value is not None: return self.__labelsByValue[int(value)]
else:
matches = [l for l in self.labels if l.name == name]
if len(matches) == 0:
# look for partial matches only if there are no full matches
matches = [l for l in self.labels if l.name[:len(name)] == name]
if len(matches) == 0:
raise IndexError('No match for {} found in labels {}'.format(
name, tuple(l.name for l in self.labels)
))
elif len(matches) > 1:
raise IndexError('Multiple matches for {} found in labels {}'.format(
name, tuple(l.name for l in self.labels)
))
return matches[0]
if index is not None: return self.labels[ index]
else: return self.__labelsByValue[int(value)]
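# Usage sketch (not part of the library; the label name below is
# illustrative). Exactly one of the three keyword arguments is given
# per call:
#
#     label = desc.find(index=0)              # first label in the list
#     label = desc.find(value=1)              # label whose voxel value is 1
#     label = desc.find(name='Frontal Pole')  # exact or unique-prefix name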
def __eq__(self, other):
......@@ -611,6 +675,12 @@ class AtlasDescription(object):
"""
return self.name.lower() < other.name.lower()
def __repr__(self, ):
"""
String representation of AtlasDescription
"""
return '{}({})'.format(self.__class__.__name__, self.atlasID)
class Atlas(fslimage.Image):
"""This is the base class for the :class:`LabelAtlas` and
......@@ -632,7 +702,7 @@ class Atlas(fslimage.Image):
:arg resolution: Desired isotropic resolution in millimetres.
:arg isLabel: Pass in ``True`` for label atlases, ``False`` for
probabilistic atlases.
statistic/probabilistic atlases.
All other arguments are passed to :meth:`.Image.__init__`.
"""
......@@ -679,7 +749,7 @@ class Atlas(fslimage.Image):
"""Makes sure that the given mask has the same resolution as this
atlas, so it can be used for querying. Used by the
:meth:`.LabelAtlas.maskLabels` and
:meth:`.ProbabilisticAtlas.maskProportions` methods.
:meth:`.StatisticAtlas.maskValues` methods.
:arg mask: A :class:`.Image`
......@@ -695,9 +765,8 @@ class Atlas(fslimage.Image):
# for resampling, as it is most likely
# that the mask is binary.
try:
mask, xform = mask.resample(self.shape[:3],
dtype=np.float32,
order=0)
mask, xform = resample.resample(
mask, self.shape[:3], dtype=np.float32, order=0)
except ValueError:
raise MaskError('Mask has wrong number of dimensions')
......@@ -710,13 +779,11 @@ class Atlas(fslimage.Image):
return mask
class MaskError(Exception):
"""Exception raised by the :meth:`LabelAtlas.maskLabel` and
:meth:`ProbabilisticAtlas.maskProportions` when a mask is provided which
:meth:`StatisticAtlas.maskValues` when a mask is provided which
does not match the atlas space.
"""
pass
class LabelAtlas(Atlas):
......@@ -782,7 +849,7 @@ class LabelAtlas(Atlas):
"""
if not voxel:
loc = transform.transform([loc], self.worldToVoxMat)[0]
loc = affine.transform([loc], self.worldToVoxMat)[0]
loc = [int(v) for v in loc.round()]
if loc[0] < 0 or \
......@@ -813,10 +880,17 @@ class LabelAtlas(Atlas):
of each present value. The proportions are returned as
values between 0 and 100.
.. note:: Calling this method will cause the atlas image data to be
loaded into memory.
.. note:: Use the :meth:`find` method to retrieve the ``AtlasLabel``
associated with each returned value.
"""
# Mask-based indexing requires the image
# data to be loaded into memory
self.data
# Extract the values that are in
# the mask, and their corresponding
# mask weights
......@@ -850,15 +924,48 @@ class LabelAtlas(Atlas):
return values, props
class ProbabilisticAtlas(Atlas):
"""A 4D atlas which contains one volume for each region.
def get(self, label=None, index=None, value=None, name=None, binary=True):
"""Returns the binary image for the given label.
Only one of the arguments should be used to define the label.
:arg label: :class:`AtlasLabel` contained within this atlas
:arg index: index of the label
:arg value: value of the label
:arg name: name of the label
:arg binary: If ``True`` (the default), the image will contain 1s in
the label region. Otherwise the image will contain the
label value.
:return: :class:`.Image` with the mask
"""
if ((label is not None) + (index is not None) +
(value is not None) + (name is not None)) != 1:
raise ValueError('Only one of label, index, value, or name may be specified')
if label is None:
label = self.find(index=index, name=name, value=value)
elif label not in self.desc.labels:
raise ValueError("Unknown label provided")
arr = (self.data == label.value).astype(np.int32)
if not binary:
arr[arr > 0] = label.value
return fslimage.Image(arr, name=label.name, header=self.header)
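# Usage sketch, assuming ``atlas`` is a ``LabelAtlas`` (the region
# name and value below are illustrative):
#
#     mask = atlas.get(name='Thalamus')        # binary 0/1 mask
#     mask = atlas.get(value=4, binary=False)  # voxels keep the label value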
The ``ProbabilisticAtlas`` provides the :meth:`proportions` method,
which makes looking up region probabilities easy.
class StatisticAtlas(Atlas):
"""A ``StatisticAtlas`` is a 4D image which contains one volume for
each region in the atlas; each volume contains some statistic value
for the corresponding region.
The :class:`ProbabilisticAtlas` is a specialisation of the
``StatisticAtlas``
"""
def __init__(self, atlasDesc, resolution=None, **kwargs):
"""Create a ``ProbabilisticAtlas`` instance.
"""Create a ``StatisticAtlas`` instance.
:arg atlasDesc: The :class:`AtlasDescription` instance describing
the atlas.
......@@ -868,36 +975,59 @@ class ProbabilisticAtlas(Atlas):
Atlas.__init__(self, atlasDesc, resolution, False, **kwargs)
def proportions(self, location, *args, **kwargs):
"""Looks up and returns the proportions of of all regions at the given
def get(self, label=None, index=None, value=None, name=None):
"""Returns the statistic image at the given label.
Only one of the arguments should be used to define the label.
:arg label: :class:`AtlasLabel` contained within this atlas
:arg index: index of the label
:arg value: value of the label
:arg name: name of the label
:return: :class:`.Image` with the statistic values for the
specified label.
"""
if ((label is not None) + (index is not None) +
(value is not None) + (name is not None)) != 1:
raise ValueError('Only one of label, index, value, or name may be specified')
if label is None:
label = self.find(index=index, value=value, name=name)
elif label not in self.desc.labels:
raise ValueError("Unknown label provided")
arr = self[..., label.index]
return fslimage.Image(arr, name=label.name, header=self.header)
def values(self, location, *args, **kwargs):
"""Looks up and returns the values of of all regions at the given
location.
:arg location: Can be one of the following:
- A sequence of three values, interpreted as atlas
coordinates. In this case, :meth:`coordProportions`
coordinates. In this case, :meth:`coordValues`
is called.
- An :class:`.Image` which is interpreted as a
weighted mask. In this case, :meth:`maskProportions`
weighted mask. In this case, :meth:`maskValues`
is called.
All other arguments are passed through to the :meth:`coordProportions`
or :meth:`maskProportions` methods.
All other arguments are passed through to the :meth:`coordValues`
or :meth:`maskValues` methods.
:returns: The return value of either :meth:`coordProportions` or
:meth:`maskProportions`.
:returns: The return value of either :meth:`coordValues` or
:meth:`maskValues`.
"""
if isinstance(location, fslimage.Image):
return self.maskProportions(location, *args, **kwargs)
return self.maskValues(location, *args, **kwargs)
else:
return self.coordProportions(location, *args, **kwargs)
return self.coordValues(location, *args, **kwargs)
def coordProportions(self, loc, voxel=False):
"""Looks up the region probabilities for the given location.
def coordValues(self, loc, voxel=False):
"""Looks up the region values for the given location.
:arg loc: A sequence of three values, interpreted as atlas
world or voxel coordinates.
......@@ -905,14 +1035,12 @@ class ProbabilisticAtlas(Atlas):
:arg voxel: Defaults to ``False``. If ``True``, the ``loc``
argument is interpreted as voxel coordinates.
:returns: a list of values, one per region, which represent
the probability of each region for the specified
location. Returns an empty list if the given
location is out of bounds.
:returns: a list of values, one per region. Returns an empty
list if the given location is out of bounds.
"""
if not voxel:
loc = transform.transform([loc], self.worldToVoxMat)[0]
loc = affine.transform([loc], self.worldToVoxMat)[0]
loc = [int(v) for v in loc.round()]
if loc[0] < 0 or \
......@@ -923,30 +1051,27 @@ class ProbabilisticAtlas(Atlas):
loc[2] >= self.shape[2]:
return []
props = self[loc[0], loc[1], loc[2], :]
vals = self[loc[0], loc[1], loc[2], :]
# We only return labels for this atlas -
# the underlying image may have more
# volumes than this atlas has labels.
return [props[l.index] for l in self.desc.labels]
return [vals[l.index] for l in self.desc.labels]
def maskProportions(self, mask):
"""Looks up the probabilities of all regions in the given ``mask``.
def maskValues(self, mask):
"""Looks up the average values of all regions in the given ``mask``.
:arg mask: A 3D :class:`.Image` which is interpreted as a weighted
mask. If the ``mask`` shape does not match that of this
``ProbabilisticAtlas``, it is resampled using
:meth:`.Image.resample`, with nearest-neighbour
interpolation.
``StatisticAtlas``, it is resampled using
:meth:`Atlas.prepareMask`.
:returns: A sequence containing the proportion, within the mask,
of all regions in the atlas. The proportions are returned as
values between 0 and 100.
:returns: A sequence containing the average value, within the mask,
of all regions in the atlas.
"""
props = []
avgvals = []
mask = self.prepareMask(mask)
boolmask = mask > 0
weights = mask[boolmask]
......@@ -959,11 +1084,17 @@ class ProbabilisticAtlas(Atlas):
vals = self[..., label.index]
vals = vals[boolmask] * weights
prop = vals.sum() / weightsum
val = vals.sum() / weightsum
avgvals.append(val)
props.append(prop)
return avgvals
return props
class ProbabilisticAtlas(StatisticAtlas):
"""A 4D atlas which contains one volume for each region. Each volume
contains probability values for one region, between 0 and 100.
"""
registry = AtlasRegistry()
#!/usr/bin/env python
#
# bitmap.py - The Bitmap class
#
# Author: Paul McCarthy <pauldmccarthy@gmail.com>
#
"""This module contains the :class:`Bitmap` class, for loading bitmap image
files. Pillow is required to use the ``Bitmap`` class.
"""
import os.path as op
import pathlib
import logging
import numpy as np
import fsl.data.image as fslimage
log = logging.getLogger(__name__)
BITMAP_EXTENSIONS = ['.bmp', '.png', '.jpg', '.jpeg',
'.tif', '.tiff', '.gif', '.rgba',
'.jp2', '.jpg2', '.jp2k']
"""File extensions we understand. """
BITMAP_DESCRIPTIONS = [
'Bitmap',
'Portable Network Graphics',
'JPEG',
'JPEG',
'TIFF',
'TIFF',
'Graphics Interchange Format',
'Raw RGBA',
'JPEG 2000',
'JPEG 2000',
'JPEG 2000']
"""A description for each :attr:`BITMAP_EXTENSION`. """
class Bitmap(object):
"""The ``Bitmap`` class can be used to load a bitmap image. The
:meth:`asImage` method will convert the bitmap into an :class:`.Image`
instance.
"""
def __init__(self, bmp):
"""Create a ``Bitmap``.
:arg bmp: File name of an image, or a ``numpy`` array containing image
data.
"""
if isinstance(bmp, (pathlib.Path, str)):
try:
# Allow big/truncated images
import PIL.Image as Image
import PIL.ImageFile as ImageFile
Image .MAX_IMAGE_PIXELS = None
ImageFile.LOAD_TRUNCATED_IMAGES = True
except ImportError:
raise RuntimeError('Install Pillow to use the Bitmap class')
src = str(bmp)
img = Image.open(src)
# If this is a palette/LUT
# image, convert it into a
# regular rgb(a) image.
if img.mode == 'P':
img = img.convert()
data = np.array(img)
elif isinstance(bmp, np.ndarray):
src = 'array'
data = np.copy(bmp)
else:
raise ValueError('unknown bitmap: {}'.format(bmp))
# Make the array (w, h, c). Single channel
# (e.g. greyscale) images are returned as
# 2D arrays, whereas multi-channel images
# are returned as 3D. In either case, the
# first two dimensions are (height, width),
# but we want them the other way around.
data = np.atleast_3d(data)
data = np.fliplr(data.transpose((1, 0, 2)))
data = np.array(data, dtype=np.uint8, order='C')
w, h = data.shape[:2]
self.__data = data
self.__dataSource = src
self.__name = op.basename(src)
def __hash__(self):
"""Returns a number which uniquely idenfities this ``Bitmap`` instance
(the result of ``id(self)``).
"""
return id(self)
def __str__(self):
"""Return a string representation of this ``Bitmap`` instance."""
return '{}({}, {})'.format(self.__class__.__name__,
self.dataSource,
self.shape)
def __repr__(self):
"""See the :meth:`__str__` method. """
return self.__str__()
@property
def name(self):
"""Returns the name of this ``Bitmap``, typically the base name of the
file.
"""
return self.__name
@property
def dataSource(self):
"""Returns the bitmap data source - typically the file name. """
return self.__dataSource
@property
def data(self):
"""Convenience method which returns the bitmap data as a ``(w, h, c)``
array, where ``c`` is either 3 or 4.
"""
return self.__data
@property
def shape(self):
"""Returns the bitmap shape - ``(width, height, nchannels)``. """
return self.__data.shape
def asImage(self):
"""Convert this ``Bitmap`` into an :class:`.Image` instance. """
width, height, nchannels = self.shape
if nchannels == 1:
dtype = np.uint8
elif nchannels == 3:
dtype = np.dtype([('R', 'uint8'),
('G', 'uint8'),
('B', 'uint8')])
elif nchannels == 4:
dtype = np.dtype([('R', 'uint8'),
('G', 'uint8'),
('B', 'uint8'),
('A', 'uint8')])
else:
raise ValueError('Cannot convert bitmap with {} '
'channels into nifti image'.format(nchannels))
if nchannels == 1:
data = self.data.reshape((width, height))
else:
data = np.zeros((width, height), dtype=dtype)
for ci, ch in enumerate(dtype.names):
data[ch] = self.data[..., ci]
data = np.asarray(data, order='F')
return fslimage.Image(data,
name=self.name,
dataSource=self.dataSource)
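# Usage sketch (the file names are illustrative):
#
#     from fsl.data.bitmap import Bitmap
#
#     bmp = Bitmap('photo.png')
#     img = bmp.asImage()       # convert to a fsl.data.image.Image
#     img.save('photo.nii.gz')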
"""
Provides a sparse representation of volumetric and/or surface data
The data can be defined either per voxel/vertex (:class:`DenseCifti`) or per parcel (:class:`ParcelCifti`).
The data can be read from NIFTI, GIFTI, or CIFTI files.
Non-sparse volumetric or surface representations can be extracted.
"""
from nibabel.cifti2 import cifti2_axes
from typing import Sequence, Optional, Union
import numpy as np
from fsl.data import image
import nibabel as nib
from fsl.utils.path import addExt
dense_extensions = {
cifti2_axes.BrainModelAxis: '.dconn.nii',
cifti2_axes.ParcelsAxis: '.dpconn.nii',
cifti2_axes.SeriesAxis: '.dtseries.nii',
cifti2_axes.ScalarAxis: '.dscalar.nii',
cifti2_axes.LabelAxis: '.dlabel.nii',
}
parcel_extensions = {
cifti2_axes.BrainModelAxis: '.pdconn.nii',
cifti2_axes.ParcelsAxis: '.pconn.nii',
cifti2_axes.SeriesAxis: '.ptseries.nii',
cifti2_axes.ScalarAxis: '.pscalar.nii',
cifti2_axes.LabelAxis: '.plabel.nii',
}
class Cifti:
"""
Parent class for the two types of CIFTI files.
The type of the CIFTI file is determined by the last axis, which can be one of:
- :py:class:`BrainModelAxis <cifti2_axes.BrainModelAxis>`
- :py:class:`ParcelsAxis <cifti2_axes.ParcelsAxis>`
"""
def __init__(self, arr: np.ndarray, axes: Sequence[Optional[cifti2_axes.Axis]]):
"""
Defines a new dataset in greyordinate space
:param arr: (..., N) array for N greyordinates or parcels
:param axes: sequence of CIFTI axes describing the data along each dimension; can contain Nones for undefined axes
"""
self.arr = arr
axes = tuple(axes)
while self.arr.ndim > len(axes):
axes = (None, ) + axes
self.axes = axes
if not all(ax is None or len(ax) == sz for ax, sz in zip(axes, self.arr.shape)):
raise ValueError(f"Shape of axes {tuple(-1 if ax is None else len(ax) for ax in axes)} does not "
f"match shape of array {self.arr.shape}")
def to_cifti(self, default_axis=None):
"""
Create a CIFTI image from the data
:param default_axis: What to use as an axis along any undefined dimensions
- By default an error is raised
- if set to "scalar" a ScalarAxis is used with names of "default {index}"
- if set to "series" a SeriesAxis is used
:return: nibabel CIFTI image
"""
if any(ax is None for ax in self.axes):
if default_axis is None:
raise ValueError("Can not store to CIFTI without defining what is stored along each dimension")
elif default_axis == 'scalar':
def get_axis(n: int):
return cifti2_axes.ScalarAxis([f'default {idx + 1}' for idx in range(n)])
elif default_axis == 'series':
def get_axis(n: int):
return cifti2_axes.SeriesAxis(0, 1, n)
else:
raise ValueError(f"default_axis should be set to None, 'scalar', or 'series', not {default_axis}")
new_axes = [
get_axis(sz) if ax is None else ax
for ax, sz in zip(self.axes, self.arr.shape)
]
else:
new_axes = list(self.axes)
data = self.arr
if data.ndim == 1:
# CIFTI axes are always at least 2D
data = data[None, :]
new_axes.insert(0, cifti2_axes.ScalarAxis(['default']))
return nib.Cifti2Image(data, header=new_axes)
@classmethod
def from_cifti(cls, filename, writable=False):
"""
Creates new greyordinate object from dense CIFTI file
:param filename: CIFTI filename or :class:`nib.Cifti2Image` object
:param writable: if True, opens data array in writable mode
"""
if isinstance(filename, str):
img = nib.load(filename)
else:
img = filename
if not isinstance(img, nib.Cifti2Image):
raise ValueError(f"Input {filename} should be CIFTI filename or nibabel Cifti2Image")
if writable:
data = np.memmap(filename, img.dataobj.dtype, mode='r+',
offset=img.dataobj.offset, shape=img.shape, order='F')
else:
data = np.asanyarray(img.dataobj)
axes = [img.header.get_axis(idx) for idx in range(data.ndim)]
if isinstance(axes[-1], cifti2_axes.BrainModelAxis):
return DenseCifti(data, axes)
elif isinstance(axes[-1], cifti2_axes.ParcelsAxis):
return ParcelCifti(data, axes)
raise ValueError("Last axis of CIFTI object should be a BrainModelAxis or ParcelsAxis")
def save(self, cifti_filename, default_axis=None):
"""
Writes this sparse representation to/from a filename
:param cifti_filename: output filename
:param default_axis: What to use as an axis along any undefined dimensions
- By default an error is raised
- if set to "scalar" a ScalarAxis is used with names of "default {index}"
- if set to "series" a SeriesAxis is used
:return:
"""
self.to_cifti(default_axis).to_filename(addExt(cifti_filename, defaultExt=self.extension, mustExist=False))
@classmethod
def from_gifti(cls, filename, mask_values=(0, np.nan)):
"""
Creates a new greyordinate object from a GIFTI file
:param filename: GIFTI filename
:param mask_values: values to mask out
:return: greyordinate object representing the unmasked vertices
"""
if isinstance(filename, str):
img = nib.load(filename)
else:
img = filename
datasets = [darr.data for darr in img.darrays]
if len(datasets) == 1:
data = datasets[0]
else:
data = np.concatenate(
[np.atleast_2d(d) for d in datasets], axis=0
)
mask = np.ones(data.shape, dtype='bool')
for value in mask_values:
if value is np.nan:
mask &= ~np.isnan(data)
else:
mask &= ~(data == value)
while mask.ndim > 1:
mask = mask.any(0)
anatomy = BrainStructure.from_gifti(img)
bm_axes = cifti2_axes.BrainModelAxis.from_mask(mask, name=anatomy.cifti)
return DenseCifti(data[..., mask], [bm_axes])
@classmethod
def from_image(cls, input, mask_values=(np.nan, 0)):
"""
Creates a new greyordinate object from a NIFTI file
:param input: FSL :class:`image.Image` object
:param mask_values: which values to mask out
:return: greyordinate object representing the unmasked voxels
"""
img = image.Image(input)
mask = np.ones(img.data.shape, dtype='bool')
for value in mask_values:
if value is np.nan:
mask &= ~np.isnan(img.data)
else:
mask &= ~(img.data == value)
while mask.ndim > 3:
mask = mask.any(-1)
if np.sum(mask) == 0:
raise ValueError("No unmasked voxels found in NIFTI image")
inverted_data = np.transpose(img.data[mask], tuple(range(1, img.data.ndim - 2)) + (0, ))
bm_axes = cifti2_axes.BrainModelAxis.from_mask(mask, affine=img.nibImage.affine)
return DenseCifti(inverted_data, [bm_axes])
class DenseCifti(Cifti):
"""
Represents sparse data defined for a subset of voxels and vertices (i.e., greyordinates)
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if not isinstance(self.brain_model_axis, cifti2_axes.BrainModelAxis):
raise ValueError(f"DenseCifti expects a BrainModelAxis as last axes object, not {type(self.brain_model_axis)}")
@property
def brain_model_axis(self, ) -> cifti2_axes.BrainModelAxis:
return self.axes[-1]
@property
def extension(self, ):
if self.arr.ndim == 1:
return dense_extensions[cifti2_axes.ScalarAxis]
return dense_extensions[type(self.axes[-2])]
def to_image(self, fill=0) -> image.Image:
"""
Get the volumetric data as an :class:`image.Image`
"""
if self.brain_model_axis.volume_mask.sum() == 0:
raise ValueError(f"Can not create volume without voxels in {self}")
data = np.full(self.brain_model_axis.volume_shape + self.arr.shape[:-1], fill,
dtype=self.arr.dtype)
voxels = self.brain_model_axis.voxel[self.brain_model_axis.volume_mask]
data[tuple(voxels.T)] = np.transpose(self.arr, (-1,) + tuple(range(self.arr.ndim - 1)))[
self.brain_model_axis.volume_mask]
return image.Image(data, xform=self.brain_model_axis.affine)
def surface(self, anatomy, fill=np.nan, partial=False):
"""
Gets a specific surface
If `partial` is True a view of the data rather than a copy is returned.
:param anatomy: BrainStructure or string like 'CortexLeft' or 'CortexRight'
:param fill: which value to fill the array with if not all vertices are defined
:param partial: only return the part of the surface defined in the greyordinate file (ignores `fill` if set)
:return:
- if not partial: (..., n_vertices) array
- if partial: tuple with (N, ) int array with indices on the surface included in (..., N) array
"""
if isinstance(anatomy, str):
anatomy = BrainStructure.from_string(anatomy, issurface=True)
if anatomy.cifti not in self.brain_model_axis.name:
raise ValueError(f"No surface data for {anatomy.cifti} found")
slc, bm = None, None
arr = np.full(self.arr.shape[:-1] + (self.brain_model_axis.nvertices[anatomy.cifti],), fill,
dtype=self.arr.dtype)
for name, slc_try, bm_try in self.brain_model_axis.iter_structures():
if name == anatomy.cifti:
if partial:
if bm is not None:
raise ValueError(f"Surface {anatomy} does not form a contiguous block")
slc, bm = slc_try, bm_try
else:
arr[..., bm_try.vertex] = self.arr[..., slc_try]
if not partial:
return arr
else:
return bm.vertex, self.arr[..., slc]
class ParcelCifti(Cifti):
"""
Represents sparse data defined at specific parcels
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if not isinstance(self.parcel_axis, cifti2_axes.ParcelsAxis):
raise ValueError(f"ParcelCifti expects a ParcelsAxis as last axes object, not {type(self.parcel_axis)}")
@property
def extension(self, ):
if self.arr.ndim == 1:
return parcel_extensions[cifti2_axes.ScalarAxis]
return parcel_extensions[type(self.axes[-2])]
@property
def parcel_axis(self, ) -> cifti2_axes.ParcelsAxis:
return self.axes[-1]
def to_image(self, fill=0):
"""
Get the volumetric data as an :class:`Image`
"""
data = np.full(self.parcel_axis.volume_shape + self.arr.shape[:-1], fill, dtype=self.arr.dtype)
written = np.zeros(self.parcel_axis.volume_shape, dtype='bool')
for idx, write_to in enumerate(self.parcel_axis.voxels):
if written[tuple(write_to.T)].any():
raise ValueError("Duplicate voxels in different parcels")
data[tuple(write_to.T)] = self.arr[np.newaxis, ..., idx]
written[tuple(write_to.T)] = True
if not written.any():
raise ValueError("Parcellation does not contain any volumetric data")
return image.Image(data, xform=self.parcel_axis.affine)
def surface(self, anatomy, fill=np.nan, partial=False):
"""
Gets a specific surface
:param anatomy: BrainStructure or string like 'CortexLeft' or 'CortexRight'
:param fill: which value to fill the array with if not all vertices are defined
:param partial: only return the part of the surface defined in the greyordinate file (ignores `fill` if set)
:return:
- if not partial: (..., n_vertices) array
- if partial: tuple with (N, ) int array with indices on the surface included in (..., N) array
"""
if isinstance(anatomy, str):
anatomy = BrainStructure.from_string(anatomy, issurface=True)
if anatomy.cifti not in self.parcel_axis.nvertices:
raise ValueError(f"No surface data for {anatomy.cifti} found")
arr = np.full(self.arr.shape[:-1] + (self.parcel_axis.nvertices[anatomy.cifti],), fill,
dtype=self.arr.dtype)
written = np.zeros(self.parcel_axis.nvertices[anatomy.cifti], dtype='bool')
for idx, vertices in enumerate(self.parcel_axis.vertices):
if anatomy.cifti not in vertices:
continue
write_to = vertices[anatomy.cifti]
if written[write_to].any():
raise ValueError("Duplicate vertices in different parcels")
arr[..., write_to] = self.arr[..., idx, np.newaxis]
written[write_to] = True
if not partial:
return arr
else:
return np.where(written)[0], arr[..., written]
class BrainStructure(object):
"""Which brain structure does the parent object describe?
Supports how brain structures are stored in both GIFTI and CIFTI files
"""
def __init__(self, primary, secondary=None, hemisphere='both', geometry=None):
"""Creates a new brain structure
:param primary: Name of the brain structure (e.g. cortex, thalamus)
:param secondary: Further specification of which part of the brain structure is described (e.g. 'white' or
'pial' for the cortex)
:param hemisphere: which hemisphere is the brain structure in ('left', 'right', or 'both')
:param geometry: does the parent object describe the 'volume' or the 'surface'
"""
self.primary = primary.lower()
self.secondary = None if secondary is None else secondary.lower()
self.hemisphere = hemisphere.lower()
if geometry not in (None, 'surface', 'volume'):
raise ValueError(f"Invalid value for geometry: {geometry}")
self.geometry = geometry
def __eq__(self, other):
"""Two brain structures are equal if they could describe the same structure
"""
if isinstance(other, str):
other = self.from_string(other)
match_primary = (self.primary == other.primary or self.primary == 'all' or other.primary == 'all' or
self.primary == other.geometry or self.geometry == other.primary)
match_hemisphere = self.hemisphere == other.hemisphere
match_secondary = (self.secondary is None or other.secondary is None or self.secondary == other.secondary)
match_geometry = (self.geometry is None or other.geometry is None or self.geometry == other.geometry)
return match_primary and match_hemisphere and match_secondary and match_geometry
@property
def gifti(self, ):
"""Returns the keywords needed to define the surface in the meta information of a GIFTI file
"""
main = self.primary.capitalize() + ('' if self.hemisphere == 'both' else self.hemisphere.capitalize())
res = {'AnatomicalStructurePrimary': main}
if self.secondary is not None:
res['AnatomicalStructureSecondary'] = self.secondary.capitalize()
return res
def __str__(self, ):
"""Returns a short description of the brain structure
"""
if self.secondary is None:
return self.primary.capitalize() + self.hemisphere.capitalize()
else:
return "%s%s(%s)" % (self.primary.capitalize(), self.hemisphere.capitalize(), self.secondary)
@property
def cifti(self, ):
"""Returns a description of the brain structure needed to define the surface in a CIFTI file
"""
return 'CIFTI_STRUCTURE_' + self.primary.upper() + ('' if self.hemisphere == 'both' else ('_' + self.hemisphere.upper()))
@classmethod
def from_string(cls, value, issurface=None):
"""Parses a string to find out which brain structure is being described
:param value: string to be parsed
:param issurface: defines whether the object describes the volume or surface of the brain structure (default: surface if the brain structure is the cortex volume otherwise)
"""
if '_' in value:
items = [val.lower() for val in value.split('_')]
if items[-1] in ['left', 'right', 'both']:
hemisphere = items[-1]
others = items[:-1]
elif items[0] in ['left', 'right', 'both']:
hemisphere = items[0]
others = items[1:]
else:
hemisphere = 'both'
others = items
if others[0] in ['nifti', 'cifti', 'gifti']:
others = others[2:]
primary = '_'.join(others)
else:
low = value.lower()
if 'left' == low[-4:]:
hemisphere = 'left'
primary = low[:-4]
elif 'right' == low[-5:]:
hemisphere = 'right'
primary = low[:-5]
elif 'both' == low[-4:]:
hemisphere = 'both'
primary = low[:-4]
else:
hemisphere = 'both'
primary = low
if issurface is None:
issurface = primary == 'cortex'
if primary == '':
primary = 'all'
return cls(primary, None, hemisphere, 'surface' if issurface else 'volume')
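# Parsing sketch - CIFTI-style and short-form names are both accepted,
# and compare equal when they could describe the same structure:
#
#     a = BrainStructure.from_string('CIFTI_STRUCTURE_CORTEX_LEFT')
#     b = BrainStructure.from_string('CortexLeft')
#     assert a == b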
@classmethod
def from_gifti(cls, gifti_obj):
"""
Extracts the brain structure from a GIFTI object
"""
primary_str = 'AnatomicalStructurePrimary'
secondary_str = 'AnatomicalStructureSecondary'
primary = "other"
secondary = None
for obj in [gifti_obj] + gifti_obj.darrays:
if primary_str in obj.meta:
primary = obj.meta[primary_str]
if secondary_str in obj.meta:
secondary = obj.meta[secondary_str]
anatomy = cls.from_string(primary, issurface=True)
anatomy.secondary = None if secondary is None else secondary.lower()
return anatomy
def load(filename, mask_values=(0, np.nan), writable=False) -> Union[DenseCifti, ParcelCifti]:
"""
Reads CIFTI data from the given file
File can be:
- NIFTI file
- GIFTI file
- CIFTI file
:param filename: input filename
:param mask_values: which values are outside of the mask for NIFTI or GIFTI input
:param writable: if ``True``, opens the underlying data in writable mode (only supported for CIFTI input)
:return: appropriate CIFTI sub-class (parcellated or dense)
"""
possible_extensions = (
tuple(dense_extensions.values()) +
tuple(parcel_extensions.values()) +
tuple(image.ALLOWED_EXTENSIONS) +
('.shape.gii', '.gii')
)
if isinstance(filename, str):
filename = addExt(filename, possible_extensions, fileGroups=image.FILE_GROUPS)
img = nib.load(filename)
else:
img = filename
if isinstance(img, nib.Cifti2Image):
return Cifti.from_cifti(img, writable=writable)
if isinstance(img, nib.GiftiImage):
if writable:
raise ValueError("Can not open GIFTI file in writable mode")
return Cifti.from_gifti(img, mask_values)
try:
vol_img = image.Image(img)
except ValueError:
raise ValueError(f"I do not know how to convert {type(img)} into greyordinates (from {filename})")
if writable:
raise ValueError("Can not open NIFTI file in writable mode")
return Cifti.from_image(vol_img, mask_values)
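# Usage sketch (the file name is illustrative):
#
#     from fsl.data.cifti import load
#
#     cifti = load('func.dtseries.nii')  # DenseCifti or ParcelCifti
#     img   = cifti.to_image()           # non-sparse volumetric Image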
......@@ -30,6 +30,7 @@ specification:
NIFTI_XFORM_ALIGNED_ANAT
NIFTI_XFORM_TALAIRACH
NIFTI_XFORM_MNI_152
NIFTI_XFORM_TEMPLATE_OTHER
"""
......@@ -81,7 +82,14 @@ NIFTI_XFORM_MNI_152 = 4
"""MNI 152 normalized coordinates."""
NIFTI_XFORM_ANALYZE = 5
NIFTI_XFORM_TEMPLATE_OTHER = 5
"""Coordinates aligned to some template that is not MNI152 or Talairach.
See https://www.nitrc.org/forum/message.php?msg_id=26394 for details.
"""
NIFTI_XFORM_ANALYZE = 6
"""Code which indicates that this is an ANALYZE image, not a NIFTI image. """
......@@ -98,6 +106,36 @@ NIFTI_UNITS_PPM = 40
NIFTI_UNITS_RADS = 48
# NIFTI datatype codes
NIFTI_DT_NONE = 0
NIFTI_DT_UNKNOWN = 0
NIFTI_DT_BINARY = 1
NIFTI_DT_UNSIGNED_CHAR = 2
NIFTI_DT_SIGNED_SHORT = 4
NIFTI_DT_SIGNED_INT = 8
NIFTI_DT_FLOAT = 16
NIFTI_DT_COMPLEX = 32
NIFTI_DT_DOUBLE = 64
NIFTI_DT_RGB = 128
NIFTI_DT_ALL = 255
NIFTI_DT_UINT8 = 2
NIFTI_DT_INT16 = 4
NIFTI_DT_INT32 = 8
NIFTI_DT_FLOAT32 = 16
NIFTI_DT_COMPLEX64 = 32
NIFTI_DT_FLOAT64 = 64
NIFTI_DT_RGB24 = 128
NIFTI_DT_INT8 = 256
NIFTI_DT_UINT16 = 512
NIFTI_DT_UINT32 = 768
NIFTI_DT_INT64 = 1024
NIFTI_DT_UINT64 = 1280
NIFTI_DT_FLOAT128 = 1536
NIFTI_DT_COMPLEX128 = 1792
NIFTI_DT_COMPLEX256 = 2048
NIFTI_DT_RGBA32 = 2304
# NIFTI file intent codes
NIFTI_INTENT_NONE = 0
NIFTI_INTENT_CORREL = 2
......@@ -29,22 +29,56 @@ import os
import os.path as op
import subprocess as sp
import re
import sys
import glob
import json
import shlex
import shutil
import logging
import binascii
import numpy as np
import nibabel as nib
import fsl.utils.tempdir as tempdir
import fsl.utils.memoize as memoize
import fsl.data.image as fslimage
import fsl.utils.tempdir as tempdir
import fsl.utils.memoize as memoize
import fsl.utils.platform as fslplatform
import fsl.data.image as fslimage
log = logging.getLogger(__name__)
MIN_DCM2NIIX_VERSION = (1, 0, 2017, 12, 15)
"""Minimum version of dcm2niix that is required for this module to work. """
"""Minimum version of ``dcm2niix`` that is required for this module to work.
"""
CRC_DCM2NIIX_VERSION = (1, 0, 2019, 9, 2)
"""For versions of ``dcm2niix`` orf this version or newer, the ``-n`` flag,
used to convert a single DICOM series, requires that a CRC checksum
identifying the series be passed (see the :func:`seriesCRC`
function). Versions prior to this require the series number to be passed.
"""
def dcm2niix() -> str:
"""Tries to find an absolute path to the ``dcm2niix`` command. Returns
``'dcm2niix'`` (unqualified) if a specific executable cannot be found.
"""
fsldir = fslplatform.platform.fsldir
candidates = [
shutil.which('dcm2niix')
]
if fsldir is not None:
candidates.insert(0, op.join(fsldir, 'bin', 'dcm2niix'))
for c in candidates:
if c is not None and op.exists(c):
return c
return 'dcm2niix'
class DicomImage(fslimage.Image):
......@@ -80,12 +114,19 @@ class DicomImage(fslimage.Image):
@memoize.memoize
def enabled():
"""Returns ``True`` if ``dcm2niix`` is present, and recent enough,
``False`` otherwise.
def installedVersion():
"""Return a tuple describing the version of ``dcm2niix`` that is installed,
or ``None`` if dcm2niix cannot be found, or its version cannot be parsed.
The returned tuple contains the following fields, all integers:
- Major version number
- Minor version number
- Year
- Month
- Day
"""
cmd = 'dcm2niix -h'
cmd = f'{dcm2niix()} -h'
versionPattern = re.compile(r'v'
r'(?P<major>[0-9]+)\.'
r'(?P<minor>[0-9]+)\.'
......@@ -102,80 +143,124 @@ def enabled():
match = re.match(versionPattern, word)
if match is None:
continue
if match is not None:
return (int(match.group('major')),
int(match.group('minor')),
int(match.group('year')),
int(match.group('month')),
int(match.group('day')))
except Exception as e:
log.debug(f'Error parsing dcm2niix version string: {e}')
return None
installedVersion = (
int(match.group('major')),
int(match.group('minor')),
int(match.group('year')),
int(match.group('month')),
int(match.group('day')))
# make sure installed version
# is equal to or newer than
# minimum required version
for iv, mv in zip(installedVersion, MIN_DCM2NIIX_VERSION):
if iv > mv: return True
elif iv < mv: return False
def compareVersions(v1, v2):
"""Compares two ``dcm2niix`` versions ``v1`` and ``v2``. The versions are
assumed to be in the format returned by :func:`installedVersion`.
# if we get here, versions are equal
return True
:returns: - 1 if ``v1`` is newer than ``v2``
- -1 if ``v1`` is older than ``v2``
- 0 if ``v1`` the same as ``v2``.
"""
except Exception as e:
log.debug('Error parsing dcm2niix version string: {}'.format(e))
for iv1, iv2 in zip(v1, v2):
if iv1 > iv2: return 1
elif iv1 < iv2: return -1
return 0
return False
def enabled():
"""Returns ``True`` if ``dcm2niix`` is present, and recent enough,
``False`` otherwise.
"""
installed = installedVersion()
required = MIN_DCM2NIIX_VERSION
return ((installed is not None) and
(compareVersions(installed, required) >= 0))
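# Illustrative comparisons against the module-level minimum version:
#
#     compareVersions((1, 0, 2019, 9, 2), MIN_DCM2NIIX_VERSION)    # ->  1
#     compareVersions(MIN_DCM2NIIX_VERSION, (1, 0, 2019, 9, 2))    # -> -1
#     compareVersions(MIN_DCM2NIIX_VERSION, MIN_DCM2NIIX_VERSION)  # ->  0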
def scanDir(dcmdir):
"""Uses ``dcm2niix`` to scans the given DICOM directory, and returns a
list of dictionaries, one for each data series that was identified.
Each dictionary is populated with some basic metadata about the series.
"""Uses the ``dcm2niix -b o`` option to generate a BIDS sidecar JSON
file for each series in the given DICOM directory. Reads them all in,
and returns them as a sequence of dicts.
:arg dcmdir: Directory containing DICOM files.
Some additional metadata is added to each dictionary:
- ``DicomDir``: The absolute path to ``dcmdir``
:returns: A list of dictionaries, each containing metadata about
one DICOM data series.
:arg dcmdir: Directory containing DICOM series
:returns: A list of dicts, each containing the BIDS sidecar JSON
metadata for one DICOM series.
"""
if not enabled():
raise RuntimeError('dcm2niix is not available or is too old')
dcmdir = op.abspath(dcmdir)
cmd = 'dcm2niix -b o -ba n -f %s -o . {}'.format(dcmdir)
snumPattern = re.compile('^[0-9]+')
dcmdir = op.abspath(dcmdir)
cmd = f'{dcm2niix()} -b o -ba n -f %s -o . "{dcmdir}"'
series = []
with tempdir.tempdir() as td:
with open(os.devnull, 'wb') as devnull:
sp.call(cmd.split(), stdout=devnull, stderr=devnull)
sp.call(shlex.split(cmd), stdout=devnull, stderr=devnull)
files = glob.glob(op.join(td, '*.json'))
if len(files) == 0:
return []
# sort numerically by series number if possible
try:
def sortkey(f):
match = re.match(snumPattern, f)
snum = int(match.group(0))
return snum
files = sorted(files, key=sortkey)
except Exception:
files = sorted(files)
series = []
for fn in files:
with open(fn, 'rt') as f:
meta = json.load(f)
meta = json.load(f)
meta['DicomDir'] = dcmdir
# SeriesDescription is not
# guaranteed to be present
if 'SeriesDescription' not in meta:
meta['SeriesDescription'] = meta['SeriesNumber']
series.append(meta)
return series
# sort by series number
def key(s):
return s.get('SeriesNumber', sys.maxsize)
series = list(sorted(series, key=key))
return series
def seriesCRC(series):
"""Calculate a checksum string of the given DICOM series.
The returned string is of the form::
SeriesCRC[.echonumber]
Where ``SeriesCRC`` is an unsigned integer which is the CRC32
checksum of the ``SeriesInstanceUID``, and ``echonumber`` is
the ``EchoNumber`` of the series - this is only present for
multi-echo data, where the series is from the second or subsequent
echos.
:arg series: Dict containing BIDS metadata about a DICOM series,
as returned by :func:`scanDir`.
:returns: String containing a CRC32 checksum for the series.
"""
uid = series.get('SeriesInstanceUID', None)
echo = series.get('EchoNumber', None)
if uid is None:
return None
crc32 = str(binascii.crc32(uid.encode()))
if echo is not None and echo > 1:
crc32 = f'{crc32}.{echo}'
return crc32
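# For example, given hypothetical series metadata from a second echo:
#
#     series = {'SeriesInstanceUID' : '1.2.840.113619.2.312.6945',
#               'EchoNumber'        : 2}
#     seriesCRC(series)   # -> '<crc32 of the UID>.2'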
def loadSeries(series):
......@@ -192,24 +277,39 @@ def loadSeries(series):
if not enabled():
raise RuntimeError('dcm2niix is not available or is too old')
dcmdir = series['DicomDir']
snum = series['SeriesNumber']
desc = series['SeriesDescription']
cmd = 'dcm2niix -b n -f %s -z n -o . -n {} {}'.format(snum, dcmdir)
dcmdir = series['DicomDir']
snum = series['SeriesNumber']
desc = series['SeriesDescription']
version = installedVersion()
# Newer versions of dcm2niix
# require a CRC to identify
# series
if compareVersions(version, CRC_DCM2NIIX_VERSION) >= 0:
ident = seriesCRC(series)
# Older versions require
# the series number
else:
ident = snum
cmd = f'{dcm2niix()} -b n -f %s -z n -o . -n "{ident}" "{dcmdir}"'
with tempdir.tempdir() as td:
with open(os.devnull, 'wb') as devnull:
sp.call(cmd.split(), stdout=devnull, stderr=devnull)
sp.call(shlex.split(cmd), stdout=devnull, stderr=devnull)
files = glob.glob(op.join(td, '{}*.nii'.format(snum)))
files = glob.glob(op.join(td, f'{snum}*.nii'))
images = [nib.load(f, mmap=False) for f in files]
# copy images so nibabel no longer
# refs to the files (as they will
# be deleted), and use get_data()
# to force-load the image data.
images = [nib.Nifti1Image(i.get_data(), None, i.header)
# be deleted), and force-load the
# the image data into memory (to
# avoid any disk accesses due to
# e.g. memmap)
images = [nib.Nifti1Image(np.asanyarray(i.dataobj), None, i.header)
for i in images]
return [DicomImage(i, series, dcmdir, name=desc) for i in images]
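# End-to-end sketch (the directory name is illustrative):
#
#     series = scanDir('/path/to/dicomdir')  # one metadata dict per series
#     images = loadSeries(series[0])         # list of DicomImage objects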
......@@ -22,9 +22,12 @@ following functions are provided:
isFirstLevelAnalysis
loadDesign
loadContrasts
loadFTests
loadFsf
loadSettings
getThresholds
loadClusterResults
loadFEATDesignFile
The following functions return the names of various files of interest:
......@@ -38,20 +41,22 @@ The following functions return the names of various files of interest:
getPEFile
getCOPEFile
getZStatFile
getZFStatFile
getClusterMaskFile
getFClusterMaskFile
"""
import collections
import logging
import os.path as op
import numpy as np
import collections
import io
import logging
import os.path as op
import numpy as np
import fsl.utils.path as fslpath
import fsl.utils.transform as transform
from . import image as fslimage
from . import featdesign
import fsl.utils.path as fslpath
import fsl.transform.affine as affine
from . import image as fslimage
from . import featdesign
log = logging.getLogger(__name__)
......@@ -166,70 +171,83 @@ def loadContrasts(featdir):
:arg featdir: A FEAT directory.
"""
matrix = None
numContrasts = 0
names = {}
designcon = op.join(featdir, 'design.con')
filename = op.join(featdir, 'design.con')
log.debug('Loading FEAT contrasts from {}'.format(designcon))
log.debug('Loading FEAT contrasts from %s', filename)
with open(designcon, 'rt') as f:
try:
designcon = loadFEATDesignFile(filename)
contrasts = np.genfromtxt(io.StringIO(designcon['Matrix']), ndmin=2)
numContrasts = int(designcon['NumContrasts'])
names = []
while True:
line = f.readline().strip()
if numContrasts != contrasts.shape[0]:
raise RuntimeError(f'Matrix shape {contrasts.shape} does not '
f'match number of contrasts {numContrasts}')
if line.startswith('/ContrastName'):
tkns = line.split(None, 1)
num = [c for c in tkns[0] if c.isdigit()]
num = int(''.join(num))
contrasts = [list(row) for row in contrasts]
# The /ContrastName field may not
# actually have a name specified
if len(tkns) > 1:
name = tkns[1].strip()
names[num] = name
for i in range(numContrasts):
cname = designcon.get(f'ContrastName{i + 1}', '')
if cname == '':
cname = f'{i + 1}'
names.append(cname)
elif line.startswith('/NumContrasts'):
numContrasts = int(line.split()[1])
except Exception as e:
log.debug('Error reading %s: %s', filename, e, exc_info=True)
raise RuntimeError(f'{filename} does not appear '
'to be a valid design.con file') from e
elif line == '/Matrix':
break
return names, contrasts
matrix = np.loadtxt(f, ndmin=2)
if matrix is None or \
numContrasts != matrix.shape[0]:
raise RuntimeError('{} does not appear to be a '
'valid design.con file'.format(designcon))
def loadFTests(featdir):
"""Loads F-tests from a FEAT directory. Returns a list of f-test vectors
(each of which is a list itself), where each vector contains a 1 or a 0
denoting the contrasts included in the F-test.
# Fill in any missing contrast names
if len(names) != numContrasts:
for i in range(numContrasts):
if i + 1 not in names:
names[i + 1] = str(i + 1)
:arg featdir: A FEAT directory.
"""
names = [names[c + 1] for c in range(numContrasts)]
contrasts = []
filename = op.join(featdir, 'design.fts')
for row in matrix:
contrasts.append(list(row))
if not op.exists(filename):
return []
return names, contrasts
log.debug('Loading FEAT F-tests from %s', filename)
try:
desfts = loadFEATDesignFile(filename)
ftests = np.genfromtxt(io.StringIO(desfts['Matrix']), ndmin=2)
ncols = int(desfts['NumWaves'])
nrows = int(desfts['NumContrasts'])
def loadSettings(featdir):
"""Loads the analysis settings from a FEAT directory.
if ftests.shape != (nrows, ncols):
raise RuntimeError(f'Matrix shape {ftests.shape} does not match '
f'number of EVs/FTests ({ncols}, {nrows})')
Returns a dict containing the settings specified in the ``design.fsf``
file within the directory
ftests = [list(row) for row in ftests]
:arg featdir: A FEAT directory.
except Exception as e:
log.debug('Error reading %s: %s', filename, e, exc_info=True)
raise RuntimeError(f'{filename} does not appear '
'to be a valid design.fts file') from e
return ftests
def loadFsf(designfsf):
"""Loads the analysis settings from a text file (.fsf) used to configure
FEAT.
Returns a dict containing the settings specified in the file
:arg designfsf: A .fsf file.
"""
settings = collections.OrderedDict()
designfsf = op.join(featdir, 'design.fsf')
log.debug('Loading FEAT settings from {}'.format(designfsf))
log.debug('Loading FEAT settings from %s', designfsf)
with open(designfsf, 'rt') as f:
......@@ -252,6 +270,20 @@ def loadSettings(featdir):
return settings
def loadSettings(featdir):
"""Loads the analysis settings from a FEAT directory.
Returns a dict containing the settings specified in the ``design.fsf``
file within the directory
:arg featdir: A FEAT directory.
"""
designfsf = op.join(featdir, 'design.fsf')
return loadFsf(designfsf)
def loadDesign(featdir, settings):
"""Loads the design matrix from a FEAT directory.
......@@ -297,19 +329,22 @@ def isFirstLevelAnalysis(settings):
return settings['level'] == '1'
def loadClusterResults(featdir, settings, contrast):
def loadClusterResults(featdir, settings, contrast, ftest=False):
"""If cluster thresholding was used in the FEAT analysis, this function
will load and return the cluster results for the specified (0-indexed)
contrast number.
contrast or f-test.
If there are no cluster results for the given contrast, ``None`` is
returned.
If there are no cluster results for the given contrast/f-test, ``None``
is returned.
An error will be raised if the cluster file cannot be parsed.
:arg featdir: A FEAT directory.
:arg settings: A FEAT settings dictionary.
:arg contrast: 0-indexed contrast number.
:arg contrast: 0-indexed contrast or f-test number.
:arg ftest: If ``False`` (default), return cluster results for
the contrast numbered ``contrast``. Otherwise, return
cluster results for the f-test numbered ``contrast``.
:returns: A list of ``Cluster`` instances, each of which contains
information about one cluster. A ``Cluster`` object has the
......@@ -330,11 +365,16 @@ def loadClusterResults(featdir, settings, contrast):
gravity.
``zcogz`` Z voxel coordinate of cluster centre of
gravity.
``copemax`` Maximum COPE value in cluster.
``copemaxx`` X voxel coordinate of maximum COPE value.
``copemax`` Maximum COPE value in cluster (not
present for f-tests).
``copemaxx`` X voxel coordinate of maximum COPE value
(not present for f-tests).
``copemaxy`` Y voxel coordinate of maximum COPE value
(not present for f-tests).
``copemaxz`` Z voxel coordinate of maximum COPE value
(not present for f-tests).
``copemean`` Mean COPE of all voxels in the cluster
(not present for f-tests).
============ =========================================
"""
......@@ -344,8 +384,11 @@ def loadClusterResults(featdir, settings, contrast):
# the ZMax/COG etc coordinates
# are usually in voxel coordinates
coordXform = np.eye(4)
clusterFile = op.join(
featdir, 'cluster_zstat{}.txt'.format(contrast + 1))
if ftest: prefix = 'cluster_zfstat'
else: prefix = 'cluster_zstat'
clusterFile = op.join(featdir, f'{prefix}{contrast + 1}.txt')
if not op.exists(clusterFile):
......@@ -354,22 +397,16 @@ def loadClusterResults(featdir, settings, contrast):
# the cluster file will instead be called
# 'cluster_zstatX_std.txt', so we'd better
# check for that too.
clusterFile = op.join(
featdir, 'cluster_zstat{}_std.txt'.format(contrast + 1))
clusterFile = op.join(featdir, f'{prefix}{contrast + 1}_std.txt')
if not op.exists(clusterFile):
return None
# In higher levle analysis run in some standard
# In higher level analysis run in some standard
# space, the cluster coordinates are in standard
# space. We transform them to voxel coordinates
# later on.
coordXform = fslimage.Image(
getDataFile(featdir),
loadData=False).worldToVoxMat
log.debug('Loading cluster results for contrast {} from {}'.format(
contrast, clusterFile))
coordXform = fslimage.Image(getDataFile(featdir)).worldToVoxMat
# The cluster.txt file is converted
# into a list of Cluster objects,
......@@ -387,10 +424,18 @@ def loadClusterResults(featdir, settings, contrast):
# if cluster thresholding was not used,
# the cluster table will not contain
# P valuse.
# P values.
if not hasattr(self, 'p'): self.p = 1.0
if not hasattr(self, 'logp'): self.logp = 0.0
# F-test cluster results will not have
# COPE-* results
if not hasattr(self, 'copemax'): self.copemax = np.nan
if not hasattr(self, 'copemaxx'): self.copemaxx = np.nan
if not hasattr(self, 'copemaxy'): self.copemaxy = np.nan
if not hasattr(self, 'copemaxz'): self.copemaxz = np.nan
if not hasattr(self, 'copemean'): self.copemean = np.nan
# This dict provides a mapping between
# Cluster object attribute names, and
# the corresponding column name in the
......@@ -422,10 +467,9 @@ def loadClusterResults(featdir, settings, contrast):
'COPE-MAX Z (mm)' : 'copemaxz',
'COPE-MEAN' : 'copemean'}
# An error will be raised if the
# cluster file does not exist (e.g.
# if the specified contrast index
# is invalid)
log.debug('Loading cluster results for contrast %s from %s',
contrast, clusterFile)
with open(clusterFile, 'rt') as f:
# Get every line in the file,
......@@ -447,12 +491,11 @@ def loadClusterResults(featdir, settings, contrast):
colNames = colNames.split('\t')
clusterLines = [cl .split('\t') for cl in clusterLines]
# Turn each cluster line into a
# Cluster instance. An error will
# be raised if the column names
# are unrecognised (i.e. not in
# the colmap above), or if the
# file is poorly formed.
# Turn each cluster line into a Cluster
# instance. An error will be raised if the
# column names are unrecognised (i.e. not
# in the colmap above), or if the file is
# poorly formed.
clusters = [Cluster(**dict(zip(colNames, cl))) for cl in clusterLines]
# Make sure all coordinates are in voxels -
......@@ -467,17 +510,51 @@ def loadClusterResults(featdir, settings, contrast):
zcog = [c.zcogx, c.zcogy, c.zcogz]
copemax = [c.copemaxx, c.copemaxy, c.copemaxz]
zmax = transform.transform([zmax], coordXform)[0].round()
zcog = transform.transform([zcog], coordXform)[0].round()
copemax = transform.transform([copemax], coordXform)[0].round()
zmax = affine.transform([zmax], coordXform)[0]
zcog = affine.transform([zcog], coordXform)[0]
copemax = affine.transform([copemax], coordXform)[0]
c.zmaxx, c.zmaxy, c.zmaxz = zmax
c.zcogx, c.zcogy, c.zcogz = zcog
c.copemax, c.copemaxy, c.copemaxz = copemax
c.zmaxx, c.zmaxy, c.zmaxz = zmax
c.zcogx, c.zcogy, c.zcogz = zcog
c.copemaxx, c.copemaxy, c.copemaxz = copemax
return clusters
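# A usage sketch (assumes an "analysis.feat" directory in which
# cluster thresholding was used; the path is illustrative):
#
#     settings = loadSettings('analysis.feat')
#     clusters = loadClusterResults('analysis.feat', settings, 0)
#     if clusters is not None:
#         for c in clusters:
#             print(c.p, c.zmaxx, c.zmaxy, c.zmaxz)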
def loadFEATDesignFile(filename):
"""Load a FEAT design file, e.g. ``design.mat``, ``design.con``, ``design.fts``.
These files contain key-value pairs, and are formatted according to an
undocumented structure where each key is of the form "/KeyName", and is
followed immediately by a whitespace character, and then the value.
:arg filename: File to load
:returns: A dictionary of key-value pairs. The values are all left
as strings.
"""
fields = {}
with open(filename, 'rt') as f:
content = f.read()
content = content.split('/')
for line in content:
line = line.strip()
if line == '':
continue
tokens = line.split(maxsplit=1)
if len(tokens) == 1:
name, value = tokens[0], ''
else:
name, value = tokens
fields[name] = value
return fields
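# A minimal sketch (assumes an "analysis.feat" directory containing a
# "design.con" file; the field names shown are those written by FEAT):
#
#     con   = loadFEATDesignFile('analysis.feat/design.con')
#     ncons = int(con['NumContrasts'])
#     name1 = con['ContrastName1']
#
# All values are returned as strings, so multi-line fields such as
# /Matrix must be parsed by the caller.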
def getDataFile(featdir):
"""Returns the name of the file in the FEAT directory which contains
the model input data (typically called ``filtered_func_data.nii.gz``).
......@@ -521,7 +598,7 @@ def getPEFile(featdir, ev):
:arg featdir: A FEAT directory.
:arg ev: The EV number (0-indexed).
"""
pefile = op.join(featdir, 'stats', 'pe{}'.format(ev + 1))
pefile = op.join(featdir, 'stats', f'pe{ev + 1}')
return fslimage.addExt(pefile, mustExist=True)
......@@ -533,7 +610,7 @@ def getCOPEFile(featdir, contrast):
:arg featdir: A FEAT directory.
:arg contrast: The contrast number (0-indexed).
"""
copefile = op.join(featdir, 'stats', 'cope{}'.format(contrast + 1))
copefile = op.join(featdir, 'stats', f'cope{contrast + 1}')
return fslimage.addExt(copefile, mustExist=True)
......@@ -545,10 +622,22 @@ def getZStatFile(featdir, contrast):
:arg featdir: A FEAT directory.
:arg contrast: The contrast number (0-indexed).
"""
zfile = op.join(featdir, 'stats', 'zstat{}'.format(contrast + 1))
zfile = op.join(featdir, 'stats', f'zstat{contrast + 1}')
return fslimage.addExt(zfile, mustExist=True)
def getZFStatFile(featdir, ftest):
"""Returns the path of the Z-statistic file for the specified F-test.
Raises a :exc:`~fsl.utils.path.PathError` if the file does not exist.
:arg featdir: A FEAT directory.
:arg ftest: The F-test number (0-indexed).
"""
zffile = op.join(featdir, 'stats', f'zfstat{ftest + 1}')
return fslimage.addExt(zffile, mustExist=True)
def getClusterMaskFile(featdir, contrast):
"""Returns the path of the cluster mask file for the specified contrast.
......@@ -557,5 +646,17 @@ def getClusterMaskFile(featdir, contrast):
:arg featdir: A FEAT directory.
:arg contrast: The contrast number (0-indexed).
"""
mfile = op.join(featdir, 'cluster_mask_zstat{}'.format(contrast + 1))
mfile = op.join(featdir, f'cluster_mask_zstat{contrast + 1}')
return fslimage.addExt(mfile, mustExist=True)
def getFClusterMaskFile(featdir, ftest):
"""Returns the path of the cluster mask file for the specified f-test.
Raises a :exc:`~fsl.utils.path.PathError` if the file does not exist.
:arg featdir: A FEAT directory.
:arg ftest:   The f-test number (0-indexed).
"""
mfile = op.join(featdir, f'cluster_mask_zfstat{ftest + 1}')
return fslimage.addExt(mfile, mustExist=True)
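# For example (a sketch - the exact extension in the returned path
# depends on which file is present in the FEAT directory):
#
#     getFClusterMaskFile('analysis.feat', 0)
#     # -> 'analysis.feat/cluster_mask_zfstat1.nii.gz'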
......@@ -160,7 +160,7 @@ class FEATFSFDesign(object):
# Print a warning if we're
# using an old version of FEAT
if version < 6:
log.warning('Unsupported FEAT version: {}'.format(version))
log.warning('Unsupported FEAT version: %s', version)
# We need to parse the EVS a bit
# differently depending on whether
......@@ -210,8 +210,7 @@ class FEATFSFDesign(object):
continue
if (not self.__loadVoxEVs) or (ev.filename is None):
log.warning('Voxel EV image missing '
'for ev {}'.format(ev.index))
log.warning('Voxel EV image missing for ev %s', ev.index)
continue
design[:, ev.index] = ev.getData(x, y, z)
......@@ -250,8 +249,7 @@ class VoxelwiseEVMixin(object):
if op.exists(filename):
self.__filename = filename
else:
log.warning('Voxelwise EV file does not '
'exist: {}'.format(filename))
log.warning('Voxelwise EV file does not exist: %s', filename)
self.__filename = None
self.__image = None
......@@ -502,11 +500,11 @@ def getFirstLevelEVs(featDir, settings, designMat):
# - voxelwise EVs
for origIdx in range(origEVs):
title = settings[ 'evtitle{}' .format(origIdx + 1)]
shape = int(settings[ 'shape{}' .format(origIdx + 1)])
convolve = int(settings[ 'convolve{}' .format(origIdx + 1)])
deriv = int(settings[ 'deriv_yn{}' .format(origIdx + 1)])
basis = int(settings.get('basisfnum{}'.format(origIdx + 1), -1))
title = settings[ f'evtitle{origIdx + 1}']
shape = int(settings[ f'shape{origIdx + 1}'])
convolve = int(settings[ f'convolve{origIdx + 1}'])
deriv = int(settings[ f'deriv_yn{origIdx + 1}'])
basis = int(settings.get(f'basisfnum{origIdx + 1}', -1))
# Normal EV. This is just a column
# in the design matrix, defined by
......@@ -525,8 +523,7 @@ def getFirstLevelEVs(featDir, settings, designMat):
# The addExt function will raise an
# error if the file does not exist.
filename = op.join(
featDir, 'designVoxelwiseEV{}'.format(origIdx + 1))
filename = op.join(featDir, f'designVoxelwiseEV{origIdx + 1}')
filename = fslimage.addExt(filename, True)
evs.append(VoxelwiseEV(len(evs), origIdx, title, filename))
......@@ -607,7 +604,7 @@ def getFirstLevelEVs(featDir, settings, designMat):
startIdx = len(evs) + 1
if voxConfLocs != list(range(startIdx, startIdx + len(voxConfFiles))):
raise FSFError('Unsupported voxelwise confound ordering '
'({} -> {})'.format(startIdx, voxConfLocs))
f'({startIdx} -> {voxConfLocs})')
# Create the voxelwise confound EVs.
# We make a name for the EV from the
......@@ -680,7 +677,7 @@ def getHigherLevelEVs(featDir, settings, designMat):
for origIdx in range(origEVs):
# All we need is the title
title = settings['evtitle{}'.format(origIdx + 1)]
title = settings[f'evtitle{origIdx + 1}']
evs.append(NormalEV(len(evs), origIdx, title))
# Only the input file is specified for
......@@ -689,7 +686,7 @@ def getHigherLevelEVs(featDir, settings, designMat):
# name.
for origIdx in range(voxEVs):
filename = settings['evs_vox_{}'.format(origIdx + 1)]
filename = settings[f'evs_vox_{origIdx + 1}']
title = op.basename(fslimage.removeExt(filename))
evs.append(VoxelwiseEV(len(evs), origIdx, title, filename))
......@@ -705,12 +702,12 @@ def loadDesignMat(designmat):
:arg designmat: Path to the ``design.mat`` file.
"""
log.debug('Loading FEAT design matrix from {}'.format(designmat))
log.debug('Loading FEAT design matrix from %s', designmat)
matrix = np.loadtxt(designmat, comments='/', ndmin=2)
if matrix is None or matrix.size == 0 or len(matrix.shape) != 2:
raise FSFError('{} does not appear to be a '
'valid design.mat file'.format(designmat))
raise FSFError(f'{designmat} does not appear '
'to be a valid design.mat file')
return matrix
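# For example (a sketch, assuming a first-level "analysis.feat"
# directory exists):
#
#     design            = loadDesignMat('analysis.feat/design.mat')
#     ntimepoints, nevs = design.shape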
......@@ -63,8 +63,8 @@ class FEATImage(fslimage.Image):
path = op.join(path, 'filtered_func_data')
if not featanalysis.isFEATImage(path):
raise ValueError('{} does not appear to be data '
'from a FEAT analysis'.format(path))
raise ValueError(f'{path} does not appear to be '
'data from a FEAT analysis')
featDir = op.dirname(path)
settings = featanalysis.loadSettings( featDir)
......@@ -72,9 +72,11 @@ class FEATImage(fslimage.Image):
if featanalysis.hasStats(featDir):
design = featanalysis.loadDesign( featDir, settings)
names, cons = featanalysis.loadContrasts(featDir)
ftests = featanalysis.loadFTests( featDir)
else:
design = None
names, cons = [], []
ftests = []
fslimage.Image.__init__(self, path, **kwargs)
......@@ -83,26 +85,31 @@ class FEATImage(fslimage.Image):
self.__design = design
self.__contrastNames = names
self.__contrasts = cons
self.__ftests = ftests
self.__settings = settings
self.__residuals = None
self.__pes = [None] * self.numEVs()
self.__copes = [None] * self.numContrasts()
self.__zstats = [None] * self.numContrasts()
self.__zfstats = [None] * self.numFTests()
self.__clustMasks = [None] * self.numContrasts()
self.__fclustMasks = [None] * self.numFTests()
if 'name' not in kwargs:
self.name = '{}: {}'.format(self.__analysisName, self.name)
self.name = f'{self.__analysisName}: {self.name}'
def __del__(self):
"""Clears references to any loaded images."""
self.__design = None
self.__residuals = None
self.__pes = None
self.__copes = None
self.__zstats = None
self.__clustMasks = None
self.__design = None
self.__residuals = None
self.__pes = None
self.__copes = None
self.__zstats = None
self.__zfstats = None
self.__clustMasks = None
self.__fclustMasks = None
def getFEATDir(self):
......@@ -191,6 +198,11 @@ class FEATImage(fslimage.Image):
return len(self.__contrasts)
def numFTests(self):
"""Returns the number of f-tests in the analysis."""
return len(self.__ftests)
def contrastNames(self):
"""Returns a list containing the name of each contrast in the analysis.
"""
......@@ -206,6 +218,15 @@ class FEATImage(fslimage.Image):
return [list(c) for c in self.__contrasts]
def ftests(self):
"""Returns a list containing the analysis f-test vectors.
See :func:`.featanalysis.loadFTests`
"""
return [list(f) for f in self.__ftests]
def thresholds(self):
"""Returns the statistical thresholds used in the analysis.
......@@ -214,14 +235,16 @@ class FEATImage(fslimage.Image):
return featanalysis.getThresholds(self.__settings)
def clusterResults(self, contrast):
"""Returns the clusters found in the analysis.
def clusterResults(self, contrast, ftest=False):
"""Returns the clusters found in the analysis for the specified
contrast or f-test.
See :func:.featanalysis.loadClusterResults`
"""
return featanalysis.loadClusterResults(self.__featDir,
self.__settings,
contrast)
contrast,
ftest)
def getPE(self, ev):
......@@ -229,12 +252,10 @@ class FEATImage(fslimage.Image):
if self.__pes[ev] is None:
pefile = featanalysis.getPEFile(self.__featDir, ev)
evname = self.evNames()[ev]
self.__pes[ev] = fslimage.Image(
pefile,
name='{}: PE{} ({})'.format(
self.__analysisName,
ev + 1,
self.evNames()[ev]))
name=f'{self.__analysisName}: PE{ev + 1} ({evname})')
return self.__pes[ev]
......@@ -246,7 +267,7 @@ class FEATImage(fslimage.Image):
resfile = featanalysis.getResidualFile(self.__featDir)
self.__residuals = fslimage.Image(
resfile,
name='{}: residuals'.format(self.__analysisName))
name=f'{self.__analysisName}: residuals')
return self.__residuals
......@@ -256,12 +277,10 @@ class FEATImage(fslimage.Image):
if self.__copes[con] is None:
copefile = featanalysis.getCOPEFile(self.__featDir, con)
conname = self.contrastNames()[con]
self.__copes[con] = fslimage.Image(
copefile,
name='{}: COPE{} ({})'.format(
self.__analysisName,
con + 1,
self.contrastNames()[con]))
name=f'{self.__analysisName}: COPE{con + 1} ({conname})')
return self.__copes[con]
......@@ -270,35 +289,54 @@ class FEATImage(fslimage.Image):
"""
if self.__zstats[con] is None:
zfile = featanalysis.getZStatFile(self.__featDir, con)
zfile = featanalysis.getZStatFile(self.__featDir, con)
conname = self.contrastNames()[con]
self.__zstats[con] = fslimage.Image(
zfile,
name='{}: zstat{} ({})'.format(
self.__analysisName,
con + 1,
self.contrastNames()[con]))
name=f'{self.__analysisName}: zstat{con + 1} ({conname})')
return self.__zstats[con]
def getZFStats(self, ftest):
"""Returns the Z statistic image for the given f-test (0-indexed). """
if self.__zfstats[ftest] is None:
zfile = featanalysis.getZFStatFile(self.__featDir, ftest)
self.__zfstats[ftest] = fslimage.Image(
zfile,
name=f'{self.__analysisName}: zfstat{ftest + 1}')
return self.__zfstats[ftest]
def getClusterMask(self, con):
"""Returns the cluster mask image for the given contrast (0-indexed).
"""
if self.__clustMasks[con] is None:
mfile = featanalysis.getClusterMaskFile(self.__featDir, con)
mfile = featanalysis.getClusterMaskFile(self.__featDir, con)
conname = self.contrastNames()[con]
self.__clustMasks[con] = fslimage.Image(
mfile,
name='{}: cluster mask for zstat{} ({})'.format(
self.__analysisName,
con + 1,
self.contrastNames()[con]))
name=f'{self.__analysisName}: cluster mask '
f'for zstat{con + 1} ({conname})')
return self.__clustMasks[con]
def getFClusterMask(self, ftest):
"""Returns the cluster mask image for the given f-test (0-indexed).
"""
if self.__fclustMasks[ftest] is None:
mfile = featanalysis.getFClusterMaskFile(self.__featDir, ftest)
self.__fclustMasks[ftest] = fslimage.Image(
mfile,
name=f'{self.__analysisName}: cluster mask '
f'for zfstat{ftest + 1}')
return self.__fclustMasks[ftest]
def fit(self, contrast, xyz):
"""Calculates the model fit for the given contrast vector
at the given voxel. See the :func:`modelFit` function.
......
......@@ -16,18 +16,21 @@
"""
import os.path as op
import itertools as it
import math
import os.path as op
def loadLabelFile(filename,
includeLabel=None,
excludeLabel=None,
returnIndices=False):
"""Loads component labels from the specified file. The file is assuemd
returnIndices=False,
missingLabel='Unknown',
returnProbabilities=False):
"""Loads component labels from the specified file. The file is assumed
to be of the format generated by FIX, Melview or ICA-AROMA; such a file
should have a structure resembling the following::
filtered_func_data.ica
1, Signal, False
2, Unclassified Noise, True
......@@ -39,7 +42,6 @@ def loadLabelFile(filename,
8, Signal, False
[2, 5, 6, 7]
.. note:: This function will also parse files which only contain a
component list, e.g.::
......@@ -66,31 +68,46 @@ def loadLabelFile(filename,
- One or more labels for the component (multiple labels must be
comma-separated).
- ``'True'`` if the component has been classified as *bad*,
``'False'`` otherwise. This field is optional - if the last
comma-separated token on a line is not equal (case-insensitive)
to ``True`` or ``False``, it is interpreted as a component label.
- ``'True'`` if the component has been classified as *bad*, ``'False'``
otherwise. This field is optional - if the last non-numeric
comma-separated token on a line is not equal to ``True`` or ``False``
(case-insensitive), it is interpreted as a component label.
- A value between 0 and 1, which gives the probability of the component
being signal, as generated by an automatic classifier (e.g. FIX). This
field is optional - it is output by some versions of FIX.
The last line of the file contains the index (starting from 1) of all
*bad* components, i.e. those components which are not classified as
signal or unknown.
:arg filename: Name of the label file to load.
:arg filename: Name of the label file to load.
:arg includeLabel: If the file contains a single line containing a
list component indices, this label will be used
for the components in the list. Defaults to
``'Unclassified noise'`` for FIX-like files, and
``'Movement'`` for ICA-AROMA-like files.
:arg includeLabel: If the file contains a single line containing a list
component indices, this label will be used for the
components in the list. Defaults to 'Unclassified
noise' for FIX-like files, and 'Movement' for
ICA-AROMA-like files.
:arg excludeLabel: If the file contains a single line containing
component indices, this label will be used for
the components that are not in the list.
Defaults to ``'Signal'`` for FIX-like files, and
``'Unknown'`` for ICA-AROMA-like files.
:arg excludeLabel: If the file contains a single line containing component
indices, this label will be used for the components
that are not in the list. Defaults to 'Signal' for
FIX-like files, and 'Unknown' for ICA-AROMA-like files.
:arg returnIndices: Defaults to ``False``. If ``True``, a list
containing the noisy component numbers that were
listed in the file is returned.
:arg returnIndices: Defaults to ``False``. If ``True``, a list containing
the noisy component numbers that were listed in the
file is returned.
:arg missingLabel: Label to use for any components which are not
present (only used for label files, not for noise
component files).
:arg returnProbabilities: Defaults to ``False``. If ``True``, a list
containing the component classification
probabilities is returned. If the file does not
contain probabilities, every value in this list
will be nan.
:returns: A tuple containing:
......@@ -102,72 +119,55 @@ def loadLabelFile(filename,
- If ``returnIndices is True``, a list of the noisy component
indices (starting from 1) that were specified in the file.
- If ``returnProbabilities is True``, a list of the component
classification probabilities that were specified in the
file (all nan if they are not in the file).
.. note:: Some label files generated by old versions of FIX/Melview do
not contain a line for every component (unknown/unlabelled
components may not be listed). For these files, and also for
files which only contain a component list, there is no way of
knowing how many components were in the data, so the returned
list may contain fewer entries than there are components.
"""
signalLabels = None
filename = op.abspath(filename)
filename = op.abspath(filename)
probabilities = None
signalLabels = None
with open(filename, 'rt') as f:
lines = f.readlines()
if len(lines) < 1:
raise InvalidLabelFileError('Invalid FIX classification '
raise InvalidLabelFileError(f'{filename}: Invalid FIX classification '
'file - not enough lines')
lines = [l.strip() for l in lines]
lines = [l for l in lines if l != '']
# If the file contains a single
# line, we assume that it is just
# a comma-separated list of noise
# components.
if len(lines) == 1:
line = lines[0]
# if the list is contained in
# square brackets, we assume
# that it is a FIX output file,
# where included components have
# been classified as noise, and
# excluded components as signal.
#
# Otherwise we assume that it
# is an AROMA file, where
# included components have
# been classified as being due
# to motion, and excluded
# components unclassified.
if includeLabel is None:
if line[0] == '[': includeLabel = 'Unclassified noise'
else: includeLabel = 'Movement'
if excludeLabel is None:
if line[0] == '[': excludeLabel = 'Signal'
else: excludeLabel = 'Unknown'
else:
signalLabels = [excludeLabel]
# Remove any leading/trailing
# whitespace or brackets.
line = lines[0].strip(' []')
melDir = None
noisyComps = [int(i) for i in line.split(',')]
allLabels = []
for i in range(max(noisyComps)):
if (i + 1) in noisyComps: allLabels.append([includeLabel])
else: allLabels.append([excludeLabel])
# Otherwise, we assume that
# it is a full label file.
else:
# If the file contains one or two lines, we
# assume that it is just a comma-separated list
# of noise components (possibly preceeded by
# the MELODIC directory path)
if len(lines) <= 2:
melDir, noisyComps, allLabels, signalLabels = \
_parseSingleLineLabelFile(lines, includeLabel, excludeLabel)
probabilities = [math.nan] * len(allLabels)
melDir = lines[0]
noisyComps = lines[-1].strip(' []').split(',')
noisyComps = [c for c in noisyComps if c != '']
noisyComps = [int(c) for c in noisyComps]
# Otherwise, we assume that it is a full label file.
else:
melDir, noisyComps, allLabels, probabilities = \
_parseFullLabelFile(filename, lines, missingLabel)
# There's no way to validate
# the melodic directory path,
# but let's try anyway.
if melDir is not None:
if len(melDir.split(',')) >= 3:
raise InvalidLabelFileError(
f'{filename}: First line does not look like '
f'a MELODIC directory path: {melDir}')
# The melodic directory path should
# either be an absolute path, or
......@@ -176,38 +176,6 @@ def loadLabelFile(filename,
if not op.isabs(melDir):
melDir = op.join(op.dirname(filename), melDir)
# Parse the labels for every component
allLabels = []
for i, compLine in enumerate(lines[1:-1]):
tokens = compLine.split(',')
tokens = [t.strip() for t in tokens]
if len(tokens) < 3:
raise InvalidLabelFileError(
'Invalid FIX classification file - '
'line {}: {}'.format(i + 1, compLine))
try:
compIdx = int(tokens[0])
except ValueError:
raise InvalidLabelFileError(
'Invalid FIX classification file - '
'line {}: {}'.format(i + 1, compLine))
if tokens[-1].lower() in ('true', 'false'):
compLabels = tokens[1:-1]
else:
compLabels = tokens[1:]
if compIdx != i + 1:
raise InvalidLabelFileError(
'Invalid FIX classification file - wrong component '
'number at line {}: {}'.format(i + 1, compLine))
allLabels.append(compLabels)
# Validate the labels against
# the noisy list - all components
# in the noisy list should not
......@@ -218,8 +186,8 @@ def loadLabelFile(filename,
noise = isNoisyComponent(labels, signalLabels)
if noise and (comp not in noisyComps):
raise InvalidLabelFileError('Noisy component {} has invalid '
'labels: {}'.format(comp, labels))
raise InvalidLabelFileError(f'{filename}: Noisy component {comp} '
f'has invalid labels: {labels}')
for comp in noisyComps:
......@@ -228,44 +196,187 @@ def loadLabelFile(filename,
noise = isNoisyComponent(labels, signalLabels)
if not noise:
raise InvalidLabelFileError('Noisy component {} is missing '
'a noise label'.format(comp))
raise InvalidLabelFileError(f'{filename}: Noisy component {comp} '
'is missing a noise label')
retval = [melDir, allLabels]
if returnIndices: return melDir, allLabels, noisyComps
else: return melDir, allLabels
if returnIndices: retval.append(noisyComps)
if returnProbabilities: retval.append(probabilities)
return tuple(retval)
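# A usage sketch (the file name is illustrative; the optional return
# values are appended in the order shown):
#
#     melDir, labels, noisy, probs = loadLabelFile(
#         'fix4melview_std_thr20.txt',
#         returnIndices=True,
#         returnProbabilities=True)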
def _parseSingleLineLabelFile(lines, includeLabel, excludeLabel):
"""Called by :func:`loadLabelFile`. Parses the contents of an
ICA-AROMA-style label file which just contains a list of noise
components (and possibly the MELODIC directory path), e.g.::
filtered_func_data.ica
[2, 5, 6, 7]
"""
signalLabels = None
noisyComps = lines[-1]
if len(lines) == 2: melDir = lines[0]
else: melDir = None
# if the list is contained in
# square brackets, we assume
# that it is a FIX output file,
# where included components have
# been classified as noise, and
# excluded components as signal.
#
# Otherwise we assume that it
# is an AROMA file, where
# included components have
# been classified as being due
# to motion, and excluded
# components unclassified.
if includeLabel is None:
if noisyComps[0] == '[': includeLabel = 'Unclassified noise'
else: includeLabel = 'Movement'
if excludeLabel is None:
if noisyComps[0] == '[': excludeLabel = 'Signal'
else: excludeLabel = 'Unknown'
else:
signalLabels = [excludeLabel]
# Remove any leading/trailing
# whitespace or brackets.
noisyComps = noisyComps.strip(' []')
noisyComps = [int(i) for i in noisyComps.split(',')]
allLabels = []
for i in range(max(noisyComps)):
if (i + 1) in noisyComps: allLabels.append([includeLabel])
else: allLabels.append([excludeLabel])
return melDir, noisyComps, allLabels, signalLabels
def _parseFullLabelFile(filename, lines, missingLabel):
"""Called by :func:`loadLabelFile`. Parses the contents of a
FIX/Melview-style label file which contains labels for each component,
e.g.::
filtered_func_data.ica
1, Signal, False
2, Unclassified Noise, True
3, Unknown, False
4, Signal, False
5, Unclassified Noise, True
6, Unclassified Noise, True
7, Unclassified Noise, True
8, Signal, False
[2, 5, 6, 7]
"""
melDir = lines[0]
noisyComps = lines[-1].strip(' []').split(',')
noisyComps = [c for c in noisyComps if c != '']
noisyComps = [int(c) for c in noisyComps]
# Parse the labels for every component.
# Initially store as a {comp : ([labels], probability)} dict.
allLabels = {}
for i, compLine in enumerate(lines[1:-1]):
tokens = compLine.split(',')
tokens = [t.strip() for t in tokens]
if len(tokens) < 3:
raise InvalidLabelFileError(
f'{filename}: Invalid FIX classification '
f'file - line {i + 1}: {compLine}')
try:
compIdx = int(tokens[0])
if compIdx in allLabels:
raise ValueError()
except ValueError:
raise InvalidLabelFileError(
f'{filename}: Invalid FIX classification '
f'file - line {i + 1}: {compLine}')
tokens = tokens[1:]
probability = math.nan
# last token could be classification probability
if _isfloat(tokens[-1]):
probability = float(tokens[-1])
tokens = tokens[:-1]
# true/false is ignored as it is superfluous
if tokens[-1].lower() in ('true', 'false'):
tokens = tokens[:-1]
allLabels[compIdx] = tokens, probability
# Convert {comp : [labels]} into a list
# of lists, filling in missing components
allLabelsList = []
probabilities = []
for i in range(max(it.chain(allLabels.keys(), noisyComps))):
labels, prob = allLabels.get(i + 1, ([missingLabel], math.nan))
allLabelsList.append(labels)
probabilities.append(prob)
allLabels = allLabelsList
return melDir, noisyComps, allLabels, probabilities
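# For example, a line reading "2, Unclassified Noise, True, 0.35"
# yields allLabels[1] == ['Unclassified Noise'] and
# probabilities[1] == 0.35 (the True/False token is discarded).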
def _isfloat(s):
"""Returns True if the given string appears to contain a floating
point number, False otherwise.
"""
try:
float(s)
return True
except Exception:
return False
def saveLabelFile(allLabels,
filename,
dirname=None,
listBad=True,
signalLabels=None):
signalLabels=None,
probabilities=None):
"""Saves the given classification labels to the specified file. The
classifications are saved in the format described in the
:func:`loadLabelFile` method.
:arg allLabels: A list of lists, one list for each component, where
each list contains the labels for the corresponding
component.
:arg allLabels: A list of lists, one list for each component, where
each list contains the labels for the corresponding
component.
:arg filename: Name of the file to which the labels should be saved.
:arg filename: Name of the file to which the labels should be saved.
:arg dirname: If provided, is output as the first line of the file.
Intended to be a relative path to the MELODIC analysis
directory with which this label file is associated. If
not provided, a ``'.'`` is output as the first line.
:arg dirname: If provided, is output as the first line of the file.
Intended to be a relative path to the MELODIC analysis
directory with which this label file is associated. If
not provided, a ``'.'`` is output as the first line.
:arg listBad: If ``True`` (the default), the last line of the file
will contain a comma separated list of components which
are deemed 'noisy' (see :func:`isNoisyComponent`).
:arg listBad: If ``True`` (the default), the last line of the file
will contain a comma separated list of components which
are deemed 'noisy' (see :func:`isNoisyComponent`).
:arg signalLabels: Labels which should be deemed 'signal' - see the
:func:`isNoisyComponent` function.
:arg signalLabels: Labels which should be deemed 'signal' - see the
:func:`isNoisyComponent` function.
:arg probabilities: Classification probabilities. If provided, the
probability for each component is saved to the file.
"""
lines = []
noisyComps = []
if probabilities is not None and len(probabilities) != len(allLabels):
raise ValueError('len(probabilities) != len(allLabels)')
# The first line - the melodic directory name
if dirname is None:
dirname = '.'
......@@ -283,6 +394,9 @@ def saveLabelFile(allLabels,
labels = [l.replace(',', '_') for l in labels]
tokens = [str(comp)] + labels + [str(noise)]
if probabilities is not None:
tokens.append(f'{probabilities[i]:0.6f}')
lines.append(', '.join(tokens))
if noise:
......@@ -318,4 +432,3 @@ class InvalidLabelFileError(Exception):
"""Exception raised by the :func:`loadLabelFile` function when an attempt
is made to load an invalid label file.
"""
pass
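# A round-trip sketch (the labels, probabilities and file name are
# illustrative):
#
#     labels = [['Signal'], ['Unclassified noise'], ['Signal']]
#     saveLabelFile(labels, 'labels.txt',
#                   probabilities=[0.9, 0.1, 0.8])
#     melDir, loaded = loadLabelFile('labels.txt')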
......@@ -67,7 +67,8 @@ CORE_GEOMETRY_FILES = ['?h.orig',
'?h.pial',
'?h.white',
'?h.inflated',
'?h.sphere']
'?h.sphere',
'?h.pial_semi_inflated']
"""File patterns for identifying the core Freesurfer geometry files. """
......@@ -76,7 +77,8 @@ CORE_GEOMETRY_DESCRIPTIONS = [
"Freesurfer surface (pial)",
"Freesurfer surface (white matter)",
"Freesurfer surface (inflated)",
"Freesurfer surface (sphere)"]
"Freesurfer surface (sphere)",
"Freesurfer surface (pial semi-inflated)"]
"""A description for each extension in :attr:`GEOMETRY_EXTENSIONS`. """
......
......@@ -25,12 +25,14 @@ are available:
import glob
import re
import os.path as op
import numpy as np
import nibabel as nib
import fsl.utils.path as fslpath
import fsl.utils.bids as bids
import fsl.data.constants as constants
import fsl.data.mesh as fslmesh
......@@ -45,6 +47,15 @@ EXTENSION_DESCRIPTIONS = ['GIFTI surface file', 'GIFTI file']
"""A description for each of the :data:`ALLOWED_EXTENSIONS`. """
VERTEX_DATA_EXTENSIONS = ['.func.gii',
'.shape.gii',
'.label.gii',
'.time.gii']
"""File suffixes which are interpreted as GIFTI vertex data files,
containing data values for every vertex in the mesh.
"""
class GiftiMesh(fslmesh.Mesh):
"""Class which represents a GIFTI surface image. This is essentially
just a 3D model made of triangles.
......@@ -90,9 +101,24 @@ class GiftiMesh(fslmesh.Mesh):
for i, v in enumerate(vertices):
if i == 0: key = infile
else: key = '{}_{}'.format(infile, i)
else: key = f'{infile}_{i}'
self.addVertices(v, key, select=(i == 0), fixWinding=fixWinding)
self.setMeta(infile, surfimg)
self.meta[infile] = surfimg
# Copy all metadata entries for the GIFTI image
for k, v in surfimg.meta.items():
self.meta[k] = v
# and also for each GIFTI data array - triangles
# are stored under "faces", and pointsets are
# stored under "vertices"/[0,1,2...] (as there may
# be multiple pointsets in a file)
self.meta['vertices'] = {}
for i, arr in enumerate(surfimg.darrays):
if arr.intent == constants.NIFTI_INTENT_POINTSET:
self.meta['vertices'][i] = dict(arr.meta)
elif arr.intent == constants.NIFTI_INTENT_TRIANGLE:
self.meta['faces'] = dict(arr.meta)
if vdata is not None:
self.addVertexData(infile, vdata)
......@@ -102,8 +128,11 @@ class GiftiMesh(fslmesh.Mesh):
# as the specified one.
if loadAll:
# Only attempt to auto-load sensibly
# named gifti files (i.e. *.surf.gii,
# rather than *.gii).
surfFiles = relatedFiles(infile, [ALLOWED_EXTENSIONS[0]])
nvertices = vertices[0].shape[0]
surfFiles = relatedFiles(infile, ALLOWED_EXTENSIONS)
for sfile in surfFiles:
......@@ -116,7 +145,7 @@ class GiftiMesh(fslmesh.Mesh):
continue
self.addVertices(vertices[0], sfile, select=False)
self.setMeta(sfile, surfimg)
self.meta[sfile] = surfimg
def loadVertices(self, infile, key=None, *args, **kwargs):
......@@ -138,9 +167,12 @@ class GiftiMesh(fslmesh.Mesh):
surfimg, _, vertices, _ = loadGiftiMesh(infile)
vertices = self.addVertices(vertices, key, *args, **kwargs)
for i, v in enumerate(vertices):
if i == 0: key = infile
else: key = f'{infile}_{i}'
vertices[i] = self.addVertices(v, key, *args, **kwargs)
self.setMeta(infile, surfimg)
self.meta[infile] = surfimg
return vertices
......@@ -204,15 +236,15 @@ def loadGiftiMesh(filename):
vdata = [d for d in gimg.darrays if d.intent not in (pscode, tricode)]
if len(triangles) != 1:
raise ValueError('{}: GIFTI surface files must contain '
'exactly one triangle array'.format(filename))
raise ValueError(f'{filename}: GIFTI surface files must '
'contain exactly one triangle array')
if len(pointsets) == 0:
raise ValueError('{}: GIFTI surface files must contain '
'at least one pointset array'.format(filename))
raise ValueError(f'{filename}: GIFTI surface files must '
'contain at least one pointset array')
vertices = [ps.data for ps in pointsets]
indices = triangles[0].data
indices = np.atleast_2d(triangles[0].data)
if len(vdata) == 0: vdata = None
else: vdata = prepareGiftiVertexData(vdata, filename)
......@@ -256,17 +288,17 @@ def prepareGiftiVertexData(darrays, filename=None):
vertices.
"""
intents = set([d.intent for d in darrays])
intents = {d.intent for d in darrays}
if len(intents) != 1:
raise ValueError('{} contains multiple (or no) intents'
': {}'.format(filename, intents))
raise ValueError(f'{filename} contains multiple '
f'(or no) intents: {intents}')
intent = intents.pop()
if intent in (constants.NIFTI_INTENT_POINTSET,
constants.NIFTI_INTENT_TRIANGLE):
raise ValueError('{} contains surface data'.format(filename))
raise ValueError(f'{filename} contains surface data')
# Just a single array - return it as-is.
# n.b. Storing (M, N) data in a single
......@@ -281,8 +313,8 @@ def prepareGiftiVertexData(darrays, filename=None):
vdata = [d.data for d in darrays]
if any([len(d.shape) != 1 for d in vdata]):
raise ValueError('{} contains one or more non-vector '
'darrays'.format(filename))
raise ValueError(f'{filename} contains one or '
'more non-vector darrays')
vdata = np.vstack(vdata).T
vdata = vdata.reshape(vdata.shape[0], -1)
......@@ -295,45 +327,110 @@ def relatedFiles(fname, ftypes=None):
directory which appear to be related with the given one. Files which
share the same prefix are assumed to be related to the given file.
This function assumes that the GIFTI files are named according to a
standard convention - the following conventions are supported:
- HCP-style, i.e.: ``<subject>.<hemi>.<type>.<space>.<ftype>.gii``
- BIDS-style, i.e.:
``<source_prefix>_hemi-<hemi>[_space-<space>]*_<suffix>.<ftype>.gii``
If the files are not named according to one of these conventions, this
function will return an empty list.
:arg fname: Name of the file for which to search for related files
:arg ftypes: If provided, only files with suffixes in this list are
searched for. Defaults to files which contain vertex data.
searched for. Defaults to :attr:`VERTEX_DATA_EXTENSIONS`.
"""
if ftypes is None:
ftypes = ['.func.gii', '.shape.gii', '.label.gii', '.time.gii']
ftypes = VERTEX_DATA_EXTENSIONS
# We want to return all files in the same
# directory which have the following name:
path = op.abspath(fname)
dirname, fname = op.split(path)
# We want to identify all files in the same
# directory which are associated with the
# given file. We assume that the files are
# named according to one of the following
# conventions:
#
# [subj].[hemi].[type].*.[ftype]
# - HCP style:
# <subject>.<hemi>.<type>.<space>.<ftype>.gii
#
# where
# - [subj] is the subject ID, and matches fname
# - BIDS style:
# <source_prefix>_hemi-<hemi>[_space-<space>]*.<ftype>.gii
#
# - [hemi] is the hemisphere, and matches fname
# We cannot assume consistent ordering of
# the entities (key-value pairs) within a
# BIDS style filename, so we cannot simply
# use a regular expression or glob pattern.
# Instead, for each style we define:
#
# - [type] defines the file contents
# - a "matcher" function, which tests
# whether the file matches the style,
# and returns the important elements
# from the file name.
#
# - suffix is func, shape, label, time, or `ftype`
path = op.abspath(fname)
dirname, fname = op.split(path)
# get the [subj].[hemi] prefix
try:
subj, hemi, _ = fname.split('.', 2)
prefix = '.'.join((subj, hemi))
except Exception:
# - a "searcher" function, which takes
# the elements of the input file
# that were extracted by the matcher,
# and searches for other related files
# HCP style - extract "<subject>.<hemi>"
# and "<space>".
def matchhcp(f):
pat = r'^(.*\.[LR])\..*\.(.*)\..*\.gii$'
match = re.match(pat, f)
if match:
return match.groups()
else:
return None
def searchhcp(match, ftype):
prefix, space = match
template = f'{prefix}.*.{space}{ftype}'
return glob.glob(op.join(dirname, template))
# BIDS style - extract all entities (kv
# pairs), ignoring specific irrelevant
# ones.
def matchbids(f):
try: match = bids.BIDSFile(f)
except ValueError: return None
match.entities.pop('desc', None)
return match
def searchbids(match, ftype):
allfiles = glob.glob(op.join(dirname, '*{}'.format(ftype)))
for f in allfiles:
try: bf = bids.BIDSFile(f)
except ValueError: continue
if bf.match(match, False):
yield f
# find the first style that matches
matchers = [matchhcp, matchbids]
searchers = [searchhcp, searchbids]
for matcher, searcher in zip(matchers, searchers):
match = matcher(fname)
if match:
break
# Give up if the file does
# not match any known style.
else:
return []
# Build a list of files in the same
# directory and matching the template
related = []
for ftype in ftypes:
hits = glob.glob(op.join(dirname, '{}*{}'.format(prefix, ftype)))
hits = searcher(match, ftype)
# eliminate dupes
related.extend([h for h in hits if h not in related])
# exclude the file itself
return [r for r in related if r != path]
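# For example, for an HCP-style input file (the names are
# illustrative):
#
#     relatedFiles('sub01.L.white.32k_fs_LR.surf.gii')
#
# the search would return sibling files sharing the "sub01.L" prefix
# and "32k_fs_LR" space which have a vertex data suffix, e.g.
# "sub01.L.thickness.32k_fs_LR.shape.gii".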
......@@ -32,28 +32,37 @@ and file names:
"""
import os
import os.path as op
import string
import logging
import tempfile
import warnings
import six
import numpy as np
import scipy.ndimage as ndimage
import os
import os.path as op
import itertools as it
import collections.abc as abc
import enum
import json
import string
import logging
import tempfile
from pathlib import Path
from typing import Union
import numpy as np
import numpy.linalg as npla
import nibabel as nib
import nibabel.fileslice as fileslice
import fsl.utils.meta as meta
import fsl.utils.transform as transform
import fsl.utils.deprecated as deprecated
import fsl.transform.affine as affine
import fsl.utils.notifier as notifier
import fsl.utils.naninfrange as nir
import fsl.utils.memoize as memoize
import fsl.utils.path as fslpath
import fsl.utils.deprecated as deprecated
import fsl.utils.bids as fslbids
import fsl.data.constants as constants
import fsl.data.imagewrapper as imagewrapper
PathLike = Union[str, Path]
ImageSource = Union[PathLike, nib.Nifti1Image, np.ndarray, 'Image']
log = logging.getLogger(__name__)
......@@ -61,14 +70,14 @@ log = logging.getLogger(__name__)
ALLOWED_EXTENSIONS = ['.nii.gz', '.nii', '.img', '.hdr', '.img.gz', '.hdr.gz']
"""The file extensions which we understand. This list is used as the default
if the ``allowedExts`` parameter is not passed to any of the functions
below.
if the ``allowedExts`` parameter is not passed to any of the ``*Ext``
functions, or the :func:`looksLikeImage` function.
"""
EXTENSION_DESCRIPTIONS = ['Compressed NIFTI images',
'NIFTI images',
'ANALYZE75 images',
'NIFTI/ANALYZE75 images',
'NIFTI/ANALYZE75 headers',
'Compressed NIFTI/ANALYZE75 images',
'Compressed NIFTI/ANALYZE75 headers']
......@@ -88,6 +97,47 @@ Made available in this module for convenience.
"""
class DataManager:
"""The ``DataManager`` defines an interface which may be used by
:class:`Image` instances for managing access and modification of
data in a ``nibabel.Nifti1Image`` image.
"""
def copy(self, nibImage : nib.Nifti1Image):
"""Return a copy of this ``DataManager``, associated with the
given ``nibImage``,
"""
raise NotImplementedError()
@property
def dataRange(self):
"""Return the image minimum/maximum data values as a ``(min, max)``
tuple.
"""
raise NotImplementedError()
@property
def editable(self):
"""Return ``True`` if the image data can be modified, ``False``
otherwise. The default implementation returns ``True``.
"""
return True
def __getitem__(self, slc):
"""Return data at ``slc``. """
raise NotImplementedError()
def __setitem__(self, slc, val):
"""Set data at ``slc`` to ``val``. """
raise NotImplementedError()
class Nifti(notifier.Notifier, meta.Meta):
"""The ``Nifti`` class is intended to be used as a base class for
things which either are, or are associated with, a NIFTI image.
......@@ -107,6 +157,9 @@ class Nifti(notifier.Notifier, meta.Meta):
``shape`` A list/tuple containing the number of voxels along
each image dimension.
``realShape`` A list/tuple containing the actual image data shape
- see notes below.
``pixdim`` A list/tuple containing the length of one voxel
along each image dimension.
......@@ -130,19 +183,51 @@ class Nifti(notifier.Notifier, meta.Meta):
property if you need to know what type of image you are dealing with.
The ``shape`` attribute may not precisely match the image shape as
reported in the NIFTI header, because trailing dimensions of size 1 are
squeezed out. See the :meth:`__determineShape` and :meth:`mapIndices`
methods.
**Image dimensionality**
By default, the ``Nifti`` and ``Image`` classes "normalise" the
dimensionality of an image to always have at least 3 dimensions, and so
that trailing dimensions of length 1 are removed. Therefore, the
``shape`` attribute may not precisely match the image shape as reported in
the NIFTI header, because trailing dimensions of size 1 are squeezed
out. The actual image data shape can be queried via the :meth:`realShape`
property. Note also that the :class:`Image` class expects data
access/slicing to be with respect to the normalised shape, not the real
shape. See the :meth:`__determineShape` method and the
:func:`canonicalSliceObj` function for more details.
**Affine transformations**
The ``Nifti`` class is aware of three coordinate systems:
- The ``voxel`` coordinate system, used to access image data
**The affine transformation**
- The ``world`` coordinate system, where voxel coordinates are
transformed into a millimetre coordinate system, defined by the
``sform`` and/or ``qform`` elements of the NIFTI header.
- The ``fsl`` coordinate system, where voxel coordinates are scaled by
the ``pixdim`` values in the NIFTI header, and the X axis is inverted
if the voxel-to-world affine has a positive determinant. The
coordinates ``(0, 0, 0)`` correspond to the corner of voxel
``(0, 0, 0)``.
The :meth:`voxToWorldMat` and :meth:`worldToVoxMat` attributes contain
transformation matrices for transforming between voxel and world
coordinates. The ``Nifti`` class follows the same process as ``nibabel``
in selecting the affine (see
- The ``scaled`` coordinate system, where voxel coordinates are scaled by
the ``pixdim`` values in the NIFTI header.
The :meth:`getAffine` method is a simple means of acquiring an affine
which will transform between any of these coordinate systems.
See `here <http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/FLIRT/FAQ#What_is_the_format_of_the_matrix_used_by_FLIRT.2C_and_how_does_it_relate_to_the_transformation_parameters.3F>`_
for more details on the ``fsl`` coordinate system.
The ``Nifti`` class follows the same process as ``nibabel`` in determining
the ``voxel`` to ``world`` affine (see
http://nipy.org/nibabel/nifti_images.html#the-nifti-affines):
......@@ -175,7 +260,7 @@ class Nifti(notifier.Notifier, meta.Meta):
A ``Nifti`` instance expects to be passed either a
``nibabel.nifti1.Nifti1Header`` or a ``nibabel.nifti2.Nifti2Header``, but
can als encapsulate a ``nibabel.analyze.AnalyzeHeader``. In this case:
can also encapsulate a ``nibabel.analyze.AnalyzeHeader``. In this case:
- The image voxel orientation is assumed to be R->L, P->A, I->S.
......@@ -205,18 +290,20 @@ class Nifti(notifier.Notifier, meta.Meta):
=============== ========================================================
``'transform'`` The affine transformation matrix has changed. This topic
will occur when the ``voxToWorldMat`` is changed.
will occur when the :meth:`voxToWorldMat` is changed.
``'header'`` A header field has changed. This will occur when the
:meth:`intent` is changed.
=============== ========================================================
"""
""" # noqa
def __init__(self, header):
"""Create a ``Nifti`` object.
:arg header: A :class:`nibabel.nifti1.Nifti1Header`,
:class:`nibabel.nifti2.Nifti2Header`, or
``nibabel.analyze.AnalyzeHeader`` to be used as the
image header.
:class:`nibabel.nifti2.Nifti2Header`, or
``nibabel.analyze.AnalyzeHeader`` to be used as the
image header.
"""
# Nifti2Header is a sub-class of Nifti1Header,
......@@ -225,23 +312,64 @@ class Nifti(notifier.Notifier, meta.Meta):
if not isinstance(header, nib.analyze.AnalyzeHeader):
raise ValueError('Unrecognised header: {}'.format(header))
header = header
origShape, shape, pixdim = self.__determineShape(header)
origShape, shape, pixdim = Nifti.determineShape(header)
voxToWorldMat = Nifti.determineAffine(header)
affines, isneuro = Nifti.generateAffines(voxToWorldMat,
shape,
pixdim)
voxToWorldMat = self.__determineTransform(header)
worldToVoxMat = transform.invert(voxToWorldMat)
self.__header = header
self.__shape = shape
self.__origShape = origShape
self.__pixdim = pixdim
self.__affines = affines
self.__isNeurological = isneuro
self.header = header
self.__shape = shape
self.__intent = header.get('intent_code',
constants.NIFTI_INTENT_NONE)
self.__origShape = origShape
self.__pixdim = pixdim
self.__voxToWorldMat = voxToWorldMat
self.__worldToVoxMat = worldToVoxMat
def __del__(self):
"""Clears the reference to the ``nibabel`` header object. """
self.__header = None
def __determineTransform(self, header):
@staticmethod
def determineShape(header):
"""This method is called by :meth:`__init__`. It figures out the actual
shape of the image data, and the zooms/pixdims for each data axis. Any
empty trailing dimensions are squeezed, but the returned shape is
guaranteed to be at least 3 dimensions. Returns:
- A sequence/tuple containing the image shape, as reported in the
header.
- A sequence/tuple containing the effective image shape.
- A sequence/tuple containing the zooms/pixdims.
"""
# The canonicalShape function figures out
# the data shape that we should use.
origShape = list(header.get_data_shape())
shape = canonicalShape(origShape)
pixdims = list(header.get_zooms())
# if get_zooms() doesn't return at
# least len(shape) values, we'll
# fall back to the pixdim field in
# the header.
if len(pixdims) < len(shape):
pixdims = header['pixdim'][1:]
pixdims = pixdims[:len(shape)]
# should never happen, but if we only
# have zoom values for the original
# (< 3D) shape, pad them with 1s.
if len(pixdims) < len(shape):
pixdims = pixdims + [1] * (len(shape) - len(pixdims))
return origShape, shape, pixdims
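# For example, a header reporting a shape of (64, 64, 30, 1) is
# normalised to (64, 64, 30), while a 2D shape of (64, 64) is
# padded to (64, 64, 1).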
@staticmethod
def determineAffine(header):
"""Called by :meth:`__init__`. Figures out the voxel-to-world
coordinate transformation matrix that is associated with this
``Nifti`` instance.
......@@ -251,33 +379,40 @@ class Nifti(notifier.Notifier, meta.Meta):
# specially, as FNIRT clobbers the
# sform section of the NIFTI header
# to store other data.
#
# TODO Nibabel <= 2.1.0 has a bug in header.get
# for fields with a value of 0. When this
# bug gets fixed, we can replace the if-else
# block below with this:
#
# intent = header.get('intent_code', -1)
# qform = header.get('qform_code', -1)
# sform = header.get('sform_code', -1)
#
# TODO Change this in fslpy 2.0.0
#
if isinstance(header, nib.nifti1.Nifti1Header):
intent = header['intent_code']
qform = header['qform_code']
sform = header['sform_code']
else:
intent = -1
qform = -1
sform = -1
if intent in (constants.FSL_FNIRT_DISPLACEMENT_FIELD,
intent = header.get('intent_code', -1)
qform = header.get('qform_code', -1)
sform = header.get('sform_code', -1)
# FNIRT non-linear coefficient files
# clobber the sform/qform/intent
# and pixdims of the nifti header,
# so we can't correctly place it in
# the world coordinate system. See
# $FSLDIR/src/fnirt/fnirt_file_writer.cpp
# and fsl.transform.nonlinear for more
# details.
if intent in (constants.FSL_DCT_COEFFICIENTS,
constants.FSL_CUBIC_SPLINE_COEFFICIENTS,
constants.FSL_DCT_COEFFICIENTS,
constants.FSL_QUADRATIC_SPLINE_COEFFICIENTS):
log.debug('FNIRT output image detected - using qform matrix')
voxToWorldMat = np.array(header.get_qform())
constants.FSL_QUADRATIC_SPLINE_COEFFICIENTS,
constants.FSL_TOPUP_CUBIC_SPLINE_COEFFICIENTS,
constants.FSL_TOPUP_QUADRATIC_SPLINE_COEFFICIENTS):
log.debug('FNIRT coefficient field detected - generating affine')
# Knot spacing is stored in the pixdims
# (specified in terms of reference image
# voxels), and reference image pixdims
# are stored as intent code parameters.
# If we combine the two, we can at least
# get the shape/size of the coefficient
# field about right
knotpix = header.get_zooms()[:3]
refpix = (header.get('intent_p1', 1) or 1,
header.get('intent_p2', 1) or 1,
header.get('intent_p3', 1) or 1)
voxToWorldMat = affine.concat(
affine.scaleOffsetXform(refpix, 0),
affine.scaleOffsetXform(knotpix, 0))
# If the qform or sform codes are unknown,
# then we can't assume that the transform
......@@ -291,7 +426,7 @@ class Nifti(notifier.Notifier, meta.Meta):
# should just be a straight scaling matrix.
elif qform == 0 and sform == 0:
pixdims = header.get_zooms()
voxToWorldMat = transform.scaleOffsetXform(pixdims, 0)
voxToWorldMat = affine.scaleOffsetXform(pixdims, 0)
# Otherwise we let nibabel decide
# which transform to use.
......@@ -301,40 +436,119 @@ class Nifti(notifier.Notifier, meta.Meta):
return voxToWorldMat
def __determineShape(self, header):
"""This method is called by :meth:`__init__`. It figures out the actual
shape of the image data, and the zooms/pixdims for each data axis. Any
empty trailing dimensions are squeezed, but the returned shape is
guaranteed to be at least 3 dimensions. Returns:
@staticmethod
def generateAffines(voxToWorldMat, shape, pixdim):
"""Called by :meth:`__init__`, and the :meth:`voxToWorldMat` setter.
Generates and returns a dictionary containing affine transformations
between the ``voxel``, ``fsl``, ``scaled``, and ``world`` coordinate
systems. These affines are accessible via the :meth:`getAffine`
method.
:arg voxToWorldMat: The voxel-to-world affine transformation
:arg shape: Image shape (number of voxels along each dimension)
:arg pixdim: Image pixdims (size of one voxel along each
dimension)
:returns: A tuple containing:
- a dictionary of affine transformations between
each pair of coordinate systems
- ``True`` if the image is to be considered
"neurological", ``False`` otherwise - see the
:meth:`isNeurological` method.
"""
- A sequence/tuple containing the image shape, as reported in the
header.
- A sequence/tuple containing the effective image shape.
- A sequence/tuple containing the zooms/pixdims.
affines = {}
shape = list(shape[ :3])
pixdim = list(pixdim[:3])
voxToScaledMat = np.diag(pixdim + [1.0])
voxToFSLMat = np.array(voxToScaledMat)
isneuro = npla.det(voxToWorldMat) > 0
if isneuro:
x = (shape[0] - 1) * pixdim[0]
flip = affine.scaleOffsetXform([-1, 1, 1],
[ x, 0, 0])
voxToFSLMat = affine.concat(flip, voxToFSLMat)
affines['voxel', 'voxel'] = np.eye(4)
affines['voxel', 'scaled'] = voxToScaledMat
affines['voxel', 'fsl'] = voxToFSLMat
affines['voxel', 'world'] = voxToWorldMat
affines['scaled', 'scaled'] = np.eye(4)
affines['scaled', 'voxel'] = affine.invert(voxToScaledMat)
affines['scaled', 'fsl'] = affine.concat(affines['voxel', 'fsl'],
affines['scaled', 'voxel'])
affines['scaled', 'world'] = affine.concat(affines['voxel', 'world'],
affines['scaled', 'voxel'])
affines['fsl', 'fsl'] = np.eye(4)
affines['fsl', 'voxel'] = affine.invert(voxToFSLMat)
affines['fsl', 'scaled'] = affine.invert(affines['scaled', 'fsl'])
affines['fsl', 'world'] = affine.concat(affines['voxel', 'world'],
affines['fsl', 'voxel'])
affines['world', 'world'] = np.eye(4)
affines['world', 'voxel'] = affine.invert(voxToWorldMat)
affines['world', 'scaled'] = affine.concat(affines['voxel', 'scaled'],
affines['world', 'voxel'])
affines['world', 'fsl'] = affine.concat(affines['voxel', 'fsl'],
affines['world', 'voxel'])
return affines, isneuro
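# For example, the 'scaled' -> 'world' affine above is the composition
# of the scaled -> voxel and voxel -> world affines; a sketch of the
# equivalent direct computation:
#
#     s2w = affine.concat(voxToWorldMat, affine.invert(voxToScaledMat))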
@staticmethod
def identifyAffine(image, xform, from_=None, to=None):
"""Attempt to identify the source or destination space for the given
affine.
``xform`` is assumed to be an affine transformation which can be used
to transform coordinates between two coordinate systems associated with
``image``.
If one of ``from_`` or ``to`` is provided, the other will be derived.
If neither is provided, both will be derived. See the
:meth:`.Nifti.getAffine` method for details on the valid values that
``from_`` and ``to`` may take.
:arg image: :class:`.Nifti` instance associated with the affine.
:arg xform: ``(4, 4)`` ``numpy`` array encoding an affine
transformation
:arg from_: Label specifying the coordinate system which ``xform``
takes as input
:arg to: Label specifying the coordinate system which ``xform``
produces as output
:returns: A tuple containing:
- A label for the ``from_`` coordinate system
- A label for the ``to`` coordinate system
"""
# The canonicalShape function figures out
# the data shape that we should use.
origShape = list(header.get_data_shape())
shape = canonicalShape(origShape)
pixdims = list(header.get_zooms())
if (from_ is not None) and (to is not None):
return from_, to
# if get_zooms() doesn't return at
# least len(shape) values, we'll
# fall back to the pixdim field in
# the header.
if len(pixdims) < len(shape):
pixdims = header['pixdim'][1:]
if from_ is not None: froms = [from_]
else: froms = ['voxel', 'scaled', 'fsl', 'world']
if to is not None: tos = [to]
else: tos = ['voxel', 'scaled', 'fsl', 'world']
pixdims = pixdims[:len(shape)]
for from_, to in it.product(froms, tos):
return origShape, shape, pixdims
candidate = image.getAffine(from_, to)
if np.all(np.isclose(candidate, xform)):
return from_, to
raise ValueError('Could not identify affine')
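# A usage sketch (assumes img is a Nifti/Image instance whose candidate
# affines are distinct - if e.g. the voxel -> fsl affine happens to
# equal the identity, the first matching pair is returned instead):
#
#     xform     = img.getAffine('voxel', 'fsl')
#     from_, to = Nifti.identifyAffine(img, xform)  # ('voxel', 'fsl')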
def strval(self, key):
"""Returns the specified NIFTI header field, converted to a python
string, correctly null-terminated, and with non-printable characters
removed.
string, with non-printable characters removed.
This method is used to sanitise some NIFTI header fields. The default
Python behaviour for converting a sequence of bytes to a string is to
......@@ -353,9 +567,28 @@ class Nifti(notifier.Notifier, meta.Meta):
try: val = bytes(val).partition(b'\0')[0]
except Exception: val = bytes(val)
val = val.decode('ascii')
val = [chr(c) for c in val]
val = ''.join(c for c in val if c in string.printable).strip()
return ''.join([c for c in val if c in string.printable]).strip()
return val
@property
def header(self):
"""Return a reference to the ``nibabel`` header object. """
return self.__header
@header.setter
def header(self, header):
"""Replace the ``nibabel`` header object managed by this ``Nifti``
with a new header. The new header must have the same dimensions,
voxel size, and orientation as the old one.
"""
new = Nifti(header)
if not (self.sameSpace(new) and self.ndim == new.ndim):
raise ValueError('Incompatible header')
self.__header = header
@property
......@@ -374,15 +607,36 @@ class Nifti(notifier.Notifier, meta.Meta):
elif isinstance(self.header, nib.nifti1.Nifti1Header): return 1
elif isinstance(self.header, nib.analyze.AnalyzeHeader): return 0
else: raise RuntimeError('Unrecognised header: {}'.format(self.header))
else: raise RuntimeError(f'Unrecognised header: {self.header}')
@property
def shape(self):
"""Returns a tuple containing the image data shape. """
"""Returns a tuple containing the normalised image data shape. The
image shape is at least three dimensions, and trailing dimensions of
length 1 are squeezed out.
"""
return tuple(self.__shape)
@property
def realShape(self):
"""Returns a tuple containing the image data shape, as reported in
the NIfTI image header.
"""
return tuple(self.__origShape)
@property
def ndim(self):
"""Returns the number of dimensions in this image. This number may not
match the number of dimensions specified in the NIFTI header, as
trailing dimensions of length 1 are ignored. But it is guaranteed to be
at least 3.
"""
return len(self.__shape)
@property
def pixdim(self):
"""Returns a tuple containing the image pixdims (voxel sizes)."""
......@@ -391,9 +645,58 @@ class Nifti(notifier.Notifier, meta.Meta):
@property
def intent(self):
"""Returns the NIFTI intent code of this image.
"""Returns the NIFTI intent code of this image. """
return self.header.get('intent_code', constants.NIFTI_INTENT_NONE)
@property
def niftiDataType(self):
"""Returns the NIFTI data type code of this image. """
dt = self.header.get('datatype', constants.NIFTI_DT_UNKNOWN)
return int(dt)
@property
def niftiDataTypeSize(self):
"""Returns the number of bits per voxel, according to the NIfTI
data type. Returns ``None`` if the data type is not recognised.
"""
sizes = {
constants.NIFTI_DT_BINARY : 1,
constants.NIFTI_DT_UNSIGNED_CHAR : 8,
constants.NIFTI_DT_SIGNED_SHORT : 16,
constants.NIFTI_DT_SIGNED_INT : 32,
constants.NIFTI_DT_FLOAT : 32,
constants.NIFTI_DT_COMPLEX : 64,
constants.NIFTI_DT_DOUBLE : 64,
constants.NIFTI_DT_RGB : 24,
constants.NIFTI_DT_UINT8 : 8,
constants.NIFTI_DT_INT16 : 16,
constants.NIFTI_DT_INT32 : 32,
constants.NIFTI_DT_FLOAT32 : 32,
constants.NIFTI_DT_COMPLEX64 : 64,
constants.NIFTI_DT_FLOAT64 : 64,
constants.NIFTI_DT_RGB24 : 24,
constants.NIFTI_DT_INT8 : 8,
constants.NIFTI_DT_UINT16 : 16,
constants.NIFTI_DT_UINT32 : 32,
constants.NIFTI_DT_INT64 : 64,
constants.NIFTI_DT_UINT64 : 64,
constants.NIFTI_DT_FLOAT128 : 128,
constants.NIFTI_DT_COMPLEX128 : 128,
constants.NIFTI_DT_COMPLEX256 : 256,
constants.NIFTI_DT_RGBA32 : 32}
return sizes.get(self.niftiDataType, None)
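# Usage sketch - estimating the uncompressed data size of an image from
# its NIfTI data type (the file name below is hypothetical):
#
#     img = Image('image.nii.gz')
#     nbits = img.niftiDataTypeSize        # e.g. 32 for NIFTI_DT_FLOAT32
#     if nbits is not None:
#         nbytes = np.prod(img.realShape) * nbits // 8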
@intent.setter
def intent(self, val):
"""Sets the NIFTI intent code of this image. """
# analyze has no intent
if (self.niftiVersion > 0) and (val != self.intent):
self.header.set_intent(val, allow_unknown=True)
self.notify(topic='header')
@property
@@ -427,12 +730,42 @@ class Nifti(notifier.Notifier, meta.Meta):
return units
def getAffine(self, from_, to):
"""Return an affine transformation which can be used to transform
coordinates from ``from_`` to ``to``.
Valid values for the ``from_`` and ``to`` arguments are:
- ``'voxel'``: The voxel coordinate system
- ``'world'``: The world coordinate system, as defined by the image
sform/qform
- ``'fsl'``: The FSL coordinate system (scaled voxels, with a
left-right flip if the sform/qform has a positive determinant)
- ``'scaled'``: Scaled voxel coordinate system (equivalent to
``'fsl'`` without the flip).
:arg from_: Source coordinate system
:arg to: Destination coordinate system
:returns: A ``numpy`` array of shape ``(4, 4)``
"""
from_ = from_.lower()
to = to .lower()
if from_ not in ('voxel', 'scaled', 'fsl', 'world') or \
to not in ('voxel', 'scaled', 'fsl', 'world'):
raise ValueError('Invalid source/reference spaces: "{}" -> "{}". '
'Recognised spaces are "voxel", "fsl", "scaled", '
'and "world"'.format(from_, to))
return np.copy(self.__affines[from_, to])
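# Usage sketch - mapping a voxel coordinate into world coordinates with
# getAffine and fsl.transform.affine.transform (assumes an Image ``img``):
#
#     from fsl.transform import affine
#     v2w = img.getAffine('voxel', 'world')
#     world = affine.transform([10, 20, 30], v2w)
#
# All from_/to affines are pre-computed when the image is created, so
# each call is just a dictionary look-up and a copy.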
@property
def worldToVoxMat(self):
"""Returns a ``numpy`` array of shape ``(4, 4)`` containing an
affine transformation from world coordinates to voxel coordinates.
"""
return self.getAffine('world', 'voxel')
@property
@@ -440,7 +773,7 @@
"""Returns a ``numpy`` array of shape ``(4, 4)`` containing an
affine transformation from voxel coordinates to world coordinates.
"""
return self.getAffine('voxel', 'world')
@voxToWorldMat.setter
@@ -464,8 +797,12 @@
header.set_sform(xform, code=sformCode)
affines, isneuro = Nifti.generateAffines(xform,
self.shape,
self.pixdim)
self.__affines = affines
self.__isNeurological = isneuro
log.debug('Affine changed:\npixdims: '
'%s\nsform: %s\nqform: %s',
@@ -476,29 +813,34 @@
self.notify(topic='transform')
@property
@deprecated.deprecated('3.22.0', '4.0.0', 'Use getAffine instead')
def voxToScaledVoxMat(self):
"""Returns a transformation matrix which transforms from ``voxel``
coordinates into ``fsl`` coordinates, with a left-right flip
if the image appears to be stored in neurological order.
See http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/FLIRT/FAQ#What_is_the\
_format_of_the_matrix_used_by_FLIRT.2C_and_how_does_it_relate_to\
_the_transformation_parameters.3F
"""
return self.getAffine('voxel', 'fsl')
@property
@deprecated.deprecated('3.22.0', '4.0.0', 'Use getAffine instead')
def scaledVoxToVoxMat(self):
"""Returns a transformation matrix which transforms from ``fsl``
coordinates into ``voxel`` coordinates, the inverse of the
:meth:`voxToScaledVoxMat` transform.
"""
return self.getAffine('fsl', 'voxel')
@deprecated.deprecated('3.9.0', '4.0.0', 'Use canonicalSliceObj instead')
def mapIndices(self, sliceobj):
"""Deprecated - use :func:`canonicalSliceObj` instead. """
return fileslice.canonical_slicers(sliceobj, self.__origShape)
def getXFormCode(self, code=None):
@@ -514,6 +856,7 @@ class Nifti(notifier.Notifier, meta.Meta):
- :data:`~.constants.NIFTI_XFORM_ALIGNED_ANAT`
- :data:`~.constants.NIFTI_XFORM_TALAIRACH`
- :data:`~.constants.NIFTI_XFORM_MNI_152`
- :data:`~.constants.NIFTI_XFORM_TEMPLATE_OTHER`
- :data:`~.constants.NIFTI_XFORM_ANALYZE`
"""
@@ -541,12 +884,14 @@ class Nifti(notifier.Notifier, meta.Meta):
if sform_code != constants.NIFTI_XFORM_UNKNOWN: code = sform_code
elif qform_code != constants.NIFTI_XFORM_UNKNOWN: code = qform_code
# Invalid code (the maximum NIFTI_XFORM_*
# code value is 5 at present)
if code not in range(6):
code = constants.NIFTI_XFORM_UNKNOWN
return int(code)
# TODO Check what has worse performance - hashing
# a 4x4 array (via memoizeMD5), or the call
# to aff2axcodes. I'm guessing the latter,
@@ -564,7 +909,6 @@ class Nifti(notifier.Notifier, meta.Meta):
return nib.orientations.aff2axcodes(xform, inaxes)
@memoize.Instanceify(memoize.memoize)
def isNeurological(self):
"""Returns ``True`` if it looks like this ``Nifti`` object has a
neurological voxel orientation, ``False`` otherwise. This test is
@@ -583,51 +927,7 @@ class Nifti(notifier.Notifier, meta.Meta):
_format_of_the_matrix_used_by_FLIRT.2C_and_how_does_it_relate_to\
_the_transformation_parameters.3F
"""
return self.__isNeurological
def sameSpace(self, other):
@@ -689,11 +989,79 @@ class Nifti(notifier.Notifier, meta.Meta):
return code
def adjust(self, pixdim=None, shape=None, origin=None):
"""Return a new ``Nifti`` object with the specified ``pixdim`` or
``shape``. The affine of the new ``Nifti`` is adjusted accordingly.
Only one of ``pixdim`` or ``shape`` can be specified.
See :func:`.affine.rescale` for the meaning of the ``origin`` argument.
Only the spatial dimensions may be adjusted - use the functions in
the :mod:`.image.resample` module if you need to adjust non-spatial
dimensions.
:arg pixdim: New voxel dimensions
:arg shape: New image shape
:arg origin: Voxel grid alignment - either ``'centre'`` (the default)
or ``'corner'``
:returns: A new ``Nifti`` object based on this one, with adjusted
pixdims, shape and affine.
"""
if ((pixdim is not None) and (shape is not None)) or \
((pixdim is None) and (shape is None)):
raise ValueError('Exactly one of pixdim or '
'shape must be specified')
if shape is not None: ndim = len(shape)
else: ndim = len(pixdim)
# We only allow adjustment of
# the spatial dimensions
if ndim != 3:
raise ValueError('Three dimensions must be specified')
oldShape = np.array(self.shape[ :ndim])
oldPixdim = np.array(self.pixdim[:ndim])
newShape = shape
newPixdim = pixdim
# if pixdims were specified,
# convert them into a shape,
# and vice versa
if newPixdim is not None:
newShape = oldShape * (oldPixdim / newPixdim)
else:
newPixdim = oldPixdim * (oldShape / newShape)
# Rescale the voxel-to-world affine
xform = affine.rescale(oldShape, newShape, origin)
xform = affine.concat(self.getAffine('voxel', 'world'), xform)
# Now that we've got our spatial
# scaling/offset matrix, pad the
# new shape/pixdims with those
# from any non-spatial dimensions
newShape = list(newShape) + list(self.shape[ 3:])
newPixdim = list(newPixdim) + list(self.pixdim[3:])
# And create the new header
# and we're away
header = self.header.copy()
header.set_data_shape(newShape)
header.set_zooms( newPixdim)
header.set_sform( xform)
header.set_qform( xform)
return Nifti(header)
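# Usage sketch - creating a header which describes the same field of
# view, resampled to 1mm isotropic voxels (assumes a Nifti/Image ``img``):
#
#     hdr1mm = img.adjust(pixdim=(1, 1, 1))
#
# Note that adjust only produces a new Nifti header - it does not
# resample any image data.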
class Image(Nifti):
"""Class which represents a NIFTI image. Internally, the image is
loaded/stored using a :mod:`nibabel.nifti1.Nifti1Image` or
:mod:`nibabel.nifti2.Nifti2Image`. This class adds functionality for
loading metadata from JSON sidecar files, and for keeping track of
modifications to the image data.
In addition to the attributes added by the :meth:`Nifti.__init__` method,
@@ -709,20 +1077,78 @@ class Image(Nifti):
file from where it was loaded, or some other string
describing its origin.
``dataRange`` The ``(min, max)`` image data values.
``nibImage`` A reference to the ``nibabel`` NIFTI image object.
``saveState`` A boolean value which is ``True`` if this image is
saved to disk, ``False`` if it is in-memory, or has
been edited.
============== ===========================================================
*Data access*
The ``Image`` class supports access to and assignment of the image data
via the ``[]`` slice operator, e.g.::
img = Image('image.nii.gz')
val = img[20, 30, 25]
img[30, 40, 20] = 999
Internally, the image data is managed using one of the following methods:
1. For read-only access, the ``Image`` class delegates to the
underlying ``nibabel.Nifti1Image`` instance, accessing the data
via the ``Nifti1Image.dataobj`` attribute. Refer to
https://nipy.org/nibabel/nibabel_images.html#the-image-data-array for
more details.
2. As soon as any data is modified, the ``Image`` class will
load the image data as a numpy array into memory and will maintain its
own reference to the array for subsequent access. Note that this
array is entirely independent of any array that is cached by
the underlying ``nibabel.Nifti1Image`` object (refer to
https://nipy.org/nibabel/images_and_memory.html)
3. The ``nibabel.Nifti1Image`` class does not support indexing of image
data with boolean mask arrays (e.g. ``image[mask > 0]``). If an
attempt is made to access image data in this way, the ``Image`` class
will be loaded into memory in the same way as described in point 2
above.
4. For more complicated requirements, a :class:`DataManager`,
implementing custom data access management logic, can be provided when
an ``Image`` is created. If a ``DataManager`` is provided, an
internal reference to the data (see 2 above) will **not** be created or
maintained.
It is also possible to obtain a reference to a numpy array containing
the image data via the :meth:`data` method. However, modifications to
the returned array:
- will not result in any notifications (described below)
- will not affect the value of :meth:`saveState`
- have undefined semantics when a custom :class:`DataManager` is in use
*Image dimensionality*
The ``Image`` class abstracts away trailing image dimensions of length 1.
This means that if the header for a NIFTI image specifies that the image
has four dimensions, but the fourth dimension is of length 1, you do not
need to worry about indexing that fourth dimension. However, all NIFTI
images will be presented as having at least three dimensions, so if your
image header specifies a third dimension of length 1, you will still
need to provide an index of 0 for that dimension, for all data accesses.
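For example (assumed shapes), for an image stored on disk with shape
``(64, 64, 1, 1)``, the trailing fourth dimension is squeezed out, but
the third is retained so that at least three dimensions remain::

    img.realShape    # (64, 64, 1, 1)
    img.shape        # (64, 64, 1)
    img[10, 10, 0]   # an index for the third dimension is still needed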
*Notification of changes to an Image*
The ``Image`` class adds some :class:`.Notifier` topics to those which are
already provided by the :class:`Nifti` class - listeners may register to
be notified of changes to the above properties, by registering on the
@@ -740,28 +1166,31 @@ class Image(Nifti):
image changes (i.e. data or ``voxToWorldMat`` is
edited, or the image saved to disk).
``'dataRange'`` Deprecated - No notifications are made on this topic.
=============== ======================================================
"""
def __init__(self,
image : ImageSource = None,
name : str = None,
header : nib.Nifti1Header = None,
xform : np.ndarray = None,
loadData : bool = None,
calcRange : bool = None,
threaded : bool = None,
dataSource : PathLike = None,
loadMeta : bool = False,
dataMgr : DataManager = None,
version : int = None,
**kwargs):
"""Create an ``Image`` object with the given image data or file name.
:arg image: A string containing the name of an image file to load,
or a Path object pointing to an image file, or a
:mod:`numpy` array, or a :mod:`nibabel` image object,
or an ``Image`` object. If not provided, a ``header``
and a ``dataMgr`` must be provided.
:arg name: A name for the image.
@@ -780,45 +1209,51 @@ class Image(Nifti):
``header`` are provided, the ``xform`` is used in
preference to the header transformation.
:arg loadData: Deprecated, has no effect
:arg calcRange: Deprecated, has no effect
:arg threaded: Deprecated, has no effect
:arg dataSource: If ``image`` is not a file name, this argument may be
used to specify the file from which the image was
loaded.
:arg loadMeta: If ``True``, any metadata contained in JSON sidecar
files is loaded and attached to this ``Image`` via
the :class:`.Meta` interface. if ``False``, metadata
can be loaded at a later stage via the
:func:`loadMeta` function. Defaults to ``False``.
:arg dataMgr: Object implementing the :class:`DataManager`
interface, for managing access to the image data.
:arg version: NIfTI version - either 1 or 2. Only used when creating
an image from a numpy array, and when a ``header`` is
not provided. Defaults to the value dictated by the
``FSLOUTPUTTYPE`` environment variable.
All other arguments are passed through to the ``nibabel.load`` function
(if it is called).
"""
if threaded is not None:
deprecated.warn('Image(threaded)', vin='3.9.0', rin='4.0.0',
msg='The threaded option has no effect')
if loadData is not None:
deprecated.warn('Image(loadData)', vin='3.9.0', rin='4.0.0',
msg='The loadData option has no effect')
if calcRange is not None:
deprecated.warn('Image(calcRange)', vin='3.9.0', rin='4.0.0',
msg='The calcRange option has no effect')
if version not in (None, 1, 2):
raise ValueError('Invalid value for version - only NIfTI '
'versions 1 and 2 are supported')
nibImage = None
saved = False
# Take a copy of the header if one has
# been provided
@@ -841,11 +1276,14 @@ class Image(Nifti):
header.set_qform(xform, code=qform)
# The image parameter may be the name of an image file
if isinstance(image, six.string_types):
if isinstance(image, (str, Path)):
# resolve path to source if it is a sym-link
image = Path(image).resolve()
image = op.abspath(addExt(image))
nibImage = nib.load(image, **kwargs)
header = nibImage.header
dataSource = image
saved = True
# Or a numpy array - we wrap it in a nibabel image,
# with an identity transformation (each voxel maps
@@ -856,11 +1294,15 @@ class Image(Nifti):
if header is not None: xform = header.get_best_affine()
else: xform = np.identity(4)
# NIfTI1 or NIfTI2 - if version was provided,
# use that, otherwise use the FSLOUTPUTTYPE
# environment variable
if header is None:
outputType = defaultOutputType()
if version == 2: ctr = nib.Nifti2Image
elif version == 1: ctr = nib.Nifti1Image
elif 'NIFTI2' in outputType.name: ctr = nib.Nifti2Image
else: ctr = nib.Nifti1Image
# make sure that the data type is correct,
# in case this header was passed in from
@@ -878,18 +1320,36 @@ class Image(Nifti):
ctr = nib.analyze.AnalyzeImage
nibImage = ctr(image, xform, header=header)
header = nibImage.header
# If it's an Image object, we
# just take the nibabel image
elif isinstance(image, Image):
nibImage = image.nibImage
header = image.header
# otherwise, we assume that
# it is a nibabel image
elif image is not None:
nibImage = image
header = nibImage.header
# If an image is not provided,
# a DataManager must be provided
elif dataMgr is None:
raise ValueError('A DataManager must be provided '
'if an image is not provided')
if header is None:
raise ValueError('Could not identify/create NIfTI header')
# Figure out the name of this image, if
# it has not been explicitly passed in
if name is None:
# If this image was loaded
# from disk, use the file name.
if isinstance(image, (str, Path)):
name = removeExt(op.basename(image))
# Or the image was created from a numpy array
@@ -900,28 +1360,31 @@ class Image(Nifti):
else:
name = 'Nibabel image'
Nifti.__init__(self, header)
self.name = name
self.__lName = f'{id(self)}_{self.name}'
self.__dataSource = dataSource
self.__nibImage = nibImage
self.__saveState = saved
self.__dataMgr = dataMgr
self.__dataRange = None
self.__data = None
# Listen to ourself for changes
# to header attributes so we
# can update the saveState.
self.register(self.name, self.__headerChanged, topic='transform')
self.register(self.name, self.__headerChanged, topic='header')
# try and load metadata
# from JSON sidecar files
if self.dataSource is not None and loadMeta:
try:
self.updateMeta(loadMetadata(self))
except Exception as e:
log.warning('Failed to load metadata for %s: %s',
self.dataSource, e)
def __hash__(self):
@@ -946,16 +1409,30 @@ class Image(Nifti):
def __del__(self):
"""Closes any open file handles, and clears some references. """
# Nifti class may have
# been GC'd at shutdown
if Nifti is not None:
Nifti.__del__(self)
self.__nibImage = None
self.__dataMgr = None
self.__data = None
@deprecated.deprecated('3.9.0', '4.0.0',
'The Image class no longer uses an ImageWrapper')
def getImageWrapper(self):
"""Returns the :class:`.ImageWrapper` instance used to manage
access to the image data.
"""
return None
@property
def dataManager(self):
"""Return the :class:`.DataManager` associated with this ``Image``,
if one was specified when it was created.
"""
return self.__dataMgr
@property
@@ -969,19 +1446,49 @@ class Image(Nifti):
@property
def nibImage(self):
"""Returns a reference to the ``nibabel`` NIFTI image instance.
Note that if the image data has been modified through this ``Image``,
it will be out of sync with what is returned by the ``nibabel`` object,
until a call to :meth:`save` is made.
"""
return self.__nibImage
@property
def data(self):
"""Returns the image data as a ``numpy`` array.
"""Returns the image data as a ``numpy`` array. The shape of the
returned array is normalised - it will have at least three dimensions,
and any trailing dimensions of length 1 will be squeezed out. See
:meth:`Nifti.shape` and :meth:`Nifti.realShape`.
.. warning:: Calling this method may cause the entire image to be
loaded into memory.
"""
if self.__dataMgr is not None:
return self[:]
# The internal cache has real image shape -
# this makes code in __getitem__ easier,
# as it can use the real shape regardless
# of how the data is accessed
if self.__data is None:
self.__data = self.nibImage.dataobj[:]
return self.__data.reshape(self.shape)
@property
def inMemory(self):
"""Returns ``True`` if the image data has been loaded into memory,
``False`` otherwise. This does not reflect whether the underlying
``nibabel.Nifti1Image`` object has loaded the image data into
memory. However, if all data access takes place through the ``Image``
class, the underlying ``nibabel`` image will not use a cache.
If a custom ``DataManager`` has loaded the data, this method will
always return ``False``.
"""
return self.__data is not None
@property
@@ -994,26 +1501,13 @@ class Image(Nifti):
@property
def dataRange(self):
"""Returns the image data range as a ``(min, max)`` tuple. If the
``calcRange`` parameter to :meth:`__init__` was ``False``, these
values may not be accurate, and may change as more image data is
accessed.
If the data range has not been no data has been accessed,
``(None, None)`` is returned.
"""
if self.__imageWrapper is None: drange = (None, None)
else: drange = self.__imageWrapper.dataRange
"""Returns the minimum/maxmimum image data values. """
# Fall back to the cal_min/max
# fields in the NIFTI header
# if we don't yet know anything
# about the image data range.
if drange[0] is None or drange[1] is None:
drange = (float(self.header['cal_min']),
float(self.header['cal_max']))
if self.__dataMgr is not None: return self.__dataMgr.dataRange
elif self.__dataRange is not None: return self.__dataRange
self.__dataRange = nir.naninfrange(self.data)
return self.__dataRange
@property
@@ -1022,10 +1516,40 @@ class Image(Nifti):
# Get the data type from the
# first voxel in the image
coords = [0] * len(self.shape)
return self[tuple(coords)].dtype
@property
def nvals(self):
"""Returns the number of values per voxel in this image. This will
usually be 1, but may be 3 or 4, for images of type
``NIFTI_TYPE_RGB24`` or ``NIFTI_TYPE_RGBA32``.
"""
nvals = len(self.dtype)
if nvals == 0: return 1
else: return nvals
@property
def iscomplex(self):
"""Returns ``True`` if this image has a complex data type, ``False``
otherwise.
"""
return np.issubdtype(self.dtype, np.complexfloating)
@property
def editable(self):
"""Return ``True`` if the image data can be modified, ``False``
otherwise. Delegates to :meth:`DataManager.editable` if a
:class:`DataManager` is in use.
"""
if self.__dataMgr is not None: return self.__dataMgr.editable
else: return True
@Nifti.voxToWorldMat.setter
def voxToWorldMat(self, xform):
"""Overrides the :meth:`Nifti.voxToWorldMat` property setter.
@@ -1037,14 +1561,17 @@ class Image(Nifti):
Nifti.voxToWorldMat.fset(self, xform)
if self.__nibImage is None:
return
xform = self.voxToWorldMat
code = int(self.header['sform_code'])
self.__nibImage.set_sform(xform, code)
def __headerChanged(self, *args, **kwargs):
"""Called when header properties of this :class:`Nifti` instance
change. Updates the :attr:`saveState` accordingly.
"""
if self.__saveState:
@@ -1052,58 +1579,23 @@ class Image(Nifti):
self.notify(topic='saveState')
@deprecated.deprecated('3.9.0', '4.0.0', 'calcRange has no effect')
def calcRange(self, *args, **kwargs):
"""Deprecated, has no effect """
@deprecated.deprecated('3.9.0', '4.0.0', 'loadData has no effect')
def loadData(self):
"""Makes sure that the image data is loaded into memory.
See :meth:`.ImageWrapper.loadData`.
"""
self.__imageWrapper.loadData()
"""Deprecated, has no effect """
def save(self, filename=None):
"""Saves this ``Image`` to the specifed file, or the :attr:`dataSource`
if ``filename`` is ``None``.
Note that calling ``save`` on an image with modified data will cause
the entire image data to be loaded into memory if it has not already
been loaded.
"""
import fsl.utils.imcp as imcp
@@ -1111,6 +1603,10 @@ class Image(Nifti):
if self.__dataSource is None and filename is None:
raise ValueError('A file name must be specified')
if self.__nibImage is None:
raise ValueError('Cannot save images without an '
'associated nibabel object')
if filename is None:
filename = self.__dataSource
@@ -1125,19 +1621,32 @@ class Image(Nifti):
# We save the image out to a temp file,
# then close the old image, move the
# temp file to the real destination,
# then re-open the file. This is done
# to ensure that all references to the
# old file are destroyed.
tmphd, tmpfname = tempfile.mkstemp(suffix=getExt(filename))
os.close(tmphd)
try:
# First of all, the nibabel object won't know
# about any image data modifications, so if
# any have occurred, we need to create a new
# nibabel image using our copy of the data,
# and the old header.
#
# Assuming here that analyze/nifti1/nifti2
# nibabel classes have an __init__ which
# expects (data, affine, header)
if not self.saveState:
self.__nibImage = type(self.__nibImage)(self.data,
affine=None,
header=self.header)
self.header = self.__nibImage.header
nib.save(self.__nibImage, tmpfname)
# nibabel should close any old
# file handles when the image/
# header refs are deleted
self.__nibImage = None
self.header = None
# Copy to final destination,
# and reload from there
imcp.imcp(tmpfname, filename, overwrite=True)
self.__nibImage = nib.load(filename)
@@ -1147,18 +1656,11 @@ class Image(Nifti):
os.remove(tmpfname)
# Because we've created a new nibabel image,
# we may have to create a new DataManager
# instance too, as we have just destroyed
# the nibabel image we gave to the last one.
if self.__dataMgr is not None:
self.__dataMgr = self.__dataMgr.copy(self.nibImage)
self.__dataSource = filename
self.__saveState = True
@@ -1166,139 +1668,136 @@ class Image(Nifti):
self.notify(topic='saveState')
def __getitem__(self, slc):
"""Access the image data with the specified ``slc``.
:arg slc: Something which can slice the image data.
"""
log.debug('%s: __getitem__ [%s]', self.name, slc)
# Make the slice object compatible
# with the actual image shape - e.g.
# an underlying 2D image is presented
# as having 3 dimensions.
shape = self.shape
realShape = self.realShape
slc = canonicalSliceObj( slc, shape)
fancy = isValidFancySliceObj(slc, shape)
expNdims, expShape = expectedShape( slc, shape)
slc = canonicalSliceObj( slc, realShape)
# The nibabel arrayproxy does not support
# boolean mask-based (a.k.a. "fancy")
# indexing. If we are given a mask, we
# force-load the image data into memory.
if all((fancy,
self.__dataMgr is None,
self.__data is None)):
log.debug('Fancy slice detected - force-loading image '
'data into memory (%s)', self.name)
self.data
if self.__dataMgr is not None: data = self.__dataMgr[slc]
elif self.__data is not None: data = self.__data[slc]
else: data = self.__nibImage.dataobj[slc]
# Make sure that the result has the
# shape that the caller is expecting.
if fancy: data = data.reshape((data.size, ))
else: data = data.reshape(expShape)
# If expNdims == 0, we should
# return a scalar. If expNdims
# == 0, but data.size != 1,
# something is wrong somewhere
# (and is not being handled
# here).
if expNdims == 0 and data.size == 1:
# Funny behaviour with numpy scalar arrays.
# data[()] returns a numpy scalar (which is
# what we want). But data.item() returns a
# python scalar. And if the data is a
# ndarray with 0 dims, data[0] will raise
# an error!
data = data[()]
return data
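# Usage sketch - read-only slicing goes through the nibabel dataobj,
# whereas boolean-mask ("fancy") indexing forces an in-memory load
# (assumes an Image ``img``):
#
#     vol0 = img[..., 0]           # read one volume, data stays on disk
#     vals = img[img.data > 100]   # fancy slice - loads data into memory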
def __setitem__(self, slc, values):
"""Set the image data at ``slc`` to ``values``.
:arg slc: Something which can slice the image data.
:arg values: New image data.
.. note:: Modifying image data may force the entire image to be
loaded into memory if it has not already been loaded.
"""
if not self.editable:
raise RuntimeError('Image is not editable')
values = np.array(values)
if values.size == 0:
return
log.debug('%s: __setitem__ [%s = %s]', self.name, slc, values.shape)
realShape = self.realShape
origslc = slc
slc = canonicalSliceObj(origslc, realShape)
# If the image shape does not match its
# 'display' shape (either less three
# dims, or has trailing dims of length
# 1), we might need to re-shape the
# values to prevent numpy from raising
# an error in the assignment below.
if realShape != self.shape:
expNdims, expShape = expectedShape(slc, realShape)
# If we are slicing a scalar, the
# assigned value has to be scalar.
if expNdims == 0 and isinstance(values, abc.Sequence):
if len(values) > 1:
raise IndexError('Invalid assignment: [{}] = {}'.format(
slc, len(values)))
if values.size > 0:
values = np.array(values).flatten()[0]
# Make sure that the values
# have a compatible shape.
else:
values = np.array(values)
if values.shape != expShape:
values = values.reshape(expShape)
# Use DataManager to manage data
# access if one has been specified
if self.__dataMgr is not None:
self.__dataMgr[slc] = values
# Use an internal numpy array
# to persist data changes
else:
# force-load data - see the data() method
if self.__data is None:
self.data
self.__data[slc] = values
# Reset data range whenever the data is
# modified - see dataRange() method
self.__dataRange = None
# Notify that data has changed/image is not saved
self.notify(topic='data', value=origslc)
if self.__saveState:
self.__saveState = False
self.notify(topic='saveState')
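# Usage sketch - assignment loads the data into memory, invalidates the
# cached data range, and marks the image as unsaved (assumes ``img``):
#
#     img[..., 0] = 0
#     img.saveState    # False, until img.save() is called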
def canonicalShape(shape):
@@ -1324,6 +1823,133 @@ def canonicalShape(shape):
return shape
def isValidFancySliceObj(sliceobj, shape):
"""Returns ``True`` if the given ``sliceobj`` is a valid and fancy slice
object.
``nibabel`` refers to slice objects as "fancy" if they comprise anything
but tuples of integers and simple ``slice`` objects. The ``Image`` class
supports an additional type of "fancy" slicing, where the ``sliceobj`` is
a boolean ``numpy`` array of the same shape as the image.
This function returns ``True`` if the given ``sliceobj`` adheres to these
requirements, ``False`` otherwise.
"""
# We only support boolean numpy arrays
# which have the same shape as the image
return (isinstance(sliceobj, np.ndarray) and
sliceobj.dtype == bool and
np.prod(sliceobj.shape) == np.prod(shape))
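# For example (sketch):
#
#     mask = np.zeros((4, 4, 4), dtype=bool)
#     isValidFancySliceObj(mask, (4, 4, 4))        # True
#     isValidFancySliceObj((0, 1, 2), (4, 4, 4))   # False - a plain tuple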
def canonicalSliceObj(sliceobj, shape):
"""Returns a canonical version of the given ``sliceobj``. See the
``nibabel.fileslice.canonical_slicers`` function.
"""
# Fancy slice objects must have
# the same shape as the data
if isValidFancySliceObj(sliceobj, shape):
return sliceobj.reshape(shape)
else:
if not isinstance(sliceobj, tuple):
sliceobj = (sliceobj,)
if len(sliceobj) > len(shape):
sliceobj = sliceobj[:len(shape)]
return nib.fileslice.canonical_slicers(sliceobj, shape)
def expectedShape(sliceobj, shape):
"""Given a slice object, and the shape of an array to which
that slice object is going to be applied, returns the expected
shape of the result.
.. note:: It is assumed that the ``sliceobj`` has been passed through
the :func:`canonicalSliceObj` function.
:arg sliceobj: Something which can be used to slice an array
of shape ``shape``.
:arg shape: Shape of the array being sliced.
:returns: A tuple containing:
- Expected number of dimensions of the result
- Expected shape of the result (or ``None`` if
``sliceobj`` is fancy).
"""
if isValidFancySliceObj(sliceobj, shape):
return 1, None
# Truncate some dimensions from the
# slice object if it has too many
# (e.g. trailing dims of length 1).
elif len(sliceobj) > len(shape):
sliceobj = sliceobj[:len(shape)]
# Figure out the number of dimensions
# that the result should have, given
# this slice object.
expShape = []
for i in range(len(sliceobj)):
# Each dimension which has an
# int slice will be collapsed
if isinstance(sliceobj[i], int):
continue
start = sliceobj[i].start
stop = sliceobj[i].stop
if start is None: start = 0
if stop is None: stop = shape[i]
stop = min(stop, shape[i])
expShape.append(stop - start)
return len(expShape), expShape
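# For example (sketch), slicing a (10, 10, 10) array with an integer and
# a slice - the integer-indexed dimension is collapsed:
#
#     slc = canonicalSliceObj((5, slice(2, 8)), (10, 10, 10))
#     expectedShape(slc, (10, 10, 10))    # (2, [6, 10])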
def loadMetadata(image):
"""Searches for and loads any sidecar JSON files associated with the given
:class:`.Image`.
If the image looks to be part of a BIDS data set,
:func:`.bids.loadMetadata` is used. Otherwise, if a JSON file with the same
file prefix is present alongside the image, it is directly loaded.
:arg image: :class:`.Image` instance
:returns: Dict containing any metadata that was loaded.
"""
if image.dataSource is None:
return {}
filename = image.dataSource
basename = op.basename(removeExt(filename))
dirname = op.dirname(filename)
if fslbids.isBIDSFile(image.dataSource) and \
fslbids.inBIDSDir( image.dataSource):
return fslbids.loadMetadata(image.dataSource)
jsonfile = op.join(dirname, '{}.json'.format(basename))
if op.exists(jsonfile):
with open(jsonfile, 'rt') as f:
return json.load(f, strict=False)
return {}
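# Usage sketch - for a BIDS data set, metadata is gathered from the JSON
# sidecar files that apply to the image (file names are hypothetical):
#
#     img = Image('data/sub-01/anat/sub-01_T1w.nii.gz')
#     meta = loadMetadata(img)
#     tr = meta.get('RepetitionTime')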
def looksLikeImage(filename, allowedExts=None):
"""Returns ``True`` if the given file looks like a NIFTI image, ``False``
otherwise.
@@ -1348,12 +1974,21 @@ def addExt(prefix, mustExist=True, unambiguous=True):
"""Adds a file extension to the given file ``prefix``. See
:func:`~fsl.utils.path.addExt`.
"""
try:
return fslpath.addExt(prefix,
allowedExts=ALLOWED_EXTENSIONS,
mustExist=mustExist,
defaultExt=defaultExt(),
fileGroups=FILE_GROUPS,
unambiguous=unambiguous)
except fslpath.PathError as e:
# hacky: if more than one file with
# the prefix exists, we emit a
# warning, because in most cases
# this is a bad thing.
if str(e).startswith('More than'):
log.warning(e)
raise e
def splitExt(filename):
@@ -1377,40 +2012,105 @@ def removeExt(filename):
return fslpath.removeExt(filename, ALLOWED_EXTENSIONS)
def fixExt(filename, **kwargs):
"""Fix the extension of ``filename``.
For example, if a file name is passed in as ``file.nii.gz``, but the
file is actually ``file.nii``, this function will fix the file name.
If ``filename`` already exists, it is returned unchanged.
All other arguments are passed through to :func:`addExt`.
"""
if op.exists(filename):
return filename
else:
return addExt(removeExt(filename), **kwargs)
class FileType(enum.Enum):
"""Enumeration of supported image file types. The values for each type
are the same as defined in the FSL ``newimage/newimage.h`` header file.
"""
NIFTI = 1
NIFTI2 = 2
ANALYZE = 10
NIFTI_PAIR = 11
NIFTI2_PAIR = 12
ANALYZE_GZ = 100
NIFTI_GZ = 101
NIFTI2_GZ = 102
NIFTI_PAIR_GZ = 111
NIFTI2_PAIR_GZ = 112
def fileType(filename) -> FileType:
"""Infer an image file type. """
filename = addExt( filename)
extension = getExt( filename)
img = nib.load(filename)
# Mappings between (image type, file extension), and type code.
# Order is important due to the nibabel class hierarchy - we
# must test in order NIFTI2, NIFTI1, ANALYZE
anlz = (nib.AnalyzeImage, nib.AnalyzeHeader)
nii1 = (nib.Nifti1Image, nib.Nifti1Pair, nib.Nifti1Header)
nii2 = (nib.Nifti2Image, nib.Nifti2Pair, nib.Nifti2Header)
mappings = [
(nii2, '.nii', FileType.NIFTI2),
(nii2, '.nii.gz', FileType.NIFTI2_GZ),
(nii2, '.hdr', FileType.NIFTI2_PAIR),
(nii2, '.img', FileType.NIFTI2_PAIR),
(nii2, '.hdr.gz', FileType.NIFTI2_PAIR_GZ),
(nii2, '.img.gz', FileType.NIFTI2_PAIR_GZ),
(nii1, '.nii', FileType.NIFTI),
(nii1, '.nii.gz', FileType.NIFTI_GZ),
(nii1, '.hdr', FileType.NIFTI_PAIR),
(nii1, '.img', FileType.NIFTI_PAIR),
(nii1, '.hdr.gz', FileType.NIFTI_PAIR_GZ),
(nii1, '.img.gz', FileType.NIFTI_PAIR_GZ),
(anlz, '.hdr', FileType.ANALYZE),
(anlz, '.img', FileType.ANALYZE),
(anlz, '.hdr.gz', FileType.ANALYZE_GZ),
(anlz, '.img.gz', FileType.ANALYZE_GZ),
]
for ftype, ext, code in mappings:
if isinstance(img, ftype) and extension == ext:
return code
raise ValueError(f'Could not infer image type: {filename}')
def defaultOutputType() -> FileType:
"""Return the default output file type. This is based on the
``$FSLOUTPUTTYPE`` environment variable.
"""
fsloutputtype = os.environ.get('FSLOUTPUTTYPE', None)
if fsloutputtype not in FileType.__members__:
fsloutputtype = 'NIFTI_GZ'
return FileType[fsloutputtype]
def defaultExt() -> str:
"""Returns the default NIFTI file extension that should be used.
If the ``$FSLOUTPUTTYPE`` variable is set, its value is used.
Otherwise, ``.nii.gz`` is returned.
"""
options = {
FileType.ANALYZE : '.img',
FileType.NIFTI : '.nii',
FileType.NIFTI2 : '.nii',
FileType.NIFTI_GZ : '.nii.gz',
FileType.NIFTI2_GZ : '.nii.gz',
FileType.NIFTI_PAIR : '.img',
FileType.NIFTI2_PAIR : '.img',
FileType.ANALYZE_GZ : '.img.gz',
FileType.NIFTI_PAIR_GZ : '.img.gz',
FileType.NIFTI2_PAIR_GZ : '.img.gz',
}
return options[defaultOutputType()]
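# Usage sketch - the default file type and extension for new images
# follow the FSLOUTPUTTYPE environment variable:
#
#     os.environ['FSLOUTPUTTYPE'] = 'NIFTI2_GZ'
#     defaultOutputType()    # FileType.NIFTI2_GZ
#     defaultExt()           # '.nii.gz'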
@@ -7,6 +7,9 @@
"""This module provides the :class:`ImageWrapper` class, which can be used
to manage data access to ``nibabel`` NIFTI images.
.. note:: This module is deprecated - it is being moved to the FSLeyes project,
and will be removed in a future version of fslpy.
Terminology
-----------
@@ -42,9 +45,10 @@ import collections
import collections.abc as abc
import itertools as it
import nibabel as nib
import numpy as np
import fsl.data.image as fslimage
import fsl.utils.deprecated as deprecated
import fsl.utils.notifier as notifier
import fsl.utils.naninfrange as nir
import fsl.utils.idle as idle
@@ -76,8 +80,7 @@ class ImageWrapper(notifier.Notifier):
- The image data is not modified (via :meth:`__setitem__`).
If any of these conditions do not hold, the image data will be loaded into
memory and accessed directly.
*Image dimensionality*
@@ -149,6 +152,8 @@ class ImageWrapper(notifier.Notifier):
"""
@deprecated.deprecated('3.9.0', '4.0.0',
'The ImageWrapper has been migrated to FSLeyes')
def __init__(self,
image,
name=None,
@@ -176,8 +181,6 @@ class ImageWrapper(notifier.Notifier):
data range is updated directly on reads/writes.
"""
self.__image = image
self.__name = name
self.__taskThread = None
@@ -190,10 +193,10 @@ class ImageWrapper(notifier.Notifier):
if d == 1: self.__numRealDims -= 1
else: break
# Degenerate case - if every
# dimension has length 1
if self.__numRealDims == 0:
self.__numRealDims = len(image.shape)
# Degenerate case - less
# than three real dimensions
if self.__numRealDims < 3:
self.__numRealDims = min(3, len(image.shape))
# And save the number of
# 'padding' dimensions too.
@@ -218,7 +221,11 @@ class ImageWrapper(notifier.Notifier):
self.reset(dataRange)
if loadData:
# We keep an internal ref to
# the data numpy array if/when
# it is loaded in memory
self.__data = None
if loadData or image.in_memory:
self.loadData()
if threaded:
@@ -232,6 +239,7 @@ class ImageWrapper(notifier.Notifier):
the :class:`.TaskThread` is stopped.
"""
self.__image = None
self.__data = None
if self.__taskThread is not None:
self.__taskThread.stop()
self.__taskThread = None
......@@ -303,9 +311,11 @@ class ImageWrapper(notifier.Notifier):
# data range for each volume/slice/vector
#
# We use nan as a placeholder, so the
# dtype must be non-integral. The
# len(dtype) check takes into account
# structured data (e.g. RGB)
dtype = self.__image.get_data_dtype()
if np.issubdtype(dtype, np.integer) or len(dtype) > 0:
dtype = np.float32
self.__volRanges = np.zeros((nvols, 2),
dtype=dtype)
@@ -375,16 +385,19 @@ class ImageWrapper(notifier.Notifier):
"""Forces all of the image data to be loaded into memory.
.. note:: This method will be called by :meth:`__init__` if its
``loadData`` parameter is ``True``. It will also be called
on all write operations (see :meth:`__setitem__`).
"""
if self.__data is None:
self.__data = np.asanyarray(self.__image.dataobj)
@property
def dataIsLoaded(self):
"""Return true if the image data has been loaded into memory, ``False``
otherwise.
"""
return self.__data is not None
def __getData(self, sliceobj, isTuple=False):
@@ -405,14 +418,7 @@ class ImageWrapper(notifier.Notifier):
# ArrayProxy. Otherwise if it is in
# memory, we can access it directly.
#
# Note also that if the caller has
# given us a 'fancy' slice object (a
# boolean numpy array), but the image
# data is not in memory, we can't access
@@ -420,8 +426,8 @@ class ImageWrapper(notifier.Notifier):
# (the dataobj attribute) cannot handle
# fancy indexing. In this case an error
# will be raised.
if self.__data is not None: return self.__data[ sliceobj]
else: return self.__image.dataobj[sliceobj]
def __imageIsCovered(self):
......@@ -442,18 +448,18 @@ class ImageWrapper(notifier.Notifier):
_, expansions = calcExpansion(slices, self.__coverage)
expansions = collapseExpansions(expansions, self.__numRealDims - 1)
log.debug('Updating image %s data range [slice: %s] '
'(current range: [%s, %s]; '
'number of expansions: %s; '
'current coverage: %s; '
'volume ranges: %s)',
self.__name,
slices,
self.__range[0],
self.__range[1],
len(expansions),
self.__coverage,
self.__volRanges)
# As we access the data for each expansions,
# we want it to have the same dimensionality
@@ -505,12 +511,12 @@ class ImageWrapper(notifier.Notifier):
if any((oldmin is None, oldmax is None)) or \
not np.all(np.isclose([oldmin, oldmax], [newmin, newmax])):
log.debug('Image %s range changed: [%s, %s] -> [%s, %s]',
self.__name,
oldmin,
oldmax,
newmin,
newmax)
self.notify()
@@ -589,15 +595,15 @@ class ImageWrapper(notifier.Notifier):
slices = np.array(slices.T, dtype=np.uint32)
slices = tuple(it.chain(map(tuple, slices), [(lowVol, highVol)]))
log.debug('Image %s data written - clearing known data '
'range on volumes %s - %s (write slice: %s; '
'coverage: %s; volRanges: %s)',
self.__name,
lowVol,
highVol,
slices,
self.__coverage[:, :, lowVol:highVol],
self.__volRanges[lowVol:highVol, :])
for vol in range(lowVol, highVol):
self.__coverage[:, :, vol] = np.nan
@@ -620,7 +626,7 @@ class ImageWrapper(notifier.Notifier):
:arg sliceobj: Something which can slice the image data.
"""
log.debug('Getting image data: {}'.format(sliceobj))
log.debug('Getting image data: %s', sliceobj)
shape = self.__canonicalShape
realShape = self.__image.shape
@@ -703,7 +709,7 @@ class ImageWrapper(notifier.Notifier):
raise IndexError('Invalid assignment: [{}] = {}'.format(
sliceobj, len(values)))
values = np.array(values).flatten()[0]
# Make sure that the values
# have a compatible shape.
@@ -719,108 +725,33 @@ class ImageWrapper(notifier.Notifier):
# have any effect.
self.loadData()
self.__data[sliceobj] = values
self.__updateDataRangeOnWrite(slices, values)
@deprecated.deprecated('3.9.0', '4.0.0', 'Moved to fsl.data.image')
def isValidFancySliceObj(sliceobj, shape):
"""Returns ``True`` if the given ``sliceobj`` is a valid and fancy slice
object.
``nibabel`` refers to slice objects as "fancy" if they comprise anything
but tuples of integers and simple ``slice`` objects. The ``ImageWrapper``
class supports one type of "fancy" slicing, where the ``sliceobj`` is a
boolean ``numpy`` array of the same shape as the image.
This function returns ``True`` if the given ``sliceobj`` adheres to these
requirements, ``False`` otherwise.
"""
# We only support boolean numpy arrays
# which have the same shape as the image
return (isinstance(sliceobj, np.ndarray) and
sliceobj.dtype == np.bool and
np.prod(sliceobj.shape) == np.prod(shape))
"""Deprecated - moved to :mod:`fsl.data.image`."""
return fslimage.isValidFancySliceObj(sliceobj, shape)
@deprecated.deprecated('3.9.0', '4.0.0', 'Moved to fsl.data.image')
def canonicalSliceObj(sliceobj, shape):
"""Returns a canonical version of the given ``sliceobj``. See the
``nibabel.fileslice.canonical_slicers`` function.
"""
# Fancy slice objects must have
# the same shape as the data
if isValidFancySliceObj(sliceobj, shape):
return sliceobj.reshape(shape)
else:
if not isinstance(sliceobj, tuple):
sliceobj = (sliceobj,)
if len(sliceobj) > len(shape):
sliceobj = sliceobj[:len(shape)]
return nib.fileslice.canonical_slicers(sliceobj, shape)
"""Deprecated - moved to :mod:`fsl.data.image`."""
return fslimage.canonicalSliceObj(sliceobj, shape)
@deprecated.deprecated('3.9.0', '4.0.0', 'Moved to fsl.data.image')
def expectedShape(sliceobj, shape):
"""Given a slice object, and the shape of an array to which
that slice object is going to be applied, returns the expected
shape of the result.
.. note:: It is assumed that the ``sliceobj`` has been passed through
the :func:`canonicalSliceObj` function.
:arg sliceobj: Something which can be used to slice an array
of shape ``shape``.
:arg shape: Shape of the array being sliced.
:returns: A tuple containing:
- Expected number of dimensions of the result
- Expected shape of the result (or ``None`` if
``sliceobj`` is fancy).
"""
if isValidFancySliceObj(sliceobj, shape):
return 1, None
# Truncate some dimensions from the
# slice object if it has too many
# (e.g. trailing dims of length 1).
elif len(sliceobj) > len(shape):
sliceobj = sliceobj[:len(shape)]
# Figure out the number of dimensions
# that the result should have, given
# this slice object.
expShape = []
for i in range(len(sliceobj)):
# Each dimension which has an
# int slice will be collapsed
if isinstance(sliceobj[i], int):
continue
start = sliceobj[i].start
stop = sliceobj[i].stop
if start is None: start = 0
if stop is None: stop = shape[i]
stop = min(stop, shape[i])
expShape.append(stop - start)
return len(expShape), expShape
"""Deprecated - moved to :mod:`fsl.data.image`."""
return fslimage.expectedShape(sliceobj, shape)
@deprecated.deprecated('3.9.0', '4.0.0', 'Moved to FSLeyes')
def sliceObjToSliceTuple(sliceobj, shape):
"""Turns an array slice object into a tuple of (low, high) index
"""Deprecated - the imagewrapper has been moved to FSLeyes.
Turns an array slice object into a tuple of (low, high) index
pairs, one pair for each dimension in the given shape
:arg sliceobj: Something which can be used to slice an array of shape
@@ -859,8 +790,11 @@ def sliceObjToSliceTuple(sliceobj, shape):
return tuple(indices)
@deprecated.deprecated('3.9.0', '4.0.0', 'Moved to FSLeyes')
def sliceTupleToSliceObj(slices):
"""Turns a sequence of (low, high) index pairs into a tuple of array
"""Deprecated - the imagewrapper has been moved to FSLeyes.
Turns a sequence of (low, high) index pairs into a tuple of array
``slice`` objects.
:arg slices: A sequence of (low, high) index pairs.
@@ -874,8 +808,11 @@ def sliceTupleToSliceObj(slices):
return tuple(sliceobj)
@deprecated.deprecated('3.9.0', '4.0.0', 'Moved to FSLeyes')
def adjustCoverage(oldCoverage, slices):
"""Adjusts/expands the given ``oldCoverage`` so that it covers the
"""Deprecated - the imagewrapper has been moved to FSLeyes.
Adjusts/expands the given ``oldCoverage`` so that it covers the
given set of ``slices``.
:arg oldCoverage: A ``numpy`` array of shape ``(2, n)`` containing
@@ -922,8 +859,11 @@ return code for the :func:`sliceOverlap` function.
"""
@deprecated.deprecated('3.9.0', '4.0.0', 'Moved to FSLeyes')
def sliceOverlap(slices, coverage):
"""Determines whether the given ``slices`` overlap with the given
"""Deprecated - the imagewrapper has been moved to FSLeyes.
Determines whether the given ``slices`` overlap with the given
``coverage``.
:arg slices: A sequence of (low, high) index pairs, assumed to cover
@@ -989,8 +929,11 @@ def sliceOverlap(slices, coverage):
elif np.all(overlapStates == OVERLAP_ALL): return OVERLAP_ALL
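A hedged sketch; this assumes the ``ImageWrapper`` coverage layout of ``[low/high, dimension, volume]``, with ``slices`` carrying a trailing (low, high) pair for the volume dimension:

    import numpy as np

    coverage = np.full((2, 2, 1), np.nan)
    coverage[:, :, 0] = [[0, 0],
                         [2, 2]]

    sliceOverlap(((0, 2), (0, 2), (0, 1)), coverage)  # expect OVERLAP_ALL
    sliceOverlap(((2, 4), (2, 4), (0, 1)), coverage)  # expect OVERLAP_NONE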
@deprecated.deprecated('3.9.0', '4.0.0', 'Moved to FSLeyes')
def sliceCovered(slices, coverage):
"""Returns ``True`` if the portion of the image data calculated by
"""Deprecated - the imagewrapper has been moved to FSLeyes.
Returns ``True`` if the portion of the image data calculated by
the given ``slices`` has already been calculated, ``False`` otherwise.
:arg slices: A sequence of (low, high) index pairs, assumed to cover
@@ -1021,8 +964,11 @@ def sliceCovered(slices, coverage):
return True
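Under the same assumed coverage layout as the :func:`sliceOverlap` sketch above:

    import numpy as np

    coverage = np.full((2, 2, 1), np.nan)
    coverage[:, :, 0] = [[0, 0],
                         [2, 2]]

    # True only if every requested (low, high) range falls inside the
    # existing coverage for every requested volume
    sliceCovered(((0, 2), (0, 2), (0, 1)), coverage)  # expect True
    sliceCovered(((0, 4), (0, 4), (0, 1)), coverage)  # expect False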
@deprecated.deprecated('3.9.0', '4.0.0', 'Moved to FSLeyes')
def calcExpansion(slices, coverage):
"""Calculates a series of *expansion* slices, which can be used to expand
"""Deprecated - the imagewrapper has been moved to FSLeyes.
Calculates a series of *expansion* slices, which can be used to expand
the given ``coverage`` so that it includes the given ``slices``.
:arg slices: Slices that the coverage needs to be expanded to cover.
@@ -1191,8 +1137,11 @@ def calcExpansion(slices, coverage):
return volumes, expansions
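A usage sketch, again assuming the ``[low/high, dimension, volume]`` coverage layout:

    import numpy as np

    coverage = np.full((2, 2, 1), np.nan)
    coverage[:, :, 0] = [[0, 0],
                         [2, 2]]

    # one or more expansion slices per affected volume; applying each
    # of them via adjustCoverage would grow the coverage to include
    # the requested slices
    volumes, expansions = calcExpansion(((0, 4), (0, 4), (0, 1)), coverage)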
@deprecated.deprecated('3.9.0', '4.0.0', 'Moved to FSLeyes')
def collapseExpansions(expansions, numDims):
"""Scans through the given list of expansions (each assumed to pertain
"""Deprecated - the imagewrapper has been moved to FSLeyes.
Scans through the given list of expansions (each assumed to pertain
to a single 3D image), and combines any which cover the same
image area, and cover adjacent volumes.
@@ -33,7 +33,6 @@ import logging
import os.path as op
import numpy as np
import fsl.utils.path as fslpath
import fsl.data.image as fslimage
import fsl.data.featanalysis as featanalysis
@@ -63,10 +62,9 @@ def isMelodicImage(path):
def isMelodicDir(path):
"""Returns ``True`` if the given path looks like it is contained within
a MELODIC directory, ``False`` otherwise. A melodic directory:
"""Returns ``True`` if the given path looks like it is a MELODIC directory,
``False`` otherwise. A MELODIC directory:
- Must be named ``*.ica``.
- Must contain a file called ``melodic_IC.nii.gz`` or
``melodic_oIC.nii.gz``.
- Must contain a file called ``melodic_mix``.
@@ -75,12 +73,7 @@ def isMelodicDir(path):
path = op.abspath(path)
if op.isdir(path): dirname = path
else: dirname = op.dirname(path)
sufs = ['.ica']
if not any([dirname.endswith(suf) for suf in sufs]):
if not op.isdir(path):
return False
# Must contain an image file called
@@ -88,7 +81,7 @@ def isMelodicDir(path):
prefixes = ['melodic_IC', 'melodic_oIC']
for p in prefixes:
try:
fslimage.addExt(op.join(dirname, p))
fslimage.addExt(op.join(path, p))
break
except fslimage.PathError:
pass
@@ -97,8 +90,8 @@ def isMelodicDir(path):
# Must contain files called
# melodic_mix and melodic_FTmix
if not op.exists(op.join(dirname, 'melodic_mix')): return False
if not op.exists(op.join(dirname, 'melodic_FTmix')): return False
if not op.exists(op.join(path, 'melodic_mix')): return False
if not op.exists(op.join(path, 'melodic_FTmix')): return False
return True
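For example (hypothetical paths; note that, after this change, the path must be the MELODIC directory itself rather than a file inside it):

    isMelodicDir('analysis.ica')               # True, if the melodic_IC
                                               # image and mix files exist
    isMelodicDir('analysis.ica/melodic_mix')   # False - not a directory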
@@ -108,10 +101,13 @@ def getAnalysisDir(path):
to that MELODIC directory is returned. Otherwise, ``None`` is returned.
"""
meldir = fslpath.deepest(path, ['.ica'])
if not op.isdir(path):
path = op.dirname(path)
if meldir is not None and isMelodicDir(meldir):
return meldir
while path not in (op.sep, ''):
if isMelodicDir(path):
return path
path = op.dirname(path)
return None
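For example (hypothetical path):

    # walks up the directory tree until a MELODIC directory is found
    getAnalysisDir('/data/analysis.ica/melodic_IC.nii.gz')
    # -> '/data/analysis.ica'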
@@ -137,10 +133,18 @@ def getDataFile(meldir):
if topDir is None:
return None
dataFile = op.join(topDir, 'filtered_func_data')
# People often rename filtered_func_data.nii.gz
# to something like filtered_func_data_clean.nii.gz,
# because that is the recommended approach when
# performing ICA-based denoising. So we try both.
candidates = ['filtered_func_data', 'filtered_func_data_clean']
try: return fslimage.addExt(dataFile)
except fslimage.PathError: return None
for candidate in candidates:
dataFile = op.join(topDir, candidate)
try: return fslimage.addExt(dataFile)
except fslimage.PathError: continue
return None
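For example (hypothetical layout, with the MELODIC directory nested inside a FEAT analysis):

    # returns the first candidate that exists, e.g.
    # '/data/analysis.feat/filtered_func_data_clean.nii.gz' if the
    # original data file has been renamed after denoising
    getDataFile('/data/analysis.feat/filtered_func_data.ica')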
def getMeanFile(meldir):
@@ -187,7 +191,7 @@ def getNumComponents(meldir):
contained in the given directory.
"""
icImg = fslimage.Image(getICFile(meldir), loadData=False, calcRange=False)
icImg = fslimage.Image(getICFile(meldir))
return icImg.shape[3]