diff --git a/fsl/data/gifti.py b/fsl/data/gifti.py
index c27054ab8ba3a7be26f2f98d54b78b9609c07b0d..7bc088d1735cce7c02309b4be66e30d10cc5b86a 100644
--- a/fsl/data/gifti.py
+++ b/fsl/data/gifti.py
@@ -49,7 +49,7 @@ class GiftiSurface(mesh.TriangleMesh):
     """
 
-    def __init__(self, infile):
+    def __init__(self, infile, fixWinding=False):
         """Load the given GIFTI file using ``nibabel``, and extracts
         surface data using the :func:`loadGiftiSurface` function.
 
@@ -61,7 +61,7 @@ class GiftiSurface(mesh.TriangleMesh):
 
         surfimg, vertices, indices = loadGiftiSurface(infile)
 
-        mesh.TriangleMesh.__init__(self, vertices, indices)
+        mesh.TriangleMesh.__init__(self, vertices, indices, fixWinding)
 
         name   = fslpath.removeExt(op.basename(infile), ALLOWED_EXTENSIONS)
         infile = op.abspath(infile)
diff --git a/fsl/data/mesh.py b/fsl/data/mesh.py
index 2a9c47df7bdacc938cbcef57bf459b05529f8db1..c3c701232406c37d3bdd0ddff8e54cd63d3fab28 100644
--- a/fsl/data/mesh.py
+++ b/fsl/data/mesh.py
@@ -27,6 +27,8 @@ import numpy as np
 
 import six
 
+import fsl.utils.transform as transform
+
 from . import image as fslimage
 
 
@@ -34,9 +36,9 @@ log = logging.getLogger(__name__)
 
 
 class TriangleMesh(object):
-    """The ``TriangleMesh`` class represents a 3D model. A mesh is defined by
-    a collection of vertices and indices. The indices index into the list of
-    vertices, and define a set of triangles which make the model.
+    """The ``TriangleMesh`` class represents a 3D model. A mesh is defined by a
+    collection of ``N`` vertices and ``M`` triangles. The triangles are
+    defined by ``(M, 3)`` indices into the list of vertices.
 
     A ``TriangleMesh`` instance has the following attributes:
 
@@ -53,6 +55,12 @@ class TriangleMesh(object):
 
         ``indices``     A :meth:`M\times 3` ``numpy`` array containing
                         the vertex indices for :math:`M` triangles
+
+        ``normals``     A :math:`M\times 3` ``numpy`` array containing
+                        face normals.
+
+        ``vnormals``    A :math:`N\times 3` ``numpy`` array containing
+                        vertex normals.
 
         ============== ====================================================
 
@@ -68,16 +76,20 @@ class TriangleMesh(object):
     """
 
 
-    def __init__(self, data, indices=None):
+    def __init__(self, data, indices=None, fixWinding=False):
         """Create a ``TriangleMesh`` instance.
 
-        :arg data:    Can either be a file name, or a :math:`N\\times 3`
-                      ``numpy`` array containing vertex data. If ``data`` is
-                      a file name, it is passed to the
-                      :func:`loadVTKPolydataFile` function.
+        :arg data:       Can either be a file name, or a :math:`N\\times 3`
+                         ``numpy`` array containing vertex data. If ``data``
+                         is a file name, it is passed to the
+                         :func:`loadVTKPolydataFile` function.
+
+        :arg indices:    A list of indices into the vertex data, defining
+                         the triangles.
 
-        :arg indices: A list of indices into the vertex data, defining
-                      the triangles.
+        :arg fixWinding: Defaults to ``False``. If ``True``, the vertex
+                         winding order of every triangle is fixed so they
+                         all have outward-facing normal vectors.
""" if isinstance(data, six.string_types): @@ -97,12 +109,17 @@ class TriangleMesh(object): if indices is None: indices = np.arange(data.shape[0]) - self.vertices = np.array(data) - self.indices = np.array(indices).reshape((-1, 3)) + self.__vertices = np.array(data) + self.__indices = np.array(indices).reshape((-1, 3)) self.__vertexData = {} - self.__loBounds = self.vertices.min(axis=0) - self.__hiBounds = self.vertices.max(axis=0) + self.__faceNormals = None + self.__vertNormals = None + self.__loBounds = self.vertices.min(axis=0) + self.__hiBounds = self.vertices.max(axis=0) + + if fixWinding: + self.__fixWindingOrder() def __repr__(self): @@ -118,6 +135,101 @@ class TriangleMesh(object): return self.__repr__() + @property + def vertices(self): + """The ``(N, 3)`` vertices of this mesh. """ + return self.__vertices + + + @property + def indices(self): + """The ``(M, 3)`` triangles of this mesh. """ + return self.__indices + + + def __fixWindingOrder(self): + """Called by :meth:`__init__` if ``fixWinding is True``. Fixes the + mesh triangle winding order so that all face normals are facing + outwards from the centre of the mesh. + """ + + # Define a viewpoint which is + # far away from the mesh. + fnormals = self.normals + camera = self.__loBounds - (self.__hiBounds - self.__loBounds) + + # Find the nearest vertex + # to the viewpoint + dists = np.sqrt(np.sum((self.vertices - camera) ** 2, axis=1)) + ivert = np.argmin(dists) + vert = self.vertices[ivert] + + # Pick a triangle that + # this vertex in and + # ges its face normal + itri = np.where(self.indices == ivert)[0][0] + n = fnormals[itri, :] + + # Make sure the angle between the + # normal, and a vector from the + # vertex to the camera is positive + # If it isn't, flip the triangle + # winding order. + if np.dot(n, transform.normalise(camera - vert)) < 0: + self.indices[:, [1, 2]] = self.indices[:, [2, 1]] + self.__faceNormals *= -1 + + + @property + def normals(self): + """A ``(M, 3)`` array containing surface normals for every + triangle in the mesh, normalised to unit length. + """ + + if self.__faceNormals is not None: + return self.__faceNormals + + v0 = self.vertices[self.indices[:, 0]] + v1 = self.vertices[self.indices[:, 1]] + v2 = self.vertices[self.indices[:, 2]] + + n = np.cross((v1 - v0), (v2 - v0)) + + self.__faceNormals = transform.normalise(n) + + return self.__faceNormals + + + @property + def vnormals(self): + """A ``(N, 3)`` array containing normals for every vertex + in the mesh. + """ + if self.__vertNormals is not None: + return self.__vertNormals + + # per-face normals + fnormals = self.normals + vnormals = np.zeros((self.vertices.shape[0], 3), dtype=np.float) + + # TODO make fast. I can't figure + # out how to use np.add.at to + # accumulate the face normals for + # each vertex. + for i in range(self.indices.shape[0]): + + v0, v1, v2 = self.indices[i] + + vnormals[v0, :] += fnormals[i] + vnormals[v1, :] += fnormals[i] + vnormals[v2, :] += fnormals[i] + + # normalise to unit length + self.__vertNormals = transform.normalise(vnormals) + + return self.__vertNormals + + def getBounds(self): """Returns a tuple of values which define a minimal bounding box that will contain all vertices in this ``TriangleMesh`` instance. The diff --git a/fsl/utils/memoize.py b/fsl/utils/memoize.py index 8f56a1a61eb9fa3346da69a27d3da13fdccc0b5d..cbe95a4494f10934d311cd1856ec66bebeade431 100644 --- a/fsl/utils/memoize.py +++ b/fsl/utils/memoize.py @@ -10,6 +10,8 @@ a function: .. 
autosummary:: :nosignatures: + memoize + Memoize Instanceify memoizeMD5 skipUnchanged @@ -17,7 +19,6 @@ a function: import logging - import hashlib import functools import six @@ -25,20 +26,91 @@ import six log = logging.getLogger(__name__) -# TODO Make this a class, and add -# a "clearCache" method to it. -def memoize(func): +def memoize(func=None): """Memoize the given function by the value of the input arguments. + This function simply returns a :class:`Memoize` instance. + """ + + return Memoize(func) + + +class Memoize(object): + """Decorator which can be used to memoize a function or method. Use like + so:: + + @memoize + def myfunc(*a, **kwa): + ... + + @memoize() + def otherfunc(*a, **kwax): + ... + + A ``Memoize`` instance maintains a cache which contains ``{args : value}`` + mappings, where ``args`` are the input arguments to the function, and + ``value`` is the value that the function returned for those arguments. + When a memoized function is called with arguments that are present in the + cache, the cached values are returned, and the function itself is not + called. + + + The :meth:`invalidate` method may be used to clear the internal cache. + + Note that the arguments used for memoization must be hashable, as they are used as keys in a dictionary. """ - cache = {} - defaultKey = '_memoize_noargs_' - def wrapper(*a, **kwa): + def __init__(self, *args, **kwargs): + """Create a ``Memoize`` object. + """ + + self.__cache = {} + self.__func = None + self.__defaultKey = '_memoize_noargs_' + + self.__setFunction(*args, **kwargs) + + + def invalidate(self, *args, **kwargs): + """Clears the internal cache. If no arguments are given, the entire + cache is cleared. Otherwise, only the cached value for the provided + arguments is cleared. + """ + + if len(args) + len(kwargs) == 0: + self.__cache = {} + + else: + key = self.__makeKey(*args, **kwargs) + + try: + self.__cache.pop(key) + except KeyError: + pass + + + def __setFunction(self, *args, **kwargs): + """Used internally to set the memoized function. """ + + if self.__func is not None: + return False + + # A no-brackets style + # decorator was used + isfunc = (len(kwargs) == 0 and len(args) == 1 and callable(args[0])) + + if isfunc: + self.__func = args[0] + + return isfunc + + def __makeKey(self, *a, **kwa): + """Constructs a key for use with the cache from the given arguments. + """ key = [] if a is not None: key += list(a) @@ -48,24 +120,35 @@ def memoize(func): # any arguments specified - use the # default cache key. if len(key) == 0: - key = [defaultKey] + key = [self.__defaultKey] + + return tuple(key) - key = tuple(key) + + def __call__(self, *a, **kwa): + """Checks the cache against the given arguments. If a cached value + is present, it is returned. Otherwise the memoized function is called, + and its value is cached and returned. 
+ """ + + if self.__setFunction(*a, **kwa): + return self + + key = self.__makeKey(*a, **kwa) try: - result = cache[key] + result = self.__cache[key] log.debug(u'Retrieved from cache[{}]: {}'.format(key, result)) except KeyError: - result = func(*a, **kwa) - cache[key] = result + result = self.__func(*a, **kwa) + self.__cache[key] = result log.debug(u'Adding to cache[{}]: {}'.format(key, result)) return result - return wrapper def memoizeMD5(func): @@ -143,8 +226,13 @@ def skipUnchanged(func): newIsArray = isinstance(value, np.ndarray) isarray = oldIsArray or newIsArray - if isarray: nochange = np.all(oldVal == value) - else: nochange = oldVal == value + if isarray: + a = np.array(oldVal, copy=False) + b = np.array(value, copy=False) + + nochange = (a.shape == b.shape) and np.allclose(a, b) + else: + nochange = oldVal == value if nochange: return False diff --git a/fsl/utils/transform.py b/fsl/utils/transform.py index d934d21ff5c675fac4f7c96a866c7a1954f9b8c6..4fe7fe8e4cfd33e97c4e4cd72db90081c5ce8582 100644 --- a/fsl/utils/transform.py +++ b/fsl/utils/transform.py @@ -11,16 +11,26 @@ spaces. The following functions are provided: :nosignatures: transform + transformNormal scaleOffsetXform invert concat compose decompose + rotMatToAffine rotMatToAxisAngles axisAnglesToRotMat axisBounds flirtMatrixToSform sformToFlirtMatrix + +And a few more functions are provided for working with vectors: + +.. autosummary:: + :nosignatures: + + veclength + normalise """ import numpy as np @@ -44,6 +54,29 @@ def concat(*xforms): return result +def veclength(vec): + """Returns the length of the given vector(s). + + Multiple vectors may be passed in, with a shape of ``(n, 3)``. + """ + vec = np.array(vec, copy=False).reshape(-1, 3) + return np.sqrt(np.einsum('ij,ij->i', vec, vec)) + + +def normalise(vec): + """Normalises the given vector(s) to unit length. + + Multiple vectors may be passed in, with a shape of ``(n, 3)``. + """ + vec = np.array(vec, copy=False).reshape(-1, 3) + n = (vec.T / veclength(vec)).T + + if n.size == 3: + n = n[0] + + return n + + def scaleOffsetXform(scales, offsets): """Creates and returns an affine transformation matrix which encodes the specified scale(s) and offset(s). @@ -60,10 +93,12 @@ def scaleOffsetXform(scales, offsets): :returns: A ``numpy.float32`` array of size :math:`4 \\times 4`. """ - if not isinstance(scales, collections.Sequence): scales = [scales] - if not isinstance(offsets, collections.Sequence): offsets = [offsets] - if not isinstance(scales, list): scales = list(scales) - if not isinstance(offsets, list): offsets = list(offsets) + oktypes = (collections.Sequence, np.ndarray) + + if not isinstance(scales, oktypes): scales = [scales] + if not isinstance(offsets, oktypes): offsets = [offsets] + if not isinstance(scales, list): scales = list(scales) + if not isinstance(offsets, list): offsets = list(offsets) lens = len(scales) leno = len(offsets) @@ -131,7 +166,7 @@ def compose(scales, offsets, rotations, origin=None): return concat(offset, postRotate, rotate, preRotate, scale) -def decompose(xform): +def decompose(xform, angles=True): """Decomposes the given transformation matrix into separate offsets, scales, and rotations, according to the algorithm described in: @@ -142,12 +177,17 @@ def decompose(xform): It is assumed that the given transform has no perspective components. Any shears in the affine are discarded. - :arg xform: A ``(4, 4)`` affine transformation matrix. + :arg xform: A ``(4, 4)`` affine transformation matrix. 
+ + :arg angles: If ``True`` (the default), the rotations are returned + as axis-angles, in radians. Otherwise, the rotation matrix + is returned. :returns: The following: - A sequence of three scales - A sequence of three translations - - A sequence of three rotations, in radians + - A sequence of three rotations, in radians. Or, if + ``angles is False``, a rotation matrix. """ # The inline comments in the code below are taken verbatim from @@ -216,9 +256,17 @@ def decompose(xform): # Finally, we need to decompose the rotation matrix into a sequence # of rotations about the x, y, and z axes. [This is done in the # rotMatToAxisAngles function] - rx, ry, rz = rotMatToAxisAngles(R.T) + if angles: rotations = rotMatToAxisAngles(R.T) + else: rotations = R.T + + return [sx, sy, sz], translations, rotations + - return [sx, sy, sz], translations, [rx, ry, rz] +def rotMatToAffine(rotmat, origin=None): + """Convenience function which encodes the given ``(3, 3)`` rotation + matrix into a ``(4, 4)`` affine. + """ + return compose([1, 1, 1], [0, 0, 0], rotmat, origin) def rotMatToAxisAngles(rotmat): @@ -396,24 +444,29 @@ def axisBounds(shape, else: return (lo, hi) -def transform(p, xform, axes=None): +def transform(p, xform, axes=None, vector=False): """Transforms the given set of points ``p`` according to the given affine transformation ``xform``. - :arg p: A sequence or array of points of shape :math:`N \\times 3`. + :arg p: A sequence or array of points of shape :math:`N \\times 3`. + + :arg xform: A ``(4, 4)`` affine transformation matrix with which to + transform the points in ``p``. - :arg xform: An affine transformation matrix with which to transform the - points in ``p``. + :arg axes: If you are only interested in one or two axes, and the source + axes are orthogonal to the target axes (see the note below), + you may pass in a 1D, ``N*1``, or ``N*2`` array as ``p``, and + use this argument to specify which axis/axes that the data in + ``p`` correspond to. - :arg axes: If you are only interested in one or two axes, and the source - axes are orthogonal to the target axes (see the note below), - you may pass in a 1D, ``N*1``, or ``N*2`` array as ``p``, and - use this argument to specify which axis/axes that the data in - ``p`` correspond to. + :arg vector: Defaults to ``False``. If ``True``, the points are treated + as vectors - the translation component of the transformation + is not applied. If you set this flag, you pass in a ``(3, 3)`` + transformation matrix. - :returns: The points in ``p``, transformed by ``xform``, as a ``numpy`` - array with the same data type as the input. + :returns: The points in ``p``, transformed by ``xform``, as a ``numpy`` + array with the same data type as the input. .. note:: The ``axes`` argument should only be used if the source @@ -426,7 +479,10 @@ def transform(p, xform, axes=None): """ p = _fillPoints(p, axes) - t = np.dot(xform[:3, :3], p.T).T + xform[:3, 3] + t = np.dot(xform[:3, :3], p.T).T + + if not vector: + t = t + xform[:3, 3] if axes is not None: t = t[:, axes] @@ -435,6 +491,14 @@ def transform(p, xform, axes=None): else: return t +def transformNormal(p, xform, axes=None): + """Transforms the given point(s), under the assumption that they + are normal vectors. In this case, the points are transformed by + ``invert(xform[:3, :3]).T``. + """ + return transform(p, invert(xform[:3, :3]).T, axes, vector=True) + + def _fillPoints(p, axes): """Used by the :func:`transform` function. Turns the given array p into a ``N*3`` array of ``x,y,z`` coordinates. 
The array p may be a 1D array, diff --git a/tests/test_image.py b/tests/test_image.py index 22568f92620f97f7117d628af3217ac4e1acd6d7..1c6ebc5b513cd5607beb02af2a8c4da786f24a41 100644 --- a/tests/test_image.py +++ b/tests/test_image.py @@ -723,9 +723,9 @@ def _test_Image_changeData(imgtype): assert img.saveState assert np.all(np.isclose(img.dataRange, (dmin, dmax))) - randval = dmin + np.random.random() * drange - rx, ry, rz = randvox() - + # random value within the existing data range + randval = dmin + np.random.random() * drange + rx, ry, rz = randvox() img[rx, ry, rz] = randval assert np.isclose(img[rx, ry, rz], randval) @@ -738,23 +738,29 @@ def _test_Image_changeData(imgtype): newdmin = dmin - 100 newdmax = dmax + 100 - rx, ry, rz = randvox() - img[rx, ry, rz] = newdmin + # random value below the data range + minx, miny, minz = randvox() + img[minx, miny, minz] = newdmin assert notified.get('data', False) assert notified.get('dataRange', False) - assert np.isclose(img[rx, ry, rz], newdmin) + assert np.isclose(img[minx, miny, minz], newdmin) assert np.all(np.isclose(img.dataRange, (newdmin, dmax))) notified.pop('data') notified.pop('dataRange') - rx, ry, rz = randvox() - img[rx, ry, rz] = newdmax + # random value above the data range + maxx, maxy, maxz = randvox() + + while (maxx, maxy, maxz) == (minx, miny, minz): + maxx, maxy, maxz = randvox() + + img[maxx, maxy, maxz] = newdmax assert notified.get('data', False) assert notified.get('dataRange', False) - assert np.isclose(img[rx, ry, rz], newdmax) + assert np.isclose(img[maxx, maxy, maxz], newdmax) assert np.all(np.isclose(img.dataRange, (newdmin, newdmax))) finally: diff --git a/tests/test_memoize.py b/tests/test_memoize.py index 63ce1a267ba5ee00e3551e7ea81bf27b3c618054..9b885d0fd7d2c739fddd1d7bc15bb8eb753f5ba0 100644 --- a/tests/test_memoize.py +++ b/tests/test_memoize.py @@ -5,6 +5,7 @@ # Author: Paul McCarthy <pauldmccarthy@gmail.com> # +import collections import six import numpy as np @@ -50,6 +51,72 @@ def test_memoize(): assert timesCalled[0] == 7 +def test_memoize_create(): + + timesCalled = { + 'without_brackets' : 0, + 'with_brackets' : 0 + } + + @memoize.memoize + def without_brackets(): + timesCalled['without_brackets'] += 1 + return 5 + + @memoize.memoize() + def with_brackets(): + timesCalled['with_brackets'] += 1 + return 10 + + + for i in range(10): + assert without_brackets() == 5 + assert with_brackets() == 10 + assert timesCalled['without_brackets'] == 1 + assert timesCalled['with_brackets'] == 1 + + +def test_memoize_invalidate(): + + timesCalled = collections.defaultdict(lambda: 0) + + @memoize.memoize + def func(arg): + timesCalled[arg] += 1 + return arg * 5 + + + for i in range(5): + assert func(5) == 25 + assert func(10) == 50 + assert timesCalled[5] == 1 + assert timesCalled[10] == 1 + + func.invalidate() + + for i in range(5): + assert func(5) == 25 + assert func(10) == 50 + assert timesCalled[5] == 2 + assert timesCalled[10] == 2 + + func.invalidate(5) + for i in range(5): + assert func(5) == 25 + assert func(10) == 50 + assert timesCalled[5] == 3 + assert timesCalled[10] == 2 + + func.invalidate(10) + for i in range(5): + assert func(5) == 25 + assert func(10) == 50 + assert timesCalled[5] == 3 + assert timesCalled[10] == 3 + + + + def test_memoizeMD5(): timesCalled = [0] @@ -90,11 +157,7 @@ def test_skipUnchanged(): """ """ - timesCalled = { - 'key1' : 0, - 'key2' : 0, - 'key3' : 0, - } + timesCalled = collections.defaultdict(lambda: 0) def setter(name, value): timesCalled[name] = timesCalled[name] + 1 @@ 
-165,6 +228,22 @@ def test_skipUnchanged(): assert timesCalled['key2'] == 5 assert timesCalled['key3'] == 5 + # Regression - zero + # sized numpy arrays + # could previously be + # tested incorrectly + # because e.g. + # + # np.all(np.zeros((0, 3)), np.ones((1, 3)) + # + # evaluates to True + wrapped('key4', np.zeros((0, 4))) + assert timesCalled['key4'] == 1 + wrapped('key4', np.zeros((1, 4))) + assert timesCalled['key4'] == 2 + + + def test_Instanceify(): diff --git a/tests/test_transform.py b/tests/test_transform.py index 62943d51bfb8cb32a4084a825a39f5824d2c1395..609df61df5aa842718b6f1a5690c3e1f321b1129 100644 --- a/tests/test_transform.py +++ b/tests/test_transform.py @@ -8,10 +8,12 @@ from __future__ import division -import glob -import os.path as op -import itertools as it -import numpy as np +import random +import glob +import os.path as op +import itertools as it +import numpy as np +import numpy.linalg as npla import six @@ -92,6 +94,7 @@ def test_concat(): def test_scaleOffsetXform(): + # Test numerically testfile = op.join(datadir, 'test_transform_test_scaleoffsetxform.txt') lines = readlines(testfile) ntests = len(lines) // 5 @@ -108,11 +111,51 @@ def test_scaleOffsetXform(): expected = [[float(v) for v in l.split()] for l in expected] expected = np.array(expected) - result1 = transform.scaleOffsetXform( scales, offsets) - result2 = transform.scaleOffsetXform(tuple(scales), tuple(offsets)) + result = transform.scaleOffsetXform(scales, offsets) + + assert np.all(np.isclose(result, expected)) + + # Test that different input types work: + # - scalars + # - lists/tuples of length <= 3 + # - numpy arrays + a = np.array + stests = [ + (5, [5, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1]), + ([5], [5, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1]), + ((5,), [5, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1]), + (a([5]), [5, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1]), + ([5, 6], [5, 0, 0, 0, 0, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1]), + ((5, 6), [5, 0, 0, 0, 0, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1]), + (a([5, 6]), [5, 0, 0, 0, 0, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1]), + ([5, 6, 7], [5, 0, 0, 0, 0, 6, 0, 0, 0, 0, 7, 0, 0, 0, 0, 1]), + ((5, 6, 7), [5, 0, 0, 0, 0, 6, 0, 0, 0, 0, 7, 0, 0, 0, 0, 1]), + (a([5, 6, 7]), [5, 0, 0, 0, 0, 6, 0, 0, 0, 0, 7, 0, 0, 0, 0, 1]), + ] + otests = [ + (5, [1, 0, 0, 5, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1]), + ([5], [1, 0, 0, 5, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1]), + ((5,), [1, 0, 0, 5, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1]), + (a([5]), [1, 0, 0, 5, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1]), + ([5, 6], [1, 0, 0, 5, 0, 1, 0, 6, 0, 0, 1, 0, 0, 0, 0, 1]), + ((5, 6), [1, 0, 0, 5, 0, 1, 0, 6, 0, 0, 1, 0, 0, 0, 0, 1]), + (a([5, 6]), [1, 0, 0, 5, 0, 1, 0, 6, 0, 0, 1, 0, 0, 0, 0, 1]), + ([5, 6, 7], [1, 0, 0, 5, 0, 1, 0, 6, 0, 0, 1, 7, 0, 0, 0, 1]), + ((5, 6, 7), [1, 0, 0, 5, 0, 1, 0, 6, 0, 0, 1, 7, 0, 0, 0, 1]), + (a([5, 6, 7]), [1, 0, 0, 5, 0, 1, 0, 6, 0, 0, 1, 7, 0, 0, 0, 1]), + ] + + for (scale, expected) in stests: + expected = np.array(expected).reshape(4, 4) + result = transform.scaleOffsetXform(scale, 0) + assert np.all(np.isclose(result, expected)) + for (offset, expected) in otests: + expected = np.array(expected).reshape(4, 4) + result = transform.scaleOffsetXform(1, offset) + assert np.all(np.isclose(result, expected)) + + - assert np.all(np.isclose(result1, expected)) - assert np.all(np.isclose(result2, expected)) def test_compose_and_decompose(): @@ -281,6 +324,30 @@ def test_transform(): transform.transform(badcoords[:, (1, 2, 3)], xform, axes=[1, 2]) +def 
test_transform_vector(seed): + + # Some transform with a + # translation component + xform = transform.compose([1, 2, 3], + [5, 10, 15], + [np.pi / 2, np.pi / 2, 0]) + + vecs = np.random.random((20, 3)) + + for v in vecs: + + vecExpected = np.dot(xform, list(v) + [0])[:3] + ptExpected = np.dot(xform, list(v) + [1])[:3] + + vecResult = transform.transform(v, xform, vector=True) + vec33Result = transform.transform(v, xform[:3, :3], vector=True) + ptResult = transform.transform(v, xform, vector=False) + + assert np.all(np.isclose(vecExpected, vecResult)) + assert np.all(np.isclose(vecExpected, vec33Result)) + assert np.all(np.isclose(ptExpected, ptResult)) + + def test_flirtMatrixToSform(): testfile = op.join(datadir, 'test_transform_test_flirtMatrixToSform.txt') @@ -329,3 +396,92 @@ def test_sformToFlirtMatrix(): assert np.all(np.isclose(result1, expected)) assert np.all(np.isclose(result2, expected)) + + + +def test_normalise(seed): + + vectors = -100 + 200 * np.random.random((200, 3)) + + def parallel(v1, v2): + v1 = v1 / transform.veclength(v1) + v2 = v2 / transform.veclength(v2) + + return np.isclose(np.dot(v1, v2), 1) + + for v in vectors: + + vtype = random.choice((list, tuple, np.array)) + v = vtype(v) + vn = transform.normalise(v) + vl = transform.veclength(vn) + + assert np.isclose(vl, 1.0) + assert parallel(v, vn) + + # normalise should also be able + # to do multiple vectors at once + results = transform.normalise(vectors) + lengths = transform.veclength(results) + pars = np.zeros(200) + for i in range(200): + + v = vectors[i] + r = results[i] + + pars[i] = parallel(v, r) + + assert np.all(np.isclose(lengths, 1)) + assert np.all(pars) + + +def test_veclength(seed): + + def l(v): + v = np.array(v, copy=False).reshape((-1, 3)) + x = v[:, 0] + y = v[:, 1] + z = v[:, 2] + l = x * x + y * y + z * z + return np.sqrt(l) + + vectors = -100 + 200 * np.random.random((200, 3)) + + for v in vectors: + + vtype = random.choice((list, tuple, np.array)) + v = vtype(v) + + assert np.isclose(transform.veclength(v), l(v)) + + # Multiple vectors in parallel + result = transform.veclength(vectors) + expected = l(vectors) + assert np.all(np.isclose(result, expected)) + + +def test_transformNormal(seed): + + normals = -100 + 200 * np.random.random((50, 3)) + + def tn(n, xform): + + xform = npla.inv(xform[:3, :3]).T + return np.dot(xform, n) + + for n in normals: + + scales = -10 + np.random.random(3) * 10 + offsets = -100 + np.random.random(3) * 200 + rotations = -np.pi + np.random.random(3) * 2 * np.pi + origin = -100 + np.random.random(3) * 200 + + xform = transform.compose(scales, + offsets, + rotations, + origin) + + expected = tn(n, xform) + result = transform.transformNormal(n, xform) + + assert np.all(np.isclose(expected, result))
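
A few standalone sketches follow; they illustrate points that the patch describes in prose, and none of them are part of the patch itself. The first concerns the ``vnormals`` property in fsl/data/mesh.py, which carries a TODO about accumulating face normals into vertex normals without a Python loop. ``np.add.at`` does unbuffered in-place addition, so triangles which share a vertex all contribute to that vertex's sum. The function name and arguments below are illustrative only:

    import numpy as np

    def accumulate_vertex_normals(nverts, indices, fnormals):
        """Sum each triangle's face normal into its three vertices.

        nverts:   number of vertices N
        indices:  (M, 3) integer array of triangle vertex indices
        fnormals: (M, 3) array of unit face normals
        """
        vnormals = np.zeros((nverts, 3), dtype=np.float64)

        # np.add.at is unbuffered, so repeated vertex indices accumulate
        # correctly (a plain "vnormals[idx] += fnormals" would not).
        for col in range(3):
            np.add.at(vnormals, indices[:, col], fnormals)

        # Normalise to unit length, guarding against isolated vertices
        # which never received a contribution.
        lengths = np.sqrt(np.sum(vnormals ** 2, axis=1))
        lengths[lengths == 0] = 1

        return vnormals / lengths[:, np.newaxis]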
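
The ``Memoize.__setFunction``/``__call__`` logic exists so that the decorator works both with and without brackets (``@memoize`` and ``@memoize()``): in the no-brackets form the target function arrives in the constructor, while in the brackets form it arrives as the single callable argument of the first call. A stripped-down sketch of that dispatch pattern; the ``cached`` class here is a toy, without the default key or ``invalidate`` handling of the real ``Memoize`` class:

    class cached(object):
        """Toy decorator usable as ``@cached`` or ``@cached()``."""

        def __init__(self, *args):
            # @cached form - the function is passed straight to the constructor
            self.__func  = args[0] if (len(args) == 1 and callable(args[0])) else None
            self.__cache = {}

        def __call__(self, *args, **kwargs):
            # @cached() form - the first call receives the target function
            if self.__func is None:
                self.__func = args[0]
                return self

            key = args + tuple(sorted(kwargs.items()))
            if key not in self.__cache:
                self.__cache[key] = self.__func(*args, **kwargs)
            return self.__cache[key]


    @cached
    def square(x):
        return x * x

    @cached()
    def cube(x):
        return x * x * x

    print(square(3), cube(3))   # 9 27 (later calls come from the cache)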
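
The ``skipUnchanged`` change and the ``key4`` regression test in tests/test_memoize.py hinge on a numpy broadcasting quirk: comparing a zero-sized array against another array broadcasts to an empty result, and ``np.all`` of an empty array is vacuously ``True``, so the old equality test reported "no change" even when the values clearly differed. A minimal demonstration of why the patched code compares shapes before values:

    import numpy as np

    old = np.zeros((0, 3))    # zero-sized array, e.g. no data points
    new = np.ones((1, 3))

    # (0, 3) vs (1, 3) broadcasts to an empty (0, 3) comparison result,
    # and np.all of an empty array is True.
    print(np.all(old == new))                                  # True

    # The patched check: compare shapes first, then values.
    print(old.shape == new.shape and np.allclose(old, new))    # False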
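
Finally, ``transformNormal`` applies ``invert(xform[:3, :3]).T`` - the usual rule that normal vectors transform by the inverse transpose of the linear part of an affine, which keeps them perpendicular to transformed surface directions even under shears and non-uniform scales. A numpy-only check of that rule, using an arbitrary example matrix rather than anything from the patch:

    import numpy as np

    # A shear which maps z' = 0.5 * x + z, tilting the xy-plane.
    M = np.array([[1.0, 0.0, 0.0],
                  [0.0, 1.0, 0.0],
                  [0.5, 0.0, 1.0]])

    d = np.array([1.0, 0.0, 0.0])   # direction lying in the xy-plane
    n = np.array([0.0, 0.0, 1.0])   # normal of the xy-plane (d . n == 0)

    d2        = M.dot(d)                    # transformed surface direction
    n_naive   = M.dot(n)                    # transformed like an ordinary vector
    n_correct = np.linalg.inv(M).T.dot(n)   # inverse-transpose rule

    print(np.dot(d2, n_naive))     # 0.5 - no longer perpendicular
    print(np.dot(d2, n_correct))   # 0.0 - still perpendicular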