diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000000000000000000000000000000000000..07f04632fc22dee40c5f68bde9406197619cb128 --- /dev/null +++ b/.gitattributes @@ -0,0 +1 @@ +*.nii.gz filter=lfs diff=lfs merge=lfs -text diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..49b91c24df1b8bd816439c94ea843303ad7e8e17 --- /dev/null +++ b/.gitignore @@ -0,0 +1,2 @@ +.vscode +pyfeeds-out \ No newline at end of file diff --git a/Makefile b/Makefile index 29eaa0c1d36bf6141761bf04bbe4116e2c6fc5d1..b1692d313dd4dbd241b331731770b2a3017de08b 100644 --- a/Makefile +++ b/Makefile @@ -5,7 +5,7 @@ PROJNAME = avwutils LIBS = -lfsl-newimage -lfsl-miscmaths -lfsl-cprob -lfsl-NewNifti -lfsl-utils -lfsl-znz XFILES = fslorient fslstats fslmerge \ - fslpspec fslroi fslnvols fsl2ascii fslascii2img \ + fslpspec fslroi fslnvols fsl2ascii fslascii2img \ fslsplit fslcc fslinterleave \ fslhd fslcpgeom fslcreatehd fslmaths \ fslcomplex fslfft fslmeants fslslice fslswapdim_exe \ diff --git a/fslstats.cc b/fslstats.cc index 03514a997d10e93b5724c541dbe231a5aa0efbac..b29d9f934a274dd6ea98fb081eb499897d1c0c4b 100644 --- a/fslstats.cc +++ b/fslstats.cc @@ -8,23 +8,31 @@ #include <limits> #include <string> +#include <typeinfo> #include "utils/fsl_isfinite.h" #include "armawrap/newmat.h" #include "miscmaths/miscmaths.h" #include "newimage/newimageall.h" #include "newimage/costfns.h" - +#include "nlohmann/json.hpp" using namespace std; using namespace NEWMAT; using namespace NEWIMAGE; +using json = nlohmann::json; -int print_usage(const string& progname) { - cout << "Usage: fslstats [preoptions] <input> [options]" << endl << endl; +int print_usage(const string &progname) +{ + cout << "Usage: fslstats [preoptions] <input> [options]" << endl + << endl; + // add --json output format option + // keys will correspond to the options above + cout << "preoption -json: output in JSON format to standard out" << endl; cout << "preoption -t will give a separate output line for each 3D volume of a 4D timeseries" << endl; cout << "preoption -K < indexMask > will generate seperate n submasks from indexMask, for indexvalues 1..n where n is the maximum index value in indexMask, and generate statistics for each submask" << endl; - cout << "Note - options are applied in order, e.g. -M -l 10 -M will report the non-zero mean, apply a threshold and then report the new nonzero mean" << endl << endl; + cout << "Note - options are applied in order, e.g. 
-M -l 10 -M will report the non-zero mean, apply a threshold and then report the new nonzero mean" << endl + << endl; cout << "-l <lthresh> : set lower threshold" << endl; cout << "-u <uthresh> : set upper threshold" << endl; cout << "-r : output <robust min intensity> <robust max intensity>" << endl; @@ -44,12 +52,12 @@ int print_usage(const string& progname) { cout << "-C : output centre-of-gravity (cog) in voxel coordinates" << endl; cout << "-p <n> : output nth percentile (n between 0 and 100)" << endl; cout << "-P <n> : output nth percentile (for nonzero voxels)" << endl; - cout << "-a : use absolute values of all image intensities"<< endl; + cout << "-a : use absolute values of all image intensities" << endl; cout << "-n : treat NaN or Inf as zero for subsequent stats" << endl; cout << "-k <mask> : use the specified image (filename) for masking - overrides lower and upper thresholds" << endl; cout << "-d <image> : take the difference between the base image and the image specified here" << endl; cout << "-h <nbins> : output a histogram (for the thresholded/masked voxels only) with nbins" << endl; - cout << "-H <nbins> <min> <max> : output a histogram (for the thresholded/masked voxels only) with nbins and histogram limits of min and max" << endl << endl; + cout << "-H <nbins> <min> <max> : output a histogram (for the thresholded/masked voxels only) with nbins and histogram limits of min and max" << endl; cout << "Note - thresholds are not inclusive ie lthresh<allowed<uthresh" << endl; return 1; } @@ -57,461 +65,838 @@ int print_usage(const string& progname) { // Some specialised nonzero functions just for speedup // (it avoids generating masks when not absolutely necessary) -long nonzerocount(const volume4D<float>& vol) +long nonzerocount(const volume4D<float> &vol) { - long totn=0; - for(volume4D<float>::fast_const_iterator it=vol.fbegin(), end=vol.fend(); it<end; it++ ) - if ( (*it) != 0 ) + long totn = 0; + for (volume4D<float>::fast_const_iterator it = vol.fbegin(), end = vol.fend(); it < end; it++) + if ((*it) != 0) totn++; return totn; } -double nonzeromean(const volume4D<float>& vol) +double nonzeromean(const volume4D<float> &vol) { - double total=0.0; - long int totn=0; - for(volume4D<float>::fast_const_iterator it=vol.fbegin(), end=vol.fend(); it<end; it++ ) - if ( (*it) != 0 ) { - total+=(double)(*it); + double total = 0.0; + long int totn = 0; + for (volume4D<float>::fast_const_iterator it = vol.fbegin(), end = vol.fend(); it < end; it++) + if ((*it) != 0) + { + total += (double)(*it); totn++; } - if (totn>0) - total/=totn; + if (totn > 0) + total /= totn; return total; } -double nonzerostddev(const volume4D<float>& vol) +double nonzerostddev(const volume4D<float> &vol) { - double totv=0.0, totvv=0.0; - long int totn=0; - for(volume4D<float>::fast_const_iterator it=vol.fbegin(), end=vol.fend(); it<end; it++ ) - if ( (*it) != 0 ) { - totvv+=(double)((*it)*(*it)); - totv+=(double)(*it); + double totv = 0.0, totvv = 0.0; + long int totn = 0; + for (volume4D<float>::fast_const_iterator it = vol.fbegin(), end = vol.fend(); it < end; it++) + if ((*it) != 0) + { + totvv += (double)((*it) * (*it)); + totv += (double)(*it); totn++; } - double var=0.0; - if (totn>1) { - double meanval = totv/totn; - var = (totvv - totn*meanval*meanval)/(totn-1); + double var = 0.0; + if (totn > 1) + { + double meanval = totv / totn; + var = (totvv - totn * meanval * meanval) / (totn - 1); } return std::sqrt(var); } -template<class M> +template <class M> int generateNonZeroMask(const M &mask, 
volume4D<float> &masknz, const volume4D<float> &input) { - masknz = binarise(input,0.0f,0.0f,inclusive,true)*mask; + masknz = binarise(input, 0.0f, 0.0f, inclusive, true) * mask; return 0; } -int generate_masks(volume4D<float>& mask, volume4D<float>& masknz, const volume4D<float>& input, const float& lthr, const float& uthr) +int generate_masks(volume4D<float> &mask, volume4D<float> &masknz, const volume4D<float> &input, const float &lthr, const float &uthr) +{ + mask = binarise(input, lthr, uthr, exclusive); + return generateNonZeroMask(mask, masknz, input); +} + +// overloaded helpers that store a value in the JSON output object under the given key; templates allow different value types +template <typename T> +void update_json(string key, T &value, json &j) +{ + j[key] = value; +} + +template <typename T> +void update_json(string key, initializer_list<T> &value, json &j) +{ + j[key] = value; +} + +template <typename T> +void update_json(string key, vector<T> &value, json &j) { - mask = binarise(input,lthr,uthr,exclusive); - return generateNonZeroMask(mask,masknz,input); + j[key] = value; } -int fmrib_main_float(int argc, char* argv[],const bool timeseriesMode, const string& indexMaskName) +void update_json(string key, ColumnVector &value, json &j) { + j[key] = value; +} + +int fmrib_main_float(int argc, char *argv[], const bool timeseriesMode, const string &indexMaskName, const bool jsonMode = false) +{ + + json js; + cout.setf(ios::dec); cout.setf(ios::fixed, ios::floatfield); cout.setf(ios::left, ios::adjustfield); cout.precision(6); volume<float> vol, inputMaster; volume<int> indexMask; - if ( timeseriesMode || indexMaskName != "" ) - read_volume4D(inputMaster,argv[1]); + if (timeseriesMode || indexMaskName != "") + read_volume4D(inputMaster, argv[1]); else - read_volume4D(vol,argv[1]); - volume<float> & indexMaster = (timeseriesMode ) ? vol : inputMaster; - if ( indexMaskName != "" ) - read_volume4D(indexMask,indexMaskName); + read_volume4D(vol, argv[1]); + volume<float> &indexMaster = (timeseriesMode) ? vol : inputMaster; + if (indexMaskName != "") + read_volume4D(indexMask, indexMaskName); int nTimepoints((timeseriesMode) ? inputMaster.tsize() : 1), nIndices((indexMaskName != "") ? 
indexMask.max() : 1); - for ( int timepoint=0; timepoint < nTimepoints ; timepoint++ ) { - for ( int index=1; index <= nIndices; index++ ) { - if ( timeseriesMode ) - vol=inputMaster[timepoint]; - volume<float> mask, masknz; - float lthr(-numeric_limits<float>::max()); - float uthr(numeric_limits<float>::max()); - if ( indexMask.nvoxels() ) { - if ( indexMask.dimensionality() > 3 ) { - cerr << "Index mask must be 3D" << endl; - return 3; - } - copyconvert(indexMask,mask); - mask.binarise(index-1,index+1,exclusive); - vol=indexMaster*mask; - if (mask.max() < 1) { - cout << "missing label: " << index << " in mask" << endl; - continue; - } - generateNonZeroMask(mask,masknz,vol); - } - int narg(2); - while (narg<argc) { - string sarg(argv[narg]); - if (sarg=="-n") { - for (int t=0; t<vol.tsize(); t++) - for (int z=0; z<vol.zsize(); z++) - for (int y=0; y<vol.ysize(); y++) - for (int x=0; x<vol.xsize(); x++) - if (!isfinite((double)vol(x,y,z,t))) - vol(x,y,z,t)=0; - } else if (sarg=="-m") { - if (mask.nvoxels()>0) cout << vol.mean(mask) << " "; - else cout << vol.mean() << " "; - } else if (sarg=="-M") { - if (masknz.nvoxels()>0) cout << vol.mean(masknz) << " "; - else { - double nzmean=0; - nzmean = nonzeromean(vol); - cout << nzmean << " "; - } - } else if (sarg=="-X") { - ColumnVector coord(4); - vector<int64_t> iCoords; - vol.min(mask,iCoords); - coord << (Real)iCoords[7] << (Real)iCoords[8] << (Real)iCoords[9] << 1.0; - coord = vol.niftivox2newimagevox_mat().i() * coord; - cout << MISCMATHS::round(coord(1)) << " " << - MISCMATHS::round(coord(2)) << " " << MISCMATHS::round(coord(3)) << " "; - } else if (sarg=="-x") { - ColumnVector coord(4); - vector<int64_t> iCoords; - vol.min(mask,iCoords); - coord << (Real)iCoords[0] << (Real)iCoords[1] << (Real)iCoords[2] << 1.0; - coord = vol.niftivox2newimagevox_mat().i() * coord; - cout << MISCMATHS::round(coord(1)) << " " << - MISCMATHS::round(coord(2)) << " " << MISCMATHS::round(coord(3)) << " "; - } else if (sarg=="-w") { - if (masknz.nvoxels()<1) { //Need to generate non-zeromask - generate_masks(mask,masknz,vol,lthr,uthr); - vol*=mask; - } - int xmin=masknz.xsize()-1,xmax(0),ymin=masknz.ysize()-1,ymax(0),zmin=masknz.zsize()-1,zmax(0),tmin=masknz.tsize()-1,tmax(0); + // initialise the volumes array in the json object. 
This will get appended to later + js["volumes"] = json::array(); - for(int t=0;t<masknz.tsize();t++) - for(int z=0;z<masknz.zsize();z++) - for(int y=0;y<masknz.ysize();y++) - for(int x=0;x<masknz.xsize();x++) - if (masknz(x,y,z,t)>0.5) { - if (x<xmin) xmin=x; - if (x>xmax) xmax=x; - if (y<ymin) ymin=y; - if (y>ymax) ymax=y; - if (z<zmin) zmin=z; - if (z>zmax) zmax=z; - if (t<tmin) tmin=t; - if (t>tmax) tmax=t; - } - // change voxel coords from newimage to nifti convention for output - ColumnVector v(4); - v << xmin << ymin << zmin << 1.0; - v = masknz.niftivox2newimagevox_mat().i() * v; - xmin = MISCMATHS::round(v(1)); - ymin = MISCMATHS::round(v(2)); - zmin = MISCMATHS::round(v(3)); - v << xmax << ymax << zmax << 1.0; - v = masknz.niftivox2newimagevox_mat().i() * v; - xmax = MISCMATHS::round(v(1)); - ymax = MISCMATHS::round(v(2)); - zmax = MISCMATHS::round(v(3)); - if (xmin>xmax) { int tmp=xmax; xmax=xmin; xmin=tmp; } - if (ymin>ymax) { int tmp=ymax; ymax=ymin; ymin=tmp; } - if (zmin>zmax) { int tmp=zmax; zmax=zmin; zmin=tmp; } - // now output nifti coords - cout << xmin << " " << 1+xmax-xmin << " " << ymin << " " << 1+ymax-ymin << " " << zmin << " " << 1+zmax-zmin << " " << tmin << " " << 1+tmax-tmin << " "; - } else if (sarg=="-e") { - if (mask.nvoxels()<1) { - generate_masks(mask,masknz,vol,lthr,uthr); - vol*=mask; - } - ColumnVector hist; - int nbins=1000; - double entropy=0; - hist = vol.histogram(nbins,mask); - double ntot = hist.Sum(); - for (int j=1; j<=nbins; j++) { - if (hist(j)>0) { - entropy -= (hist(j)/ntot) * log(hist(j)/ntot); - } - } - entropy /= log((double) nbins); - cout << entropy << " "; - } else if (sarg=="-E") { - ColumnVector hist; - int nbins=1000; - double entropy=0; - if (mask.nvoxels()<1) { - generate_masks(mask,masknz,vol,lthr,uthr); - vol*=mask; - } - hist = vol.histogram(nbins,masknz); - double ntot = hist.Sum(); - for (int j=1; j<=nbins; j++) { - if (hist(j)>0) { - entropy -= (hist(j)/ntot) * log(hist(j)/ntot); - } - } - entropy /= log((double) nbins); - cout << entropy << " "; - } else if (sarg=="-k") { - narg++; - volume<float> mask2; - if (narg>=argc) { - cerr << "Must specify an argument to -k" << endl; - exit(2); - } - read_volume4D(mask,argv[narg]); - if ( !samesize(mask,vol,3) || mask.tsize() > vol.tsize() ) { - cerr << "Mask and image must be the same size" << endl; - exit(3); - } - if (timeseriesMode && mask.tsize() != 1 ) { mask2=mask[timepoint]; } - if ( mask.tsize() != vol.tsize() && mask.tsize() != 1) - while (mask.tsize() < vol.tsize() ) { // copy the last max volume until the correct size is reached - mask.addvolume(mask[mask.tsize()-1]); + for (int timepoint = 0; timepoint < nTimepoints; timepoint++) + { + json tp = json::object(); + tp["indices"] = json::array(); + for (int index = 1; index <= nIndices; index++) + { + json idx = json::object(); + if (timeseriesMode) + vol = inputMaster[timepoint]; + volume<float> mask, masknz; + float lthr(-numeric_limits<float>::max()); + float uthr(numeric_limits<float>::max()); + if (indexMask.nvoxels()) + { + if (indexMask.dimensionality() > 3) + { + cerr << "Index mask must be 3D" << endl; + return 3; } - mask.binarise(0.5); - if (mask.tsize()!=1) { - generateNonZeroMask(mask,masknz,vol); - vol*=mask; - } - else { - generateNonZeroMask(mask[0],masknz,vol); - vol*=mask[0]; - } - } else if (sarg=="-d") { - narg++; - if (narg>=argc) { - cerr << "Must specify an argument to -d" << endl; - exit(2); - } - volume4D<float> image2,image3; - read_volume4D(image2,argv[narg]); - if (!samesize(image2[0],vol[0])) { 
- cerr << "Image used with -d must be the same size as the base image" << endl; - exit(3); - } - if (timeseriesMode) { image3=image2[timepoint]; } - if ( image2.tsize() > vol.tsize() ) { - cerr << "Image must be the same size as the base image" << endl; - exit(3); - } - if ( image2.tsize() != vol.tsize() && image2.tsize() != 1) { - // copy the last max volume until the correct size is reached - while (image2.tsize() < vol.tsize() ) { - image2.addvolume(image2[image2.maxt()]); - } - } - // now substract the new image from the base image - vol -= image2; - } else if (sarg=="-l") { - narg++; - if (narg<argc) { - lthr = atof(argv[narg]); - } else { - cerr << "Must specify an argument to -l" << endl; - exit(2); - } - generate_masks(mask,masknz,vol,lthr,uthr); - vol*=mask; - } else if (sarg=="-u") { - narg++; - if (narg<argc) { - uthr = atof(argv[narg]); - } else { - cerr << "Must specify an argument to -u" << endl; - exit(2); - } - generate_masks(mask,masknz,vol,lthr,uthr); - vol*=mask; - } else if (sarg=="-a") { - vol = abs(vol); - } else if (sarg=="-v") { - if (mask.nvoxels()>0) { - long int nvox = mask.sum(); - if (mask.tsize() == 1) nvox = nvox * vol.tsize(); - cout << (long int) nvox << " " - << nvox * vol.xdim() * vol.ydim() * vol.zdim() << " "; - } else { - cout << (long int) vol.nvoxels() * vol.tsize() << " " - << vol.nvoxels() * vol.tsize() * vol.xdim() * vol.ydim() * vol.zdim() << " "; - } - } else if (sarg=="-V") { - if (masknz.nvoxels()>0) { - cout << (long int) masknz.sum() << " " - << masknz.sum() * vol.xdim() * vol.ydim() * vol.zdim() << " "; - } else { - long int nzvox; - nzvox = nonzerocount(vol); - cout << nzvox << " " - << nzvox * vol.xdim() * vol.ydim() * vol.zdim() << " "; - } - } else if (sarg=="-D") { - // hidden debug option! - cout << vol.sum() << " "; - } else if (sarg=="-s") { - if (mask.nvoxels()>0) cout << vol.stddev(mask) << " "; - else cout << vol.stddev() << " "; - } else if (sarg=="-S") { - if (masknz.nvoxels()>0) { - cout << vol.stddev(masknz) << " "; - } else { - cout << nonzerostddev(vol) << " "; - } - } else if (sarg=="-r") { - vector<float> limits(vol.robustlimits(mask)); - cout << limits[0] << " " << limits[1] << " "; - } else if (sarg=="-R") { - cout << vol.min(mask) << " " << vol.max(mask) << " "; - } else if (sarg=="-c") { - ColumnVector cog(4); - // convert from fsl mm to voxel to nifti sform coord - cog.SubMatrix(1,3,1,1) = vol.cog(); - cog(4) = 1.0; - cog = vol.newimagevox2mm_mat() * cog; - cout << cog(1) << " " << cog(2) << " " << cog(3) << " " ; - } else if (sarg=="-C") { - ColumnVector cog(4); - // convert from fsl mm to fsl voxel coord to nifti voxel coord - cog.SubMatrix(1,3,1,1) = vol.cog(); - cog(4) = 1.0; - cog = vol.niftivox2newimagevox_mat().i() * cog; - cout << cog(1) << " " << cog(2) << " " << cog(3) << " " ; - } else if (sarg=="-p") { - float n; - narg++; - if (narg<argc) { - n = atof(argv[narg]); - } else { - cerr << "Must specify an argument to -p" << endl; - exit(2); - } - if ( (n<0) || (n>100) ) { - cerr << "Percentile must be between 0 and 100" << endl; - exit(1); - } - if (mask.nvoxels()>0) cout << vol.percentile((float) n/100.0, mask) << " "; - else cout << vol.percentile((float) n/100.0) << " "; - } else if (sarg=="-P") { - float n; - narg++; - if (narg<argc) { - n = atof(argv[narg]); - } else { - cerr << "Must specify an argument to -P" << endl; - exit(2); - } - if ( (n<0) || (n>100) ) { - cerr << "Percentile must be between 0 and 100" << endl; - exit(1); - } - if (mask.nvoxels()<1) { - generate_masks(mask,masknz,vol,lthr,uthr); 
- vol*=mask; - } - cout << vol.percentile((float) n/100.0,masknz) << " "; - } else if (sarg=="-h") { - float n; - narg++; - if (narg<argc) { - n = atof(argv[narg]); - } else { - cerr << "Must specify the number of bins" << endl; - exit(2); - } - int nbins = (int) n; - if (nbins<1) { - cerr << "Must specify at least 1 bin" << endl; - exit(1); - } - if (mask.nvoxels()>0) { - cout << vol.histogram(nbins,vol.min(),vol.max(),mask) << " "; - } else { - cout << vol.histogram(nbins,vol.min(),vol.max()) << " "; - } - } else if (sarg=="-H") { - float n; - narg++; - if (narg<argc) { - n = atof(argv[narg]); - } else { - cerr << "Must specify the number of bins" << endl; - exit(2); - } - int nbins = (int) n; - if (nbins<1) { - cerr << "Must specify at least 1 bin" << endl; - exit(1); - } - float min=0; - narg++; - if (narg<argc) { - min = atof(argv[narg]); - } else { - cerr << "Must specify the histogram minimum intensity" << endl; - exit(2); - } - float max=0; - narg++; - if (narg<argc) { - max = atof(argv[narg]); - } else { - cerr << "Must specify the histogram maximum intensity" << endl; - exit(2); - } - if (mask.nvoxels()>0) { - cout << vol.histogram(nbins,min,max,mask) << " "; - } else { - cout << vol.histogram(nbins,min,max) << " "; + copyconvert(indexMask, mask); + mask.binarise(index - 1, index + 1, exclusive); + vol = indexMaster * mask; + if (mask.max() < 1) + { + cout << "missing label: " << index << " in mask" << endl; + continue; + } + generateNonZeroMask(mask, masknz, vol); } - } else { - cerr << "Unrecognised option: " << sarg << endl; - exit(3); - } + int narg(2); + while (narg < argc) + { - narg++; - } - cout << endl; - } + string sarg(argv[narg]); + if (sarg == "-n") + { + for (int t = 0; t < vol.tsize(); t++) + for (int z = 0; z < vol.zsize(); z++) + for (int y = 0; y < vol.ysize(); y++) + for (int x = 0; x < vol.xsize(); x++) + if (!isfinite((double)vol(x, y, z, t))) + vol(x, y, z, t) = 0; + } + else if (sarg == "-m") + { + if (mask.nvoxels() > 0){ + auto val = vol.mean(mask); + if (jsonMode){ + update_json(sarg, val, idx); + } else { + cout << val << " "; + } + } else { + // print_value(sarg, val, json, jsonMode); + auto val = vol.mean(); + if (jsonMode){ + update_json(sarg, val, idx); + } else { + cout << val << " "; + } + } + } + else if (sarg == "-M") + { + if (masknz.nvoxels() > 0){ + auto val = vol.mean(masknz); + if (jsonMode){ + update_json(sarg, val, idx); + } else { + cout << val << " "; + } + } + else + { + double nzmean = 0; + nzmean = nonzeromean(vol); + if (jsonMode){ + update_json(sarg, nzmean, idx); + } else { + cout << nzmean << " "; + } + } + } + else if (sarg == "-X") + { + ColumnVector coord(4); + vector<int64_t> iCoords; + vol.min(mask, iCoords); + coord << (Real)iCoords[7] << (Real)iCoords[8] << (Real)iCoords[9] << 1.0; + coord = vol.niftivox2newimagevox_mat().i() * coord; + //cout << MISCMATHS::round(coord(1)) << " " << MISCMATHS::round(coord(2)) << " " << MISCMATHS::round(coord(3)) << " "; + auto coord1 = MISCMATHS::round(coord(1)); + auto coord2 = MISCMATHS::round(coord(2)); + auto coord3 = MISCMATHS::round(coord(3)); + auto coordList = {coord1, coord2, coord3}; + if (jsonMode){ + update_json(sarg, coordList, idx); + } else { + cout << coord1 << " " << coord2 << " " << coord3 << " "; + } + + } + else if (sarg == "-x") + { + ColumnVector coord(4); + vector<int64_t> iCoords; + vol.min(mask, iCoords); + coord << (Real)iCoords[0] << (Real)iCoords[1] << (Real)iCoords[2] << 1.0; + coord = vol.niftivox2newimagevox_mat().i() * coord; + //cout << 
MISCMATHS::round(coord(1)) << " " << MISCMATHS::round(coord(2)) << " " << MISCMATHS::round(coord(3)) << " "; + auto coord1 = MISCMATHS::round(coord(1)); + auto coord2 = MISCMATHS::round(coord(2)); + auto coord3 = MISCMATHS::round(coord(3)); + auto coordList = {coord1, coord2, coord3}; + if (jsonMode){ + update_json(sarg, coordList, idx); + } else { + cout << coord1 << " " << coord2 << " " << coord3 << " "; + } + } + else if (sarg == "-w") + { + if (masknz.nvoxels() < 1) + { // Need to generate non-zeromask + generate_masks(mask, masknz, vol, lthr, uthr); + vol *= mask; + } + int xmin = masknz.xsize() - 1, xmax(0), ymin = masknz.ysize() - 1, ymax(0), zmin = masknz.zsize() - 1, zmax(0), tmin = masknz.tsize() - 1, tmax(0); - } + for (int t = 0; t < masknz.tsize(); t++) + for (int z = 0; z < masknz.zsize(); z++) + for (int y = 0; y < masknz.ysize(); y++) + for (int x = 0; x < masknz.xsize(); x++) + if (masknz(x, y, z, t) > 0.5) + { + if (x < xmin) + xmin = x; + if (x > xmax) + xmax = x; + if (y < ymin) + ymin = y; + if (y > ymax) + ymax = y; + if (z < zmin) + zmin = z; + if (z > zmax) + zmax = z; + if (t < tmin) + tmin = t; + if (t > tmax) + tmax = t; + } + // change voxel coords from newimage to nifti convention for output + ColumnVector v(4); + v << xmin << ymin << zmin << 1.0; + v = masknz.niftivox2newimagevox_mat().i() * v; + xmin = MISCMATHS::round(v(1)); + ymin = MISCMATHS::round(v(2)); + zmin = MISCMATHS::round(v(3)); + v << xmax << ymax << zmax << 1.0; + v = masknz.niftivox2newimagevox_mat().i() * v; + xmax = MISCMATHS::round(v(1)); + ymax = MISCMATHS::round(v(2)); + zmax = MISCMATHS::round(v(3)); + if (xmin > xmax) + { + int tmp = xmax; + xmax = xmin; + xmin = tmp; + } + if (ymin > ymax) + { + int tmp = ymax; + ymax = ymin; + ymin = tmp; + } + if (zmin > zmax) + { + int tmp = zmax; + zmax = zmin; + zmin = tmp; + } + vector<int> coords = {xmin, 1 + xmax - xmin, ymin, 1 + ymax - ymin, zmin, 1 + zmax - zmin, tmin, 1 + tmax - tmin}; + if (jsonMode){ + update_json(sarg, coords, idx); + } else { + cout << coords[0] << " " << coords[1] << " " << coords[2] << " " << coords[3] << " " << coords[4] << " " << coords[5] << " " << coords[6] << " " << coords[7] << " "; + } + } + else if (sarg == "-e") + { + if (mask.nvoxels() < 1) + { + generate_masks(mask, masknz, vol, lthr, uthr); + vol *= mask; + } + ColumnVector hist; + int nbins = 1000; + double entropy = 0; + hist = vol.histogram(nbins, mask); + double ntot = hist.Sum(); + for (int j = 1; j <= nbins; j++) + { + if (hist(j) > 0) + { + entropy -= (hist(j) / ntot) * log(hist(j) / ntot); + } + } + entropy /= log((double)nbins); + if (jsonMode){ + update_json(sarg, entropy, idx); + } else { + cout << entropy << " "; + } + } + else if (sarg == "-E") + { + ColumnVector hist; + int nbins = 1000; + double entropy = 0; + if (mask.nvoxels() < 1) + { + generate_masks(mask, masknz, vol, lthr, uthr); + vol *= mask; + } + hist = vol.histogram(nbins, masknz); + double ntot = hist.Sum(); + for (int j = 1; j <= nbins; j++) + { + if (hist(j) > 0) + { + entropy -= (hist(j) / ntot) * log(hist(j) / ntot); + } + } + entropy /= log((double)nbins); + if (jsonMode){ + update_json(sarg, entropy, idx); + } else { + cout << entropy << " "; + } + } + else if (sarg == "-k") + { + narg++; + volume<float> mask2; + if (narg >= argc) + { + cerr << "Must specify an argument to -k" << endl; + exit(2); + } + read_volume4D(mask, argv[narg]); + if (!samesize(mask, vol, 3) || mask.tsize() > vol.tsize()) + { + cerr << "Mask and image must be the same size" << endl; + exit(3); + } + if 
(timeseriesMode && mask.tsize() != 1) + { + mask2 = mask[timepoint]; + } + if (mask.tsize() != vol.tsize() && mask.tsize() != 1) + while (mask.tsize() < vol.tsize()) + { // copy the last max volume until the correct size is reached + mask.addvolume(mask[mask.tsize() - 1]); + } + mask.binarise(0.5); + if (mask.tsize() != 1) + { + generateNonZeroMask(mask, masknz, vol); + vol *= mask; + } + else + { + generateNonZeroMask(mask[0], masknz, vol); + vol *= mask[0]; + } + } + else if (sarg == "-d") + { + narg++; + if (narg >= argc) + { + cerr << "Must specify an argument to -d" << endl; + exit(2); + } + volume4D<float> image2, image3; + read_volume4D(image2, argv[narg]); + if (!samesize(image2[0], vol[0])) + { + cerr << "Image used with -d must be the same size as the base image" << endl; + exit(3); + } + if (timeseriesMode) + { + image3 = image2[timepoint]; + } + if (image2.tsize() > vol.tsize()) + { + cerr << "Image must be the same size as the base image" << endl; + exit(3); + } + if (image2.tsize() != vol.tsize() && image2.tsize() != 1) + { + // copy the last max volume until the correct size is reached + while (image2.tsize() < vol.tsize()) + { + image2.addvolume(image2[image2.maxt()]); + } + } + // now substract the new image from the base image + vol -= image2; + } + else if (sarg == "-l") + { + narg++; + if (narg < argc) + { + lthr = atof(argv[narg]); + } + else + { + cerr << "Must specify an argument to -l" << endl; + exit(2); + } + generate_masks(mask, masknz, vol, lthr, uthr); + vol *= mask; + } + else if (sarg == "-u") + { + narg++; + if (narg < argc) + { + uthr = atof(argv[narg]); + } + else + { + cerr << "Must specify an argument to -u" << endl; + exit(2); + } + generate_masks(mask, masknz, vol, lthr, uthr); + vol *= mask; + } + else if (sarg == "-a") + { + vol = abs(vol); + } + else if (sarg == "-v") + { + if (mask.nvoxels() > 0) + { + long int nvox = mask.sum(); + if (mask.tsize() == 1) + nvox = nvox * vol.tsize(); + + auto volume_v = nvox * vol.xdim() * vol.ydim() * vol.zdim(); + // promote nvox to float to maintain types in list + auto nvox_volume = {static_cast<float>(nvox), volume_v}; + if (jsonMode){ + update_json(sarg, nvox_volume, idx); + } else { + cout << nvox << " " << volume_v << " "; + } + } + else + { + long int nvox = vol.nvoxels() * vol.tsize(); + auto volume_v = nvox * vol.xdim() * vol.ydim() * vol.zdim(); + // promote nvox to float to maintain types in list + auto nvox_volume = {static_cast<float>(nvox), volume_v}; + if (jsonMode){ + update_json(sarg, nvox_volume, idx); + } else { + cout << nvox << " " << volume_v << " "; + } + } + } + else if (sarg == "-V") + { + if (masknz.nvoxels() > 0) + { + long int nvox = masknz.sum(); + auto volume_V = nvox * vol.xdim() * vol.ydim() * vol.zdim(); + // promote nvox to float to maintain types in list + auto nvox_volume = {static_cast<float>(nvox), volume_V}; + if (jsonMode){ + update_json(sarg, nvox_volume, idx); + } else { + cout << nvox << " " << volume_V << " "; + } + } + else + { + long int nzvox; + nzvox = nonzerocount(vol); + auto volume_V = nzvox * vol.xdim() * vol.ydim() * vol.zdim(); + // promate nzvox to float to maintain types in list + auto nvox_volume = {static_cast<float>(nzvox), volume_V}; + if (jsonMode){ + update_json(sarg, nvox_volume, idx); + } else { + cout << nzvox << " " << volume_V << " "; + } + } + } + else if (sarg == "-D") + { + // hidden debug option! 
+ auto sum = vol.sum(); + if (jsonMode){ + update_json(sarg, sum, idx); + } else { + cout << sum << " "; + } + } + else if (sarg == "-s") + { + if (mask.nvoxels() > 0){ + auto stddev = vol.stddev(mask); + if (jsonMode){ + update_json(sarg, stddev, idx); + } else { + cout << stddev << " "; + } + } + else { + auto stddev = vol.stddev(); + if (jsonMode){ + update_json(sarg, stddev, idx); + } else { + cout << stddev << " "; + } + } + } + else if (sarg == "-S") + { + if (masknz.nvoxels() > 0) + { + auto stddev = vol.stddev(masknz); + if (jsonMode){ + update_json(sarg, stddev, idx); + } else { + cout << stddev << " "; + } + } + else + { + auto stddev = nonzerostddev(vol); + if (jsonMode){ + update_json(sarg, stddev, idx); + } else { + cout << stddev << " "; + } + } + } + else if (sarg == "-r") + { + vector<float> limits(vol.robustlimits(mask)); + if (jsonMode){ + update_json(sarg, limits, idx); + } else { + cout << limits[0] << " " << limits[1] << " "; + } + } + else if (sarg == "-R") + { + auto min_R = vol.min(mask); + auto max_R = vol.max(mask); + auto minmax_R = {min_R, max_R}; + if (jsonMode){ + update_json(sarg, minmax_R, idx); + } else { + cout << min_R << " " << max_R << " "; + } + } + else if (sarg == "-c") + { + ColumnVector cog(4); + cog.SubMatrix(1, 3, 1, 1) = vol.cog(); + cog(4) = 1.0; + cog = vol.newimagevox2mm_mat() * cog; + // remove the last element + cog = cog.Rows(1, 3); + // reshape to a row vector + cog = cog.t(); + if (jsonMode){ + update_json(sarg, cog, idx); + } else { + cout << cog(1) << " " << cog(2) << " " << cog(3) << " "; + } + } + else if (sarg == "-C") + { + ColumnVector cog(4); + // convert from fsl mm to fsl voxel coord to nifti voxel coord + cog.SubMatrix(1, 3, 1, 1) = vol.cog(); + cog(4) = 1.0; + cog = vol.niftivox2newimagevox_mat().i() * cog; + // remove the last element + cog = cog.Rows(1, 3); + // reshape to a row vector + cog = cog.t(); + if (jsonMode){ + update_json(sarg, cog, idx); + } else { + cout << cog(1) << " " << cog(2) << " " << cog(3) << " "; + } + } + else if (sarg == "-p") + { + float n; + narg++; + if (narg < argc) + { + n = atof(argv[narg]); + } + else + { + cerr << "Must specify an argument to -p" << endl; + exit(2); + } + if ((n < 0) || (n > 100)) + { + cerr << "Percentile must be between 0 and 100" << endl; + exit(1); + } + if (mask.nvoxels() > 0){ + auto percentile_p = vol.percentile((float)n / 100.0, mask); + if (jsonMode){ + update_json(sarg, percentile_p, idx); + } else { + cout << percentile_p << " "; + } + } + else { + auto percentile_p = vol.percentile((float)n / 100.0); + if (jsonMode){ + update_json(sarg, percentile_p, idx); + } else { + cout << percentile_p << " "; + } + } + } + else if (sarg == "-P") + { + float n; + narg++; + if (narg < argc) + { + n = atof(argv[narg]); + } + else + { + cerr << "Must specify an argument to -P" << endl; + exit(2); + } + if ((n < 0) || (n > 100)) + { + cerr << "Percentile must be between 0 and 100" << endl; + exit(1); + } + if (mask.nvoxels() < 1) + { + generate_masks(mask, masknz, vol, lthr, uthr); + vol *= mask; + } + auto percentile_P = vol.percentile((float)n / 100.0, masknz); + if (jsonMode){ + update_json(sarg, percentile_P, idx); + } else { + cout << percentile_P << " "; + } + } + else if (sarg == "-h") + { + float n; + narg++; + if (narg < argc) + { + n = atof(argv[narg]); + } + else + { + cerr << "Must specify the number of bins" << endl; + exit(2); + } + int nbins = (int)n; + if (nbins < 1) + { + cerr << "Must specify at least 1 bin" << endl; + exit(1); + } + if (mask.nvoxels() > 0) + { + 
auto hist = vol.histogram(nbins, vol.min(), vol.max(), mask); + if (jsonMode){ + update_json(sarg, hist, idx); + } else { + cout << hist << " "; + } + } + else + { + auto hist = vol.histogram(nbins, vol.min(), vol.max()); + if (jsonMode){ + update_json(sarg, hist, idx); + } else { + cout << hist << " "; + } + } + } + else if (sarg == "-H") + { + float n; + narg++; + if (narg < argc) + { + n = atof(argv[narg]); + } + else + { + cerr << "Must specify the number of bins" << endl; + exit(2); + } + int nbins = (int)n; + if (nbins < 1) + { + cerr << "Must specify at least 1 bin" << endl; + exit(1); + } + float min = 0; + narg++; + if (narg < argc) + { + min = atof(argv[narg]); + } + else + { + cerr << "Must specify the histogram minimum intensity" << endl; + exit(2); + } + float max = 0; + narg++; + if (narg < argc) + { + max = atof(argv[narg]); + } + else + { + cerr << "Must specify the histogram maximum intensity" << endl; + exit(2); + } + if (mask.nvoxels() > 0) + { + auto hist = vol.histogram(nbins,min,max,mask); + if (jsonMode){ + update_json(sarg, hist, idx); + } else { + cout << hist << " "; + } + } + else + { + auto hist = vol.histogram(nbins, min, max); + if (jsonMode){ + update_json(sarg, hist, idx); + } else { + cout << hist << " "; + } + } + } + else + { + cerr << "Unrecognised option: " << sarg << endl; + exit(3); + } + narg++; + } + tp["indices"] += idx; + if (!jsonMode) + cout << endl; + } // end nIndices + // append the timepoint to the the volumes array + js["volumes"] += tp; + } // end timepoints + if (jsonMode){ + // print the json with 4 spaces of indentation + cout << js.dump(4) << endl; + } return 0; } - - -int main(int argc,char *argv[]) +int main(int argc, char *argv[]) { Tracer tr("main"); string progname(argv[0]); bool timeseriesMode(false); + bool jsonMode(false); string indexMask(""); - while ( argc > 2 && ( string(argv[1])=="-t" || string(argv[1]) =="-K" ) ) { - if ( string(argv[1])=="-t" ) - timeseriesMode=true; - if ( string(argv[1])=="-K" ) { - indexMask=string(argv[2]); + while (argc > 2 && (string(argv[1]) == "-t" || string(argv[1]) == "-K" || string(argv[1]) == "-json")) + { + if (string(argv[1]) == "-t") + timeseriesMode = true; + if (string(argv[1]) == "-json") + jsonMode = true; + if (string(argv[1]) == "-K") + { + indexMask = string(argv[2]); argv++; argc--; } argv++; argc--; } - if (argc < 3 ) + if (argc < 3) return print_usage(progname); - try { - return fmrib_main_float(argc,argv,timeseriesMode,indexMask); - } catch(std::exception &e) { + try + { + return fmrib_main_float(argc, argv, timeseriesMode, indexMask, jsonMode); + } + catch (std::exception &e) + { cerr << e.what() << endl; - } catch (...) { + } + catch (...) 
+ { // do nothing - just exit without garbage message } return -1; - } diff --git a/tests/fix_orient/feedsRun b/tests/fix_orient/feedsRun new file mode 100755 index 0000000000000000000000000000000000000000..0e0e575c431e76d24729b9d125e07502cb887d19 --- /dev/null +++ b/tests/fix_orient/feedsRun @@ -0,0 +1,166 @@ +#!/usr/bin/env fslpython +"""Test the standard recipe for fixing a broken orientation, from "The labels +in FSLView are wrong or missing and no conversion to NIfTI can fix this", on +this page: + +https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/Orientation%20Explained + + If this really cannot be fixed at the conversion stage, then it is + possible to fix by the follow (only to be attempted after reading the + following section on background information on NIfTI orientation): + fslorient -deleteorient imagename ; fslswapdim imagename a b c imagename ; + fslorient -setqformcode 1 imagename ; where a b c need to be chosen, + usually by trial and error, so that the image after this is in the same + orientation (as viewed in FSLView) as the MNI152 images. However, unless + you have some way of telling left from right in the image (e.g. by a + vitamin capsule or clearly known structural asymmetry) this method can + easily result in an incorrect left-right labelling and so should not be + attempted. + +The recipe involves: + + 1. resetting the image affines + + 2. transposing the data array until it is in a + RAS orientation + + 3. Enabling the qform affine which, after step + 1, should be a diagonal matrix with pixdims + as scaling parameters. + +Specifically, the recipe is as follows: + + # 1. clear the sform/qform affines + fslorient -deleteorient image + + # 2. rotate the data into RAS orientation (exact + # command depends on original data orientation) + fslswapdim image x z y image + + # 3. reset the qform code. The qform should be + # a diagonal matrix, and the image data array + # should be in RAS orientation, so after this + # step the image should be labelled correctly. + fslorient -setqformcode 1 image + + +This recipe makes some key assumptions: + + - fslorient -deleteorient (step 1) deleting/ + clearing the affines - the qform must be + reset to be a scaling matrix. + + - fslswapdim (step 2) not touching the affines + when the s/qform codes are 0 + +These are required so that, in step 3, the qform is a diagonal matrix with +correct scaling paramaters. + +Some bugs were introduced in FSL 6.0.0 (fixed in FSL 6.0.6) which broke the +above assumptions: + + - fslorient -deleteorient was not clearing the original + affines + - fslswapdim was applying the rotations/flips to both the + data, and to the s/qforms, even if the s/qform codes were + set to 0. 
+""" + +import numpy as np +import nibabel as nib +import os +import subprocess as sp +import shlex +import sys +import traceback + +def sprun(cmd): + sp.run(shlex.split(cmd), check=True) + + +# Test the original recipe for fixing the +# orientation of an image, as described above +def test_fix_orient(): + + data = np.random.random((10, 10, 10)) + orig_qform = np.array([[2, 0, 0, 10], + [0, 2, 0, -20], + [0, 0, 2, 50], + [0, 0, 0, 1]]) + image = nib.Nifti1Image(data, np.eye(4)) + image.set_sform(np.eye(4), 0) + image.set_qform(orig_qform, 1) + + image.to_filename('orig.nii.gz') + sprun('imcp orig cleared') + sprun('fslorient -deleteorient cleared') + sprun('fslswapdim cleared x z y swapped') + sprun('imcp swapped fixed') + sprun('fslorient -setqformcode 1 fixed') + + fixed = nib.load('fixed.nii.gz') + + new_data = fixed.get_fdata() + new_qform, qcode = fixed.get_qform(coded=True) + _, scode = fixed.get_sform(coded=True) + + # new qform should be a scaling matrix (not + # necessarily equivalent to the original + assert qcode == 1 + assert scode == 0 + assert np.all(np.isclose(new_qform, np.diag([2, 2, 2, 1]))) + assert np.all(np.isclose(new_data, data.transpose((0, 2, 1)))) + + +# An alternate strategy which involves +# explicitly clobbering the qform +def test_fix_orient_alternate(): + # original qform + data = np.random.random((10, 10, 10)) + orig_qform = np.array([[2, 0, 0, 10], + [0, 2, 0, -20], + [0, 0, 2, 50], + [0, 0, 0, 1]]) + image = nib.Nifti1Image(data, np.eye(4)) + image.set_sform(np.eye(4), 0) + image.set_qform(orig_qform, 1) + + image.to_filename('orig.nii.gz') + sprun('imcp orig cleared') + sprun('fslorient -deleteorient cleared') + sprun('fslswapdim cleared x z y swapped') + sprun('imcp swapped fixed1') + sprun('fslorient -setqform 1 0 0 0 0 1 0 0 0 0 1 0 0 0 0 1 fixed1') + sprun('imcp fixed1 fixed') + sprun('fslorient -setqformcode 1 fixed') + + fixed = nib.load('fixed.nii.gz') + + new_data = fixed.get_fdata() + new_qform, qcode = fixed.get_qform(coded=True) + _, scode = fixed.get_sform(coded=True) + + # new qform should be a scaling matrix (not + # necessarily equivalent to the original + assert qcode == 1 + assert scode == 0 + assert np.all(np.isclose(new_qform, np.diag([2, 2, 2, 1]))) + assert np.all(np.isclose(new_data, data.transpose((0, 2, 1)))) + + +if __name__ == '__main__': + tests = [test_fix_orient, + test_fix_orient_alternate] + + result = 0 + for test in tests: + try: + os.chdir(sys.argv[1]) + test() + print(f'\nTest {test.__name__} PASSED') + except Exception as e: + print(f'\nTest {test.__name__} FAILED') + traceback.print_exc() + result = 1 + + sys.exit(result) diff --git a/tests/fslchpixdim/feedsRun b/tests/fslchpixdim/feedsRun new file mode 100755 index 0000000000000000000000000000000000000000..56bcee4cfe07a24e42e1bb33a7cef2e9d2910dd8 --- /dev/null +++ b/tests/fslchpixdim/feedsRun @@ -0,0 +1,58 @@ +#!/usr/bin/env fslpython + +import os +import sys +import subprocess as sp + +import numpy as np +import nibabel as nib + + +def create_image(shape, pixdim): + pixdim = list(pixdim) + data = np.random.random(shape).astype(np.float32) + hdr = nib.Nifti1Header() + + hdr.set_data_dtype(np.float32) + hdr.set_data_shape(shape) + hdr.set_zooms(pixdim[:len(shape)]) + + return nib.Nifti1Image(data, np.eye(4), hdr) + + +def check_image(origimg, changedimg, exppixdim): + + gotshape = changedimg.shape + gotpixdim = list(changedimg.header.get_zooms()) + gotdata = changedimg.get_fdata() + + assert origimg.shape == gotshape + assert gotpixdim == exppixdim + assert 
np.all(origimg.get_fdata() == gotdata) + + +def run_tests(): + # (image shape, command, exppixdims) + tests = [ + ((5, 5, 5), '2 2 2', (2, 2, 2)), + ((5, 5, 5), '2 2 2 2', (2, 2, 2)), + ((5, 5, 5, 5), '2 2 2', (2, 2, 2, 1)), + ((5, 5, 5, 5), '2 2 2 2', (2, 2, 2, 2)), + ] + + for shape, cmd, exppixdim in tests: + + imgfile = 'image.nii.gz' + img = create_image(shape, [1] * len(shape)) + img.to_filename(imgfile) + + try: + sp.run(['fslchpixdim', imgfile] + list(cmd.split())) + check_image(img, nib.load(imgfile), list(exppixdim)) + + finally: + os.remove(imgfile) + + +if __name__ == '__main__': + sys.exit(run_tests()) diff --git a/tests/fslcomplex/feedsRun b/tests/fslcomplex/feedsRun new file mode 100755 index 0000000000000000000000000000000000000000..a84cc1d5a5fa93dd65326b0bbdbaaef9e3fca884 --- /dev/null +++ b/tests/fslcomplex/feedsRun @@ -0,0 +1,132 @@ +#!/usr/bin/env fslpython +"""Test that fslcomplex produces correct outputs. Specifically tests that the +output files have orientation info that matches that of the input files. +""" + +import numpy as np +import nibabel as nib +import subprocess as sp +import os.path as op +import shlex +import sys +import traceback + +from fsl.utils.tempdir import tempdir +from fsl.data.image import addExt + + +def sprun(cmd): + print(f'RUN {cmd}') + sp.run(shlex.split(cmd), check=True) + + +def imtest(path): + try: + addExt(path) + return True + except Exception: + return False + + +# radio=[True|False] -> whether or not +# to add a flip on the affine X axis +def create_test_input_data(radio, seed=1): + + np.random.seed(seed) + + real = np.random.randint(0, 100, (10, 10, 10)).astype(np.float32) + imag = np.random.randint(0, 100, (10, 10, 10)).astype(np.float32) + affine = np.diag([3, 3, 3, 1]) + + if radio: + affine[0, 0] *= -1 + affine[:3, 3] = [37, 20, 30] + real = np.flip(real, 0) + imag = np.flip(imag, 0) + else: + affine[:3, 3] = [10, 20, 30] + + comp = real + imag * 1j + abso = np.abs(comp) + phase = np.arctan2(comp.imag, comp.real) + comp4d = np.concatenate((comp.reshape(10, 10, 10, 1), + comp.reshape(10, 10, 10, 1)), 3) + + real = nib.Nifti1Image(real, affine) + imag = nib.Nifti1Image(imag, affine) + comp = nib.Nifti1Image(comp, affine) + comp4d = nib.Nifti1Image(comp4d, affine) + abso = nib.Nifti1Image(abso, affine) + phase = nib.Nifti1Image(phase, affine) + + real .set_qform(*real .get_sform(coded=True)) + imag .set_qform(*comp .get_sform(coded=True)) + comp .set_qform(*comp .get_sform(coded=True)) + comp4d.set_qform(*comp4d.get_sform(coded=True)) + abso .set_qform(*abso .get_sform(coded=True)) + phase .set_qform(*phase .get_sform(coded=True)) + + real .to_filename('real_in.nii.gz') + imag .to_filename('imag_in.nii.gz') + comp .to_filename('complex_in.nii.gz') + comp4d.to_filename('complex4d_in.nii.gz') + abso .to_filename('abs_in.nii.gz') + phase .to_filename('phase_in.nii.gz') + + +def compare_images(file1, file2): + img1 = nib.load(addExt(file1)) + img2 = nib.load(addExt(file2)) + data1 = np.asanyarray(img1.dataobj) + data2 = np.asanyarray(img2.dataobj) + assert img1.header['sform_code'] == img2.header['sform_code'] + assert img1.header['qform_code'] == img2.header['qform_code'] + assert np.all(np.isclose(img1.get_sform(), img2.get_sform())) + assert np.all(np.isclose(img1.get_qform(), img2.get_qform())) + assert np.all(np.isclose(img1.header['pixdim'], img2.header['pixdim'])) + assert np.all(np.isclose(data1, data2)) + + +def test_fslcomplex_call(args, infiles, outfiles, radio): + with tempdir(): + create_test_input_data(radio) + 
sprun(f'fslcomplex {args} {infiles} {outfiles}') + for outfile in outfiles.split(' '): + + if not imtest(outfile): + continue + + prefix = outfile.split('_')[0] + infile = f'{prefix}_in' + + print(f'Comparing {outfile} against {infile}') + + compare_images(infile, outfile) + + +if __name__ == '__main__': + tests = [ + ('-realabs', 'complex_in', 'abs_out'), + ('-realphase', 'complex_in', 'phase_out'), + ('-realpolar', 'complex_in', 'abs_out phase_out'), + ('-realcartesian', 'complex_in', 'real_out imag_out'), + ('-complex', 'real_in imag_in', 'complex_out'), + ('-complexpolar', 'abs_in phase_in', 'complex_out'), + ('-copyonly', 'complex_in', 'complex_out'), + ('-complexsplit', 'complex4d_in', 'complex_out 0 0'), + ('-complexsplit', 'complex4d_in', 'complex_out 1 1'), + ('-complexmerge', 'complex_in complex_in', 'complex4d_out'), + ] + + result = 0 + for radio in [True, False]: + for args, infiles, outfiles in tests: + try: + test_fslcomplex_call(args, infiles, outfiles, radio) + print(f'\nTest fslcomplex {args} {infiles} {outfiles} [radio: {radio}] PASSED') + except Exception as e: + print(f'\nTest fslcomplex {args} {infiles} {outfiles} [radio: {radio}] FAILED') + traceback.print_exc() + result = 1 + + sys.exit(result) diff --git a/tests/fslcreatehd/feedsRun b/tests/fslcreatehd/feedsRun new file mode 100755 index 0000000000000000000000000000000000000000..339553df6f7826c3b1a7c7a707fede7fd0d3b943 --- /dev/null +++ b/tests/fslcreatehd/feedsRun @@ -0,0 +1,265 @@ +#!/usr/bin/env fslpython + +import os +import os.path as op +import sys +import subprocess as sp + +import numpy as np +import nibabel as nib + + +THISDIR = op.dirname(op.abspath(__file__)) + + +# 2=char, 4=short, 8=int, 16=float, 64=double +DTYPE_MAPPING = { + 2 : np.uint8, + 4 : np.int16, + 8 : np.int32, + 16 : np.float32, + 64 : np.float64 +} + +MNI152_2MM_AFFINE = np.array([[-2, 0, 0, 90], + [ 0, 2, 0, -126], + [ 0, 0, 2, -72], + [ 0, 0, 0, 1]]) + + +def validate_result(imgfile, expshape, exppixdim, exporigin, expdtype): + + img = nib.load(imgfile) + hdr = img.header + expshape = list(expshape) + exppixdim = list(exppixdim) + exporigin = list(exporigin) + + if (len(expshape) == 3) or \ + expshape[3] in (0, 1): + expndims = 3 + else: + expndims = 4 + + expaffine = np.diag(exppixdim[:3] + [1]) + expaffine[0, 0] *= -1 + expaffine[:3, 3] = exporigin + expaffine[0, 3] *= exppixdim[0] + expaffine[1, 3] *= -exppixdim[1] + expaffine[2, 3] *= -exppixdim[2] + + assert len(img.shape) == expndims + assert list(img.shape) == expshape[ :expndims] + assert list(hdr.get_zooms()) == exppixdim[:expndims] + assert img.get_data_dtype() == expdtype + assert np.all(np.isclose(img.affine, expaffine)) + + +def create_image(shape, pixdim, origin, dtype): + pixdim = list(pixdim) + affine = np.diag(pixdim[:3] + [1]) + affine[:3, 3] = origin + data = np.random.randint(1, 100, shape).astype(dtype) + hdr = nib.Nifti1Header() + + hdr.set_data_dtype(dtype) + hdr.set_data_shape(shape) + hdr.set_zooms(pixdim[:len(shape)]) + + return nib.Nifti1Image(data, affine, hdr) + + +def test_new_file(): + tests = [ + + # 4th dim of size 0 or 1 should + # result in a 3D image (this + # is coded in validate_result) + ' 5 5 5 0 2 2 2 0 0 0 0 2', + ' 5 5 5 0 2 2 2 1 0 0 0 2', + ' 5 5 5 1 2 2 2 1 0 0 0 2', + ' 5 5 5 1 2 2 2 1 0 0 0 4', + ' 5 5 5 1 2 2 2 1 0 0 0 8', + ' 5 5 5 1 2 2 2 1 0 0 0 16', + ' 5 5 5 1 2 2 2 1 1 2 3 64', + ' 5 5 5 1 0.5 1.5 1.25 1 0 0 0 2', + ' 5 5 5 1 0.5 1.5 1.25 1.5 0 0 0 2', + ' 5 5 5 1 0.5 1.5 1.25 0.5 0 0 0 2', + '30 30 30 5 5 10 3 5 10 20 10 2', + ] + 
+ for test in tests: + args = list(test.split()) + imgfile = 'image.nii.gz' + + try: + os.remove(imgfile) + except: + pass + + print(['fslcreatehd'] + args + [imgfile]) + + sp.run(['fslcreatehd'] + args + [imgfile]) + + args = [float(a) for a in args] + expshape = args[:4] + exppixdim = args[4:8] + exporigin = args[8:11] + expdtype = DTYPE_MAPPING[args[11]] + + try: + validate_result( + imgfile, expshape, exppixdim, exporigin, expdtype) + finally: + os.remove(imgfile) + + +def test_existing_file(): + + # each test is a tuple containing: + # - dtype_of_existing_image + # - shape_of_existing_image + # - exp_shape (set to None if image should not be overwritten) + # - fslcreatehd_args + tests = [ + # 4th dim of 0 or 1 should result in a 3D image + (2, (5, 5, 5), None, '5 5 5 0 2 2 2 1 0 0 0 2'), + (2, (5, 5, 5), None, '5 5 5 0 2 2 2 1 1 2 3 2'), + (2, (5, 5, 5), None, '5 5 5 1 2 2 2 2 0 0 0 2'), + + # dtype arg should be ignored for existing images + (2, (5, 5, 5), None, '5 5 5 0 2 2 2 1 0 0 0 4'), + (4, (5, 5, 5), None, '5 5 5 0 2 2 2 1 0 0 0 2'), + + # data should be overwritten if any + # of the first 3 dims are different, + # or if a 4th dim is specified + (2, (5, 5, 5), (5, 3, 5), '5 3 5 0 2 2 2 2 0 0 0 2'), + (2, (5, 5, 5), (5, 3, 5), '5 3 5 0 2 2 2 2 0 0 0 4'), + (2, (5, 5, 5), (5, 3, 5), '5 3 5 1 2 2 2 2 0 0 0 2'), + (2, (5, 5, 5), (5, 3, 5), '5 3 5 1 2 2 2 2 1 2 3 2'), + (2, (5, 5, 5), (5, 3, 5, 5), '5 3 5 5 2 2 2 2 0 0 0 2'), + (2, (5, 5, 5), (5, 3, 5, 5), '5 3 5 5 2 2 2 2 1 2 3 2'), + + # 4D - same rules apply + (2, (5, 5, 5, 5), None, '5 5 5 5 2 2 2 2 0 0 0 2'), + (2, (5, 5, 5, 5), None, '5 5 5 5 2 2 2 2 0 0 0 2'), + (2, (5, 5, 5, 5), None, '5 5 5 5 2 2 2 2 0 0 0 4'), + (4, (5, 5, 5, 5), None, '5 5 5 5 2 2 2 2 0 0 0 2'), + (2, (5, 5, 5, 5), None, '5 5 5 5 2 2 2 2 1 2 3 2'), + + # data overwritten if nelements + # change + (2, (5, 5, 5, 5), (5, 5, 5), '5 5 5 0 2 2 2 2 0 0 0 2'), + (2, (5, 5, 5, 5), (5, 5, 5), '5 5 5 1 2 2 2 2 0 0 0 2'), + (2, (5, 5, 5, 5), (5, 3, 5), '5 3 5 0 2 2 2 2 0 0 0 2'), + (2, (5, 5, 5, 5), (5, 3, 5), '5 3 5 1 2 2 2 2 0 0 0 2'), + (2, (5, 5, 5, 5), (5, 3, 5), '5 3 5 1 2 2 2 2 1 2 3 2'), + (2, (5, 5, 5, 5), (5, 5, 5, 2), '5 5 5 2 2 2 2 2 0 0 0 2'), + (2, (5, 5, 5, 5), (5, 3, 5, 5), '5 3 5 5 2 2 2 2 0 0 0 2'), + (2, (5, 5, 5, 5), (5, 3, 5, 5), '5 3 5 5 2 2 2 2 1 2 3 2'), + ] + + for (dtype, shape, expshape, args) in tests: + imgfile = 'image.nii.gz' + dtype = DTYPE_MAPPING[dtype] + args = list(args.split()) + img = create_image(shape, (1, 1, 1, 1), (0, 0, 0), dtype) + + img.to_filename(imgfile) + + sp.run(['fslcreatehd'] + args + [imgfile]) + + checkdata = expshape is None + + if expshape is None: + expshape = shape + + args = [float(a) for a in args] + exppixdim = args[4:8] + exporigin = args[8:11] + + try: + validate_result(imgfile, expshape, exppixdim, exporigin, dtype) + + if checkdata: + data = np.asanyarray(img.dataobj) + datacopy = np.asanyarray(nib.load(imgfile).dataobj) + assert np.all(data == datacopy) + + finally: + os.remove(imgfile) + + +def test_new_file_xml(): + xmlfile = op.join(THISDIR, 'mni2mm.xml') + sp.run(['fslcreatehd', xmlfile, 'mni.nii.gz']) + img = nib.load('mni.nii.gz') + + assert img.shape == (91, 109, 91) + assert img.header.get_zooms()[:3] == (2.0, 2.0, 2.0) + assert img.header['intent_code'] == 10 + assert img.header['intent_p1'] == 20 + assert img.header['intent_p2'] == 30 + assert img.header['intent_p3'] == 40 + + +def test_existing_file_xml_same_shape(): + xmlfile = op.join(THISDIR, 'mni2mm.xml') + img = create_image((91, 109, 
91), (3, 3, 3), (5, 5, 5), np.float32) + img.to_filename('image.nii.gz') + + sp.run(['fslcreatehd', xmlfile, 'image.nii.gz']) + + result = nib.load('image.nii.gz') + + assert result.shape == (91, 109, 91) + assert result.header.get_zooms()[:3] == (2.0, 2.0, 2.0) + + # bug in FSL<=6.0.4 caused intent codes to not be set + assert result.header['intent_code'] == 10 + assert result.header['intent_p1'] == 20 + assert result.header['intent_p2'] == 30 + assert result.header['intent_p3'] == 40 + assert np.all(result.affine == MNI152_2MM_AFFINE) + + # data should be preserved + assert np.all(result.get_fdata() == img.get_fdata()) + + +def test_existing_file_xml_different_shape(): + + xmlfile = op.join(THISDIR, 'mni2mm.xml') + img = create_image((20, 20, 20), (3, 3, 3), (5, 5, 5), np.float32) + + img.to_filename('image.nii.gz') + + sp.run(['fslcreatehd', xmlfile, 'image.nii.gz']) + + result = nib.load('image.nii.gz') + + assert result.shape == (91, 109, 91) + assert result.header.get_zooms()[:3] == (2.0, 2.0, 2.0) + assert result.header['intent_code'] == 10 + assert result.header['intent_p1'] == 20 + assert result.header['intent_p2'] == 30 + assert result.header['intent_p3'] == 40 + assert np.all(result.affine == MNI152_2MM_AFFINE) + + # data should be cleared + assert np.all(result.get_fdata() == 0) + + +def main(): + outdir = sys.argv[1] + os.chdir(outdir) + + test_new_file() + test_existing_file() + test_new_file_xml() + test_existing_file_xml_same_shape() + test_existing_file_xml_different_shape() + + +if __name__ == '__main__': + sys.exit(main()) diff --git a/tests/fslcreatehd/mni2mm.xml b/tests/fslcreatehd/mni2mm.xml new file mode 100644 index 0000000000000000000000000000000000000000..06b8043c942be6a6bec7c11a26aa44a5cb243471 --- /dev/null +++ b/tests/fslcreatehd/mni2mm.xml @@ -0,0 +1,44 @@ +<nifti_image + image_offset = '352' + ndim = '3' + nx = '91' + ny = '109' + nz = '91' + nt = '1' + dx = '2' + dy = '2' + dz = '2' + dt = '1' + datatype = '4' + nvox = '902629' + nbyper = '2' + scl_slope = '1' + scl_inter = '0' + intent_code = '10' + intent_p1 = '20' + intent_p2 = '30' + intent_p3 = '40' + intent_name = '' + toffset = '0' + xyz_units = '2' + time_units = '8' + freq_dim = '0' + phase_dim = '0' + slice_dim = '0' + descrip = 'FSL5.0' + aux_file = '' + qform_code = '4' + qfac = '-1' + quatern_b = '0' + quatern_c = '1' + quatern_d = '0' + qoffset_x = '90' + qoffset_y = '-126' + qoffset_z = '-72' + sform_code = '4' + sto_xyz_matrix = '-2 0 0 90 0 2 0 -126 0 0 2 -72 0 0 0 1' + slice_code = '0' + slice_start = '0' + scl_end = '0' + scl_duration = '0' +/> diff --git a/tests/fslstats/feedsRun b/tests/fslstats/feedsRun new file mode 100755 index 0000000000000000000000000000000000000000..6e6c725e81cdb1e896dffde3dcab802ec604167b --- /dev/null +++ b/tests/fslstats/feedsRun @@ -0,0 +1,215 @@ +#!/usr/bin/env fslpython + +# test fslstats with a bunch of different options +# +# Usage: fslstats [preoptions] <input> [options] +# +# preoption -t will give a separate output line for each 3D volume of a 4D timeseries +# preoption -K < indexMask > will generate seperate n submasks from indexMask, for indexvalues 1..n where n is the maximum index value in indexMask, and generate statistics for each submask +# Note - options are applied in order, e.g. 
-M -l 10 -M will report the non-zero mean, apply a threshold and then report the new nonzero mean + +# -l <lthresh> : set lower threshold +# -u <uthresh> : set upper threshold +# -r : output <robust min intensity> <robust max intensity> +# -R : output <min intensity> <max intensity> +# -e : output mean entropy ; mean(-i*ln(i)) +# -E : output mean entropy (of nonzero voxels) +# -v : output <voxels> <volume> +# -V : output <voxels> <volume> (for nonzero voxels) +# -m : output mean +# -M : output mean (for nonzero voxels) +# -s : output standard deviation +# -S : output standard deviation (for nonzero voxels) +# -w : output smallest ROI <xmin> <xsize> <ymin> <ysize> <zmin> <zsize> <tmin> <tsize> containing nonzero voxels +# -x : output co-ordinates of maximum voxel +# -X : output co-ordinates of minimum voxel +# -c : output centre-of-gravity (cog) in mm coordinates +# -C : output centre-of-gravity (cog) in voxel coordinates +# -p <n> : output nth percentile (n between 0 and 100) +# -P <n> : output nth percentile (for nonzero voxels) +# -a : use absolute values of all image intensities +# -n : treat NaN or Inf as zero for subsequent stats +# -k <mask> : use the specified image (filename) for masking - overrides lower and upper thresholds +# -d <image> : take the difference between the base image and the image specified here +# -h <nbins> : output a histogram (for the thresholded/masked voxels only) with nbins +# -H <nbins> <min> <max> : output a histogram (for the thresholded/masked voxels only) with nbins and histogram limits of min and max + +# Note - thresholds are not inclusive ie lthresh<allowed<uthresh + +import sys +import os +import fsl.utils.run as run +import numpy as np +import nibabel as nib + +# set the random seed so that the tests are reproducible +np.random.seed(0) + +options = [ + {"name": "robust", "option": "-r", "expected": "0.018533 0.978824"}, + {"name": "notRobust", "option": "-R", "expected": "0.000546 0.999809"}, + {"name": "entropy", "option": "-e", "expected": "0.906103"}, + {"name": "entropyNonzero", "option": "-E", "expected": "0.906103"}, + {"name": "volume", "option": "-v", "expected": "1000 1000.000000"}, + {"name": "volumeNonzero", "option": "-V", "expected": "1000 1000.000000"}, + {"name": "mean", "option": "-m", "expected": "0.495922"}, + {"name": "mean", "option": "-m -a", "expected": "0.495922"}, + {"name": "mean", "option": "-m -a -n", "expected": "0.495922"}, + {"name": "meanNonzero", "option": "-M", "expected": "0.495922"}, + {"name": "stdev", "option": "-s", "expected": "0.290744"}, + {"name": "stdevNonzero", "option": "-S", "expected": "0.290744"}, + {"name": "roi","option": "-w", "expected": "0 10 0 10 0 10 0 1"}, + {"name": "maxCoord", "option": "-x", "expected": "9 7 4"}, + {"name": "minCoord", "option": "-X", "expected": "8 2 1"}, + {"name": "cogMM", "option": "-c", "expected": "4.498706 4.545873 4.613188"}, + {"name": "cogVox","option": "-C", "expected": "4.498706 4.545873 4.613188"}, + {"name": "percentile", "option": "-p 50", "expected": "0.482584"}, + {"name": "percentileNonzero","option": "-P 50", "expected": "0.482584"}, + {"name": "meanHistCog", "option": "-m -h 10 -c", "expected": "0.495922 101.000000 \n99.000000 \n108.000000 \n107.000000 \n102.000000 \n93.000000 \n99.000000 \n88.000000 \n95.000000 \n108.000000 \n 4.498706 4.545873 4.613188"}, + {"name": "meanHist", "option": "-m -h 10", "expected": "0.495922 101.000000 \n99.000000 \n108.000000 \n107.000000 \n102.000000 \n93.000000 \n99.000000 \n88.000000 \n95.000000 \n108.000000"}, + 
{"name": "hist", "option": "-h 10", "expected": "101.000000 \n99.000000 \n108.000000 \n107.000000 \n102.000000 \n93.000000 \n99.000000 \n88.000000 \n95.000000 \n108.000000"}, + {"name": "histLimits", "option": "-H 10 0 1", "expected": "98.000000 \n102.000000 \n107.000000 \n108.000000 \n102.000000 \n93.000000 \n99.000000 \n88.000000 \n95.000000 \n108.000000"}, + {"name": "threshold", "option": "-l 0.25 -u 0.75 -m", "expected": "0.495150"}, + {"name": "diff", "option": "-d imageForDiff.nii.gz -m", "expected": "0.000000"}, +] + +# create a list of tests and expected results +tests = [] +for option in options: + tests.append( + { + "preoptions": "", + "mask": "", + "options": option["option"], + "expected": option["expected"], + "name": option["name"], + } + ) + +tests_with_preoptions = [ + { + "preoptions": "-K", + "mask": "mask.nii.gz", + "options": "-m", + "expected": "0.539022 \n0.514414 \n0.493214 \n0.510435", + "name": "preopIndexMaskMean", + }, + { + "preoptions": "-t -K", + "mask": "mask.nii.gz", + "options": "-m", + "expected": "0.526682 \n0.515652 \n0.492337 \n0.511661 \n0.551362 \n0.513176 \n0.494091 \n0.509208", + "name": "preopIndexMaskMeanTimeSeries", + }, + { + "preoptions": "-t", + "mask": "", + "options": "-m", + "expected": "0.503236 \n0.504035", + "name": "preopMeanTimeSeries", + }, +] + +json_tests = [] +for option in options: + file_string = f"test_{option['name']}_{option['option'].replace(' ', '_')}.json" + file_string = file_string.replace("-", "") + json_tests.append( + { + "preoptions": "", + "mask": "", + "options": option["option"], + "expected": file_string, + } + ) + +for option in tests_with_preoptions: + file_string = f"test_{option['name']}_{option['preoptions'].replace(' ', '_')}_{option['options'].replace(' ', '_')}.json" + file_string = file_string.replace("-", "") + json_tests.append( + { + "preoptions": option["preoptions"], + "mask": option["mask"], + "options": option["options"], + "expected": file_string, + } + ) + +def test_fslstats(): + imgfile = 'test.nii.gz' + + # run the tests witoout preoptions + for test in tests: + cmd = f"fslstats {test['preoptions']} {test['mask']} {imgfile} {test['options']}" + # remove any double spaces from empty test options + cmd = " ".join(cmd.split()) + print(cmd) + output = run.runfsl(cmd) + # trim any trailing whitespace from the output (fslstats adds a newline at the end) + output = output.rstrip() + try: + assert output == test["expected"] + except AssertionError: + print('Failed test') + print(cmd) + print('Output') + print(output) + print('Expected') + print(test["expected"]) + raise + + # run the tests with preoptions + imgfile = 'test_4d.nii.gz' + for test in tests_with_preoptions: + cmd = f"fslstats {test['preoptions']} {test['mask']} {imgfile} {test['options']}" + # remove any double spaces from empty test options + cmd = " ".join(cmd.split()) + print(cmd, flush=True) + output = run.runfsl(cmd) + # trim any trailing whitespace from the output (fslstats adds a newline at the end) + output = output.rstrip() + try: + assert output == test["expected"] + except AssertionError: + print('Failed test') + print(cmd) + print('Output') + print(output) + print('Expected') + print(test["expected"]) + raise + + # run the json tests + for test in json_tests: + cmd = f"fslstats -json {test['preoptions']} {test['mask']} {imgfile} {test['options']}" + # remove any double spaces from empty test options + cmd = " ".join(cmd.split()) + print(cmd, flush=True) + output = run.runfsl(cmd) + + # try to load the expected json file + # 
if it exists, compare the output to the expected + try: + print(test["expected"]) + with open(test["expected"], "r") as json_file: + # read the file as a string + expected = json_file.read() + try: + assert output == expected + except AssertionError: + print('Failed test') + print(cmd) + print('Output') + print(output) + print('Expected') + print(expected) + raise + except FileNotFoundError: + # if the expected json file doesn't exist, create it from the output + # usefull for creating new tests or the first time a test is run + with open(test["expected"], "w") as json_file: + json_file.write(output) + + +if __name__ == "__main__": + test_fslstats() diff --git a/tests/fslstats/imageForDiff.nii.gz b/tests/fslstats/imageForDiff.nii.gz new file mode 100644 index 0000000000000000000000000000000000000000..f4d00fe3675aa6b9dd763c59d6da23ec402c2b17 --- /dev/null +++ b/tests/fslstats/imageForDiff.nii.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:14105cd775bf8e192299695a90848e285690c2dafb086bdd8830c5665fb0394b +size 3716 diff --git a/tests/fslstats/mask.nii.gz b/tests/fslstats/mask.nii.gz new file mode 100644 index 0000000000000000000000000000000000000000..3a7c1553ca342e57efe8b8ffd49bd880b6832c7f --- /dev/null +++ b/tests/fslstats/mask.nii.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d9a0b1719ca95cf36b44ddcde54121fa28411d060ab0d288303c80935eb429b1 +size 786 diff --git a/tests/fslstats/test.nii.gz b/tests/fslstats/test.nii.gz new file mode 100644 index 0000000000000000000000000000000000000000..f4d00fe3675aa6b9dd763c59d6da23ec402c2b17 --- /dev/null +++ b/tests/fslstats/test.nii.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:14105cd775bf8e192299695a90848e285690c2dafb086bdd8830c5665fb0394b +size 3716 diff --git a/tests/fslstats/test_4d.nii.gz b/tests/fslstats/test_4d.nii.gz new file mode 100644 index 0000000000000000000000000000000000000000..247023d54a81d5c7994b0561ed2fc17768cbf514 --- /dev/null +++ b/tests/fslstats/test_4d.nii.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a48fc73f54d9aba889968a99d58339d495950301970f166166bede44dd38e623 +size 7304 diff --git a/tests/fslstats/test_cogMM_c.json b/tests/fslstats/test_cogMM_c.json new file mode 100644 index 0000000000000000000000000000000000000000..758ec3c4ea87958f2b46bc64f2c2c4b4cc53a91a --- /dev/null +++ b/tests/fslstats/test_cogMM_c.json @@ -0,0 +1,15 @@ +{ + "volumes": [ + { + "indices": [ + { + "-c": [ + 4.505939956803816, + 4.486097425091172, + 4.488288600081377 + ] + } + ] + } + ] +} diff --git a/tests/fslstats/test_cogVox_C.json b/tests/fslstats/test_cogVox_C.json new file mode 100644 index 0000000000000000000000000000000000000000..1b6b7b0042df12032ef3fba38c5279efd005e671 --- /dev/null +++ b/tests/fslstats/test_cogVox_C.json @@ -0,0 +1,15 @@ +{ + "volumes": [ + { + "indices": [ + { + "-C": [ + 4.505939956803816, + 4.486097425091172, + 4.488288600081377 + ] + } + ] + } + ] +} diff --git a/tests/fslstats/test_diff_d_imageForDiff.nii.gz_m.json b/tests/fslstats/test_diff_d_imageForDiff.nii.gz_m.json new file mode 100644 index 0000000000000000000000000000000000000000..76d250a99ba24c780ad8177fbd3f31c5cbc82767 --- /dev/null +++ b/tests/fslstats/test_diff_d_imageForDiff.nii.gz_m.json @@ -0,0 +1,11 @@ +{ + "volumes": [ + { + "indices": [ + { + "-m": 0.0077142266542650755 + } + ] + } + ] +} diff --git a/tests/fslstats/test_entropyNonzero_E.json b/tests/fslstats/test_entropyNonzero_E.json new file mode 100644 index 
0000000000000000000000000000000000000000..9573a6e2e3ea79b23560e254157c2d15a12dfff5 --- /dev/null +++ b/tests/fslstats/test_entropyNonzero_E.json @@ -0,0 +1,11 @@ +{ + "volumes": [ + { + "indices": [ + { + "-E": 0.9468094676591793 + } + ] + } + ] +} diff --git a/tests/fslstats/test_entropy_e.json b/tests/fslstats/test_entropy_e.json new file mode 100644 index 0000000000000000000000000000000000000000..407d1864d013c94088173ee18a6cca522b736898 --- /dev/null +++ b/tests/fslstats/test_entropy_e.json @@ -0,0 +1,11 @@ +{ + "volumes": [ + { + "indices": [ + { + "-e": 0.9468094676591793 + } + ] + } + ] +} diff --git a/tests/fslstats/test_histLimits_H_10_0_1.json b/tests/fslstats/test_histLimits_H_10_0_1.json new file mode 100644 index 0000000000000000000000000000000000000000..4ce81ec78aa6c653dcdec7d31c72c7d604b95fd4 --- /dev/null +++ b/tests/fslstats/test_histLimits_H_10_0_1.json @@ -0,0 +1,22 @@ +{ + "volumes": [ + { + "indices": [ + { + "-H": [ + 193.0, + 220.0, + 194.0, + 201.0, + 190.0, + 184.0, + 191.0, + 199.0, + 200.0, + 228.0 + ] + } + ] + } + ] +} diff --git a/tests/fslstats/test_hist_h_10.json b/tests/fslstats/test_hist_h_10.json new file mode 100644 index 0000000000000000000000000000000000000000..2e210d2ba04254ede95cb789bb3e6f50b807a412 --- /dev/null +++ b/tests/fslstats/test_hist_h_10.json @@ -0,0 +1,22 @@ +{ + "volumes": [ + { + "indices": [ + { + "-h": [ + 198.0, + 216.0, + 194.0, + 200.0, + 190.0, + 184.0, + 191.0, + 199.0, + 199.0, + 229.0 + ] + } + ] + } + ] +} diff --git a/tests/fslstats/test_maxCoord_x.json b/tests/fslstats/test_maxCoord_x.json new file mode 100644 index 0000000000000000000000000000000000000000..1a2017282d339384c75ffb8cba333700341fdad7 --- /dev/null +++ b/tests/fslstats/test_maxCoord_x.json @@ -0,0 +1,15 @@ +{ + "volumes": [ + { + "indices": [ + { + "-x": [ + 4, + 8, + 7 + ] + } + ] + } + ] +} diff --git a/tests/fslstats/test_meanHistCog_m_h_10_c.json b/tests/fslstats/test_meanHistCog_m_h_10_c.json new file mode 100644 index 0000000000000000000000000000000000000000..58380fdb02573fc29b8dded4ae7edd8f7a3c8701 --- /dev/null +++ b/tests/fslstats/test_meanHistCog_m_h_10_c.json @@ -0,0 +1,28 @@ +{ + "volumes": [ + { + "indices": [ + { + "-c": [ + 4.505939956803816, + 4.486097425091172, + 4.488288600081377 + ], + "-h": [ + 198.0, + 216.0, + 194.0, + 200.0, + 190.0, + 184.0, + 191.0, + 199.0, + 199.0, + 229.0 + ], + "-m": 0.5036357601464115 + } + ] + } + ] +} diff --git a/tests/fslstats/test_meanHist_m_h_10.json b/tests/fslstats/test_meanHist_m_h_10.json new file mode 100644 index 0000000000000000000000000000000000000000..c8316e5e320510f9a8d269ea71760ffa426277c4 --- /dev/null +++ b/tests/fslstats/test_meanHist_m_h_10.json @@ -0,0 +1,23 @@ +{ + "volumes": [ + { + "indices": [ + { + "-h": [ + 198.0, + 216.0, + 194.0, + 200.0, + 190.0, + 184.0, + 191.0, + 199.0, + 199.0, + 229.0 + ], + "-m": 0.5036357601464115 + } + ] + } + ] +} diff --git a/tests/fslstats/test_meanNonzero_M.json b/tests/fslstats/test_meanNonzero_M.json new file mode 100644 index 0000000000000000000000000000000000000000..e7892ba4cd9fdd18868824b58ea99ed18f3cc330 --- /dev/null +++ b/tests/fslstats/test_meanNonzero_M.json @@ -0,0 +1,11 @@ +{ + "volumes": [ + { + "indices": [ + { + "-M": 0.5036357601464115 + } + ] + } + ] +} diff --git a/tests/fslstats/test_mean_m.json b/tests/fslstats/test_mean_m.json new file mode 100644 index 0000000000000000000000000000000000000000..3763ba8a708cddda21719ed8afdd6df152158fdd --- /dev/null +++ b/tests/fslstats/test_mean_m.json @@ -0,0 +1,11 @@ +{ + "volumes": [ + { + "indices": 
[ + { + "-m": 0.5036357601464115 + } + ] + } + ] +} diff --git a/tests/fslstats/test_mean_m_a.json b/tests/fslstats/test_mean_m_a.json new file mode 100644 index 0000000000000000000000000000000000000000..3763ba8a708cddda21719ed8afdd6df152158fdd --- /dev/null +++ b/tests/fslstats/test_mean_m_a.json @@ -0,0 +1,11 @@ +{ + "volumes": [ + { + "indices": [ + { + "-m": 0.5036357601464115 + } + ] + } + ] +} diff --git a/tests/fslstats/test_mean_m_a_n.json b/tests/fslstats/test_mean_m_a_n.json new file mode 100644 index 0000000000000000000000000000000000000000..3763ba8a708cddda21719ed8afdd6df152158fdd --- /dev/null +++ b/tests/fslstats/test_mean_m_a_n.json @@ -0,0 +1,11 @@ +{ + "volumes": [ + { + "indices": [ + { + "-m": 0.5036357601464115 + } + ] + } + ] +} diff --git a/tests/fslstats/test_minCoord_X.json b/tests/fslstats/test_minCoord_X.json new file mode 100644 index 0000000000000000000000000000000000000000..aa72b2ef8e9255249a9e19a6ffa9fc1c0ed2870d --- /dev/null +++ b/tests/fslstats/test_minCoord_X.json @@ -0,0 +1,15 @@ +{ + "volumes": [ + { + "indices": [ + { + "-X": [ + 8, + 1, + 6 + ] + } + ] + } + ] +} diff --git a/tests/fslstats/test_notRobust_R.json b/tests/fslstats/test_notRobust_R.json new file mode 100644 index 0000000000000000000000000000000000000000..b5b7acd3e86b3f8de0302bcdc47f6ca7453a63dd --- /dev/null +++ b/tests/fslstats/test_notRobust_R.json @@ -0,0 +1,14 @@ +{ + "volumes": [ + { + "indices": [ + { + "-R": [ + 0.00036734374589286745, + 0.9998085498809814 + ] + } + ] + } + ] +} diff --git a/tests/fslstats/test_percentileNonzero_P_50.json b/tests/fslstats/test_percentileNonzero_P_50.json new file mode 100644 index 0000000000000000000000000000000000000000..47aeceb4aa12b07fd9f9d94fc334e32df0da3c83 --- /dev/null +++ b/tests/fslstats/test_percentileNonzero_P_50.json @@ -0,0 +1,11 @@ +{ + "volumes": [ + { + "indices": [ + { + "-P": 0.5013243556022644 + } + ] + } + ] +} diff --git a/tests/fslstats/test_percentile_p_50.json b/tests/fslstats/test_percentile_p_50.json new file mode 100644 index 0000000000000000000000000000000000000000..a105e7c94c23eb9c88bce4dd7f1c4be225ab2924 --- /dev/null +++ b/tests/fslstats/test_percentile_p_50.json @@ -0,0 +1,11 @@ +{ + "volumes": [ + { + "indices": [ + { + "-p": 0.5013243556022644 + } + ] + } + ] +} diff --git a/tests/fslstats/test_preopIndexMaskMeanTimeSeries_t_K_m.json b/tests/fslstats/test_preopIndexMaskMeanTimeSeries_t_K_m.json new file mode 100644 index 0000000000000000000000000000000000000000..9f0e754f72c2f899b2d531512c8dd4294a6b4822 --- /dev/null +++ b/tests/fslstats/test_preopIndexMaskMeanTimeSeries_t_K_m.json @@ -0,0 +1,36 @@ +{ + "volumes": [ + { + "indices": [ + { + "-m": 0.5266819608311916 + }, + { + "-m": 0.5156517274568186 + }, + { + "-m": 0.49233699466459285 + }, + { + "-m": 0.5116614613561304 + } + ] + }, + { + "indices": [ + { + "-m": 0.5513619920395724 + }, + { + "-m": 0.5131755568141885 + }, + { + "-m": 0.49409089087108476 + }, + { + "-m": 0.5092079986783641 + } + ] + } + ] +} diff --git a/tests/fslstats/test_preopIndexMaskMean_K_m.json b/tests/fslstats/test_preopIndexMaskMean_K_m.json new file mode 100644 index 0000000000000000000000000000000000000000..13bd72edd29b5cb9e50f5ee5901d45b309b912d5 --- /dev/null +++ b/tests/fslstats/test_preopIndexMaskMean_K_m.json @@ -0,0 +1,20 @@ +{ + "volumes": [ + { + "indices": [ + { + "-m": 0.539021976435382 + }, + { + "-m": 0.5144136421355036 + }, + { + "-m": 0.4932139427678388 + }, + { + "-m": 0.5104347300172473 + } + ] + } + ] +} diff --git a/tests/fslstats/test_preopMeanTimeSeries_t_m.json 
b/tests/fslstats/test_preopMeanTimeSeries_t_m.json new file mode 100644 index 0000000000000000000000000000000000000000..b2b481f0415c2d2ae1060aaa139593ce3b1db9fe --- /dev/null +++ b/tests/fslstats/test_preopMeanTimeSeries_t_m.json @@ -0,0 +1,18 @@ +{ + "volumes": [ + { + "indices": [ + { + "-m": 0.503236498022452 + } + ] + }, + { + "indices": [ + { + "-m": 0.504035022270371 + } + ] + } + ] +} diff --git a/tests/fslstats/test_robust_r.json b/tests/fslstats/test_robust_r.json new file mode 100644 index 0000000000000000000000000000000000000000..9810ac1677f1508fa2a4fb09a4e53c2da26217c6 --- /dev/null +++ b/tests/fslstats/test_robust_r.json @@ -0,0 +1,14 @@ +{ + "volumes": [ + { + "indices": [ + { + "-r": [ + 0.018357284367084503, + 0.982818067073822 + ] + } + ] + } + ] +} diff --git a/tests/fslstats/test_roi_w.json b/tests/fslstats/test_roi_w.json new file mode 100644 index 0000000000000000000000000000000000000000..d023769141f6bc830c86436e7d26d730e69e1074 --- /dev/null +++ b/tests/fslstats/test_roi_w.json @@ -0,0 +1,20 @@ +{ + "volumes": [ + { + "indices": [ + { + "-w": [ + 0, + 10, + 0, + 10, + 0, + 10, + 0, + 2 + ] + } + ] + } + ] +} diff --git a/tests/fslstats/test_stdevNonzero_S.json b/tests/fslstats/test_stdevNonzero_S.json new file mode 100644 index 0000000000000000000000000000000000000000..cdb0547b2196d2c346452c752aca43fd0e581c37 --- /dev/null +++ b/tests/fslstats/test_stdevNonzero_S.json @@ -0,0 +1,11 @@ +{ + "volumes": [ + { + "indices": [ + { + "-S": 0.2949828225878523 + } + ] + } + ] +} diff --git a/tests/fslstats/test_stdev_s.json b/tests/fslstats/test_stdev_s.json new file mode 100644 index 0000000000000000000000000000000000000000..b473581a8be07219141475c501720b8a2f605684 --- /dev/null +++ b/tests/fslstats/test_stdev_s.json @@ -0,0 +1,11 @@ +{ + "volumes": [ + { + "indices": [ + { + "-s": 0.29498282199940035 + } + ] + } + ] +} diff --git a/tests/fslstats/test_threshold_l_0.25_u_0.75_m.json b/tests/fslstats/test_threshold_l_0.25_u_0.75_m.json new file mode 100644 index 0000000000000000000000000000000000000000..6712e13e07ae3d558abd26e1e223374c7fdf754c --- /dev/null +++ b/tests/fslstats/test_threshold_l_0.25_u_0.75_m.json @@ -0,0 +1,11 @@ +{ + "volumes": [ + { + "indices": [ + { + "-m": 0.49646345829352356 + } + ] + } + ] +} diff --git a/tests/fslstats/test_volumeNonzero_V.json b/tests/fslstats/test_volumeNonzero_V.json new file mode 100644 index 0000000000000000000000000000000000000000..9a1ed332a56087d76f6d4a1d414828c2b64dc003 --- /dev/null +++ b/tests/fslstats/test_volumeNonzero_V.json @@ -0,0 +1,14 @@ +{ + "volumes": [ + { + "indices": [ + { + "-V": [ + 2000.0, + 2000.0 + ] + } + ] + } + ] +} diff --git a/tests/fslstats/test_volume_v.json b/tests/fslstats/test_volume_v.json new file mode 100644 index 0000000000000000000000000000000000000000..5e4fb299ead33a17ff8540d8d425fbaca09972cc --- /dev/null +++ b/tests/fslstats/test_volume_v.json @@ -0,0 +1,14 @@ +{ + "volumes": [ + { + "indices": [ + { + "-v": [ + 2000.0, + 2000.0 + ] + } + ] + } + ] +}
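
Reviewer note (not part of the patch): the expected-output fixtures above document the shape of the new fslstats -json output. There is a top-level "volumes" list, which appears to carry one entry per 3D volume when the -t preoption is given (compare test_preopMeanTimeSeries_t_m.json) and a single entry otherwise, and each volume holds an "indices" list with one entry per -K submask (compare test_preopIndexMaskMean_K_m.json), keyed by the option flag that produced the value. The following is a minimal sketch, using only the Python standard library, of how such a fixture could be read back; the helper name read_stat is hypothetical, and the filename, flag and value are taken from test_mean_m.json in this patch:

import json

def read_stat(jsonfile, flag, volume=0, index=0):
    # "volumes": one entry per 3D volume (with -t); "indices": one entry per
    # -K submask; without preoptions both lists hold a single entry.
    with open(jsonfile) as f:
        data = json.load(f)
    return data["volumes"][volume]["indices"][index][flag]

if __name__ == "__main__":
    # test_mean_m.json in this patch stores
    # {"volumes": [{"indices": [{"-m": 0.5036357601464115}]}]}
    print(read_stat("test_mean_m.json", "-m"))

As a usage note, the fslstats feedsRun script writes a missing expected .json file from the observed output the first time a test runs, so regenerating a fixture after an intentional change to the JSON format should only require deleting the corresponding file and re-running the test.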