Compare revisions

Target project: fsl/pyfeeds-tests
Commits on Source (27)
Showing 925 additions and 389 deletions
@@ -24,6 +24,13 @@ newFSF = op.join(outDir, "design.fsf")
cmd = "./cleanFSF.py {0} {1} {2} {3} {4} {5}".format(origFSF, newFSF, origDataDir, dataDir, outDir, origFSLDir)
utils.run(cmd)
# For conda version, default_flobs.flobs is in new location
with open(newFSF, 'rt') as f:
    contents = f.read()
with open(newFSF, 'wt') as f:
    f.write(contents.replace('/etc/', '/data/feat5/'))
# run FEAT
utils.run("unset SGE_ROOT; feat {0}".format(op.join(outDir, "design.fsf")))
@@ -8,4 +8,9 @@ indir=$(pwd)
pushd $outdir
cp -r $indir/bedpost .
bedpostx_gpu bedpost
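# Submit bedpostx_gpu via fsl_sub, then block until it finishes: a second,
# held job removes the sentinel "wait" file once the first job has completed.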
job_id=$(fsl_sub --coprocessor=cuda bedpostx_gpu bedpost)
touch ${outdir}/wait
fsl_sub --jobhold ${job_id} rm wait
while [ -e ${outdir}/wait ]; do
sleep 1m
done
@@ -29,11 +29,18 @@ echo $indx > $dti_txt2
fslroi $dti $dti_b0 0 1
bet $dti_b0 $dti_b -f 0.2 -R -n -m
eddy \
--imain=$dti \
--mask=$dti_b --acqp=$dti_txt \
--index=$dti_txt2 \
--bvecs=$dti_bvec \
--bvals=$dti_bval \
--repol \
--out=$dti_u
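# Submit eddy to the CUDA queue via fsl_sub, capturing the job ID so that the
# sentinel "wait" file below can be removed by a held job once eddy finishes.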
job_id=$(fsl_sub --coprocessor=cuda eddy \
--imain=$dti \
--mask=$dti_b \
--acqp=$dti_txt \
--index=$dti_txt2 \
--bvecs=$dti_bvec \
--bvals=$dti_bval \
--repol \
--out=$dti_u)
touch ${outdir}/wait
fsl_sub --jobhold ${job_id} rm wait
while [ -e ${outdir}/wait ]; do
sleep 1m
done
@@ -8,9 +8,15 @@ indir=$(pwd)
pushd $outdir
for i in {1..172}; do
probtrackx2_gpu -x $indir/masks/$i.nii.gz \
--dir=$indir/probtrackx/$i --forcedir -P 200 \
-s $indir/bedpost.bedpostX/merged \
-m $indir/bedpost.bedpostX/nodif_brain_mask \
--opd --pd -l -c 0.2 --distthresh=0
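# Submit each probtrackx2_gpu run via fsl_sub and wait for it to finish
# before moving on to the next seed mask.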
job_id=$(fsl_sub --coprocessor=cuda probtrackx2_gpu \
-x $indir/masks/$i.nii.gz \
--dir=$indir/probtrackx/$i --forcedir -P 200 \
-s $indir/bedpost.bedpostX/merged \
-m $indir/bedpost.bedpostX/nodif_brain_mask \
--opd --pd -l -c 0.2 --distthresh=0)
touch ${outdir}/wait
fsl_sub --jobhold ${job_id} rm wait
while [ -e ${outdir}/wait ]; do
sleep 1m
done
done
mmorf/
#!/usr/bin/env bash
set -e
thisdir=$(cd $(dirname $0) && pwd)
outdir=$1
indir=$2/mmorf
benchmarkdir=$3
mkdir -p ${outdir}/out
MMORF_CONFIG=${outdir}/mmorf_config.ini
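# Fill in the input/output directory placeholders in the template config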
cat mov_to_ref_mm.ini | \
sed "s%##MMORF_INPUT_DIR##%${indir}%g" | \
sed "s%##MMORF_OUTPUT_DIR##%${outdir}%g" > \
${MMORF_CONFIG}
echo "----------"
echo "MMORF Test"
echo "----------"
echo ""
# Run MMORF
echo ""
echo "-------------"
echo "Running mmorf"
echo "-------------"
echo ""
# Use fsl_sub in case we are running on the
# cluster. This will just run normally if running
# locally (but fsl_sub will still return a job id
# that can be used in the subsequent hold call).
job_id=$(fsl_sub --coprocessor=cuda mmorf --version --config ${MMORF_CONFIG})
touch ${outdir}/wait
fsl_sub --jobhold ${job_id} rm ${outdir}/wait
while [ -e ${outdir}/wait ]; do
sleep 1m
done
# Combine warps
echo ""
echo "-------------------"
echo "Running convertwarp"
echo "-------------------"
echo ""
convertwarp -m ${indir}/mov/mov_to_ref.mat -w ${outdir}/out/mov_to_ref_warp -r ${indir}/ref/t1 -o ${outdir}/out/mov_to_ref_warp_combined --rel --relout
# Resample T1
echo ""
echo "-----------------"
echo "Running applywarp"
echo "-----------------"
echo ""
applywarp -i ${indir}/mov/t1 -r ${indir}/ref/t1 -w ${outdir}/out/mov_to_ref_warp_combined -o ${outdir}/out/mov_to_ref_t1_nln
# Resample DTI
echo ""
echo "--------------"
echo "Running vecreg"
echo "--------------"
echo ""
vecreg -i ${indir}/mov/dti -o ${outdir}/out/mov_to_ref_dti_nln -r ${indir}/ref/t1 -w ${outdir}/out/mov_to_ref_warp_combined -m ${indir}/mov/mask_dti --refmask=${indir}/ref/mask_dti
# Calculate squared differences to expected outputs
echo ""
echo "----------------"
echo "Running fslmaths"
echo "----------------"
echo ""
fslmaths ${outdir}/out/mov_to_ref_warp -inm 1000 ${outdir}/out/mov_to_ref_warp
fslmaths ${outdir}/out/mov_to_ref_warp_combined -inm 1000 ${outdir}/out/mov_to_ref_warp_combined
fslmaths ${outdir}/out/mov_to_ref_jac -inm 1000 ${outdir}/out/mov_to_ref_jac
fslmaths ${outdir}/out/mov_to_ref_bias_1 -inm 1000 ${outdir}/out/mov_to_ref_bias_1
fslmaths ${outdir}/out/mov_to_ref_t1_nln -inm 1000 ${outdir}/out/mov_to_ref_t1_nln
fslmaths ${outdir}/out/mov_to_ref_dti_nln -inm 1000 ${outdir}/out/mov_to_ref_dti_nln
fslmaths ${outdir}/out/mov_to_ref_warp -sub ${benchmarkdir}/out/mov_to_ref_warp -sqr -Tmean ${outdir}/out/mov_to_ref_warp_sd
fslmaths ${outdir}/out/mov_to_ref_warp_combined -sub ${benchmarkdir}/out/mov_to_ref_warp_combined -sqr -Tmean ${outdir}/out/mov_to_ref_warp_combined_sd
fslmaths ${outdir}/out/mov_to_ref_jac -sub ${benchmarkdir}/out/mov_to_ref_jac -sqr ${outdir}/out/mov_to_ref_jac_sd
fslmaths ${outdir}/out/mov_to_ref_bias_1 -sub ${benchmarkdir}/out/mov_to_ref_bias_1 -sqr ${outdir}/out/mov_to_ref_bias_1_sd
fslmaths ${outdir}/out/mov_to_ref_t1_nln -sub ${benchmarkdir}/out/mov_to_ref_t1_nln -sqr ${outdir}/out/mov_to_ref_t1_nln_sd
fslmaths ${outdir}/out/mov_to_ref_dti_nln -sub ${benchmarkdir}/out/mov_to_ref_dti_nln -sqr -Tmean ${outdir}/out/mov_to_ref_dti_nln_sd
# Calculate MSE for each output to expected output
echo ""
echo "----------------"
echo "Running fslstats"
echo "----------------"
echo ""
echo "Warp Field MSE = $(fslstats ${outdir}/out/mov_to_ref_warp_sd -k ${indir}/ref/mask_dti -m)"
echo "Jac Det MSE = $(fslstats ${outdir}/out/mov_to_ref_jac_sd -k ${indir}/ref/mask_dti -m)"
echo "Bias Field MSE = $(fslstats ${outdir}/out/mov_to_ref_bias_1_sd -k ${indir}/ref/mask_dti -m)"
echo "Combined Warp MSE = $(fslstats ${outdir}/out/mov_to_ref_warp_combined_sd -k ${indir}/ref/mask_dti -m)"
echo "T1 Nonlin MSE = $(fslstats ${outdir}/out/mov_to_ref_t1_nln_sd -k ${indir}/ref/mask_dti -m)"
echo "DTI Nonlin MSE = $(fslstats ${outdir}/out/mov_to_ref_dti_nln_sd -k ${indir}/ref/mask_dti -m)"
echo ""
echo "-------------"
echo "TEST COMPLETE"
echo "-------------"
echo ""
; Values only set once
warp_res_init = 32
warp_scaling = 1 1 2 2
img_warp_space = ##MMORF_INPUT_DIR##/ref/t1
lambda_reg = 4.0e5 3.7e-1 3.1e-1 2.6e-1
hires = 12
optimiser_max_it_lowres = 5
optimiser_max_it_hires = 5
; T1 with no masking and no bias field
img_ref_scalar = ##MMORF_INPUT_DIR##/ref/t1
img_mov_scalar = ##MMORF_INPUT_DIR##/mov/t1
aff_ref_scalar = ##MMORF_INPUT_DIR##/ref/identity.mat
aff_mov_scalar = ##MMORF_INPUT_DIR##/mov/mov_to_ref.mat
use_implicit_mask = 0
use_mask_ref_scalar = 0 0 0 0
use_mask_mov_scalar = 0 0 0 0
mask_ref_scalar = NULL
mask_mov_scalar = NULL
fwhm_ref_scalar = 8.0 8.0 4.0 2.0
fwhm_mov_scalar = 8.0 8.0 4.0 2.0
lambda_scalar = 0.1 0.1 0.1 0.1
estimate_bias = 0
bias_res_init = 16
lambda_bias_reg = 1e9 1e9 1e9 1e9
; T1 with masking and bias field
img_ref_scalar = ##MMORF_INPUT_DIR##/ref/t1
img_mov_scalar = ##MMORF_INPUT_DIR##/mov/t1
aff_ref_scalar = ##MMORF_INPUT_DIR##/ref/identity.mat
aff_mov_scalar = ##MMORF_INPUT_DIR##/mov/mov_to_ref.mat
use_implicit_mask = 0
use_mask_ref_scalar = 1 1 1 1
use_mask_mov_scalar = 1 1 1 1
mask_ref_scalar = ##MMORF_INPUT_DIR##/ref/mask_t1
mask_mov_scalar = ##MMORF_INPUT_DIR##/mov/mask_t1
fwhm_ref_scalar = 8.0 8.0 4.0 2.0
fwhm_mov_scalar = 8.0 8.0 4.0 2.0
lambda_scalar = 1 1 1 1
estimate_bias = 1
bias_res_init = 16
lambda_bias_reg = 1e9 1e9 1e9 1e9
; First tensor pair
img_ref_tensor = ##MMORF_INPUT_DIR##/ref/dti
img_mov_tensor = ##MMORF_INPUT_DIR##/mov/dti
aff_ref_tensor = ##MMORF_INPUT_DIR##/ref/identity.mat
aff_mov_tensor = ##MMORF_INPUT_DIR##/mov/mov_to_ref.mat
use_mask_ref_tensor = 1 1 1 1
use_mask_mov_tensor = 1 1 1 1
mask_ref_tensor = ##MMORF_INPUT_DIR##/ref/mask_dti
mask_mov_tensor = ##MMORF_INPUT_DIR##/mov/mask_dti
fwhm_ref_tensor = 8.0 8.0 4.0 2.0
fwhm_mov_tensor = 8.0 8.0 4.0 2.0
lambda_tensor = 1 1 1 1
; Output warp
warp_out = ##MMORF_OUTPUT_DIR##/out/mov_to_ref_warp
jac_det_out = ##MMORF_OUTPUT_DIR##/out/mov_to_ref_jac
bias_out = ##MMORF_OUTPUT_DIR##/out/mov_to_ref_bias
\ No newline at end of file
originalFeeds
\ No newline at end of file
#!/usr/bin/env fsltclsh
#{{{ FEEDS - FSL Evaluation and Example Data Suite
# Stephen Smith and Matthew Webster, FMRIB Image Analysis Group
#
# Copyright (C) 2001-2011 University of Oxford
#
# Part of FSL - FMRIB's Software Library
# http://www.fmrib.ox.ac.uk/fsl
# fsl@fmrib.ox.ac.uk
#
# Developed at FMRIB (Oxford Centre for Functional Magnetic Resonance
# Imaging of the Brain), Department of Clinical Neurology, Oxford
# University, Oxford, UK
#
#
# LICENCE
#
# FMRIB Software Library, Release 4.0 (c) 2007, The University of Oxford
# (the "Software")
#
# The Software remains the property of the University of Oxford ("the
# University").
#
# The Software is distributed "AS IS" under this Licence solely for
# non-commercial use in the hope that it will be useful, but in order
# that the University as a charitable foundation protects its assets for
# the benefit of its educational and research purposes, the University
# makes clear that no condition is made or to be implied, nor is any
# warranty given or to be implied, as to the accuracy of the Software,
# or that it will be suitable for any particular purpose or for use
# under any specific conditions. Furthermore, the University disclaims
# all responsibility for the use which is made of the Software. It
# further disclaims any liability for the outcomes arising from using
# the Software.
#
# The Licensee agrees to indemnify the University and hold the
# University harmless from and against any and all claims, damages and
# liabilities asserted by third parties (including claims for
# negligence) which arise directly or indirectly from the use of the
# Software or the sale of any products based on the Software.
#
# No part of the Software may be reproduced, modified, transmitted or
# transferred in any form or by any means, electronic or mechanical,
# without the express permission of the University. The permission of
# the University is not required if the said reproduction, modification,
# transmission or transference is done without financial return, the
# conditions of this Licence are imposed upon the receiver of the
# product, and all original and amended source code is included in any
# transmitted product. You may be held legally responsible for any
# copyright infringement that is caused or encouraged by your failure to
# abide by these terms and conditions.
#
# You are not permitted under this Licence to use this Software
# commercially. Use for which any financial return is received shall be
# defined as commercial use, and includes (1) integration of all or part
# of the source code or the Software into a product for sale or license
# by or on behalf of Licensee to third parties or (2) use of the
# Software or any derivative of it for research with the final aim of
# developing software products for sale or license to a third party or
# (3) use of the Software or any derivative of it for research with the
# final aim of developing non-software products for sale or license to a
# third party, or (4) use of the Software to provide any service to an
# external organisation for which payment is received. If you are
# interested in using the Software commercially, please contact Isis
# Innovation Limited ("Isis"), the technology transfer company of the
# University, to negotiate a licence. Contact details are:
# innovation@isis.ox.ac.uk quoting reference DE/1112.
#}}}
#{{{ perror
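# perror: report the RMS difference between a test output image in OUTDIR and
# the corresponding reference image in INDIR, expressed as a percentage (and
# weighted by $scale); warn if it exceeds PTHRESH and track the worst error.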
proc perror { testimage scale } {
global FSLDIR INDIR OUTDIR PTHRESH MAXPERROR
if { ! [ imtest $OUTDIR/$testimage ] } {
set PERROR 100
puts "No output image created"
} else {
exec sh -c "${FSLDIR}/bin/fslmaths $INDIR/$testimage -sub $OUTDIR/$testimage -sqr $OUTDIR/errsq -odt float"
exec sh -c "${FSLDIR}/bin/fslmaths $INDIR/$testimage -sqr $OUTDIR/meansq -odt float"
set PERROR [ expr int ( $scale * 10000.0 * sqrt ( \
[ exec ${FSLDIR}/bin/fslstats $OUTDIR/errsq -m ] / \
[ exec ${FSLDIR}/bin/fslstats $OUTDIR/meansq -m ] ) ) / 100.00 ]
puts "% error = $PERROR"
}
if { $PERROR > $PTHRESH } {
puts "Warning - test failed!"
}
if { $PERROR > $MAXPERROR } {
set MAXPERROR $PERROR
}
return $PERROR
}
#}}}
#{{{ simpleperror
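# simpleperror: absolute difference between two scalar values, expressed as a
# percentage of $denom, with the same pass/fail bookkeeping as perror.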
proc simpleperror { a b denom } {
global PTHRESH MAXPERROR
set PERROR [ expr int ( 10000.0 * ( $a - $b ) / $denom ) / 100.00 ]
if { $PERROR < 0 } {
set PERROR [ expr 0 - $PERROR ]
}
puts "% error = $PERROR"
if { $PERROR > $PTHRESH } {
puts "Warning - test failed!"
}
if { $PERROR > $MAXPERROR } {
set MAXPERROR $PERROR
}
return $PERROR
}
#{{{ setup vars and first printouts
set feeds_list "fdt fugue susan sienax bet2 feat melodic first fnirt"
foreach f $feeds_list {
set feeds($f) 1
}
set OUTDIR [ lindex $argv 0 ]
set INDIR "[ lindex $argv 1 ]/originalFeeds"
set FSLDIR $env(FSLDIR)
set PTHRESH 1
set MAXPERROR 0
set INMEDX 0
set INGUI 0
source ${FSLDIR}/tcl/fslstart.tcl
puts "\nFSL Evaluation and Example Data Suite\n"
puts "start time = [ exec date ]"
puts "hostname = [ exec hostname ]"
puts "os = [ exec uname -a ]\n"
puts "Input directory = $INDIR"
puts "Output directory = $OUTDIR"
puts "FSLDIR = $FSLDIR"
set logout $OUTDIR/LOG
#}}}
# to add ASAP:
# filmbabe (make_flobs and filmbabe) / mm / randomise
#{{{ FUGUE
if { $feeds(fugue) } {
puts "\nStarting PRELUDE & FUGUE at [ exec date ]"
fsl:exec "${FSLDIR}/bin/prelude -c $INDIR/fieldmap -o $OUTDIR/unwrapped_phase"
perror unwrapped_phase 0.5
fsl:exec "${FSLDIR}/bin/fugue -i $INDIR/epi -p $OUTDIR/unwrapped_phase -d 0.295 -u $OUTDIR/unwarped_epi"
perror unwarped_epi 0.2
}
#}}}
#{{{ SUSAN
if { $feeds(susan) } {
puts "\nStarting SUSAN at [ exec date ]"
fsl:exec "${FSLDIR}/bin/susan $INDIR/structural 2000 2 3 1 0 $OUTDIR/structural_susan"
perror structural_susan 0.25
}
#}}}
#{{{ SIENAX
if { $feeds(sienax) } {
puts "\nStarting SIENAX (including testing BET and FLIRT and FAST) at [ exec date ]"
fsl:exec "${FSLDIR}/bin/imcp $INDIR/structural $OUTDIR/structural"
fsl:exec "cd $OUTDIR ; ${FSLDIR}/bin/sienax structural -d -r"
puts "checking error on BET:"
perror structural_sienax/I_brain 0.2
puts "checking error on FLIRT:"
perror structural_sienax/I_stdmask 0.01
puts "checking error on FAST:"
puts "checking error on single-image binary segmentation:"
perror structural_sienax/I_stdmaskbrain_seg 0.05
puts "checking error on partial volume images:"
perror structural_sienax/I_stdmaskbrain_pve_0 0.02
perror structural_sienax/I_stdmaskbrain_pve_1 0.03
perror structural_sienax/I_stdmaskbrain_pve_2 0.03
puts "checking error on SIENAX volume outputs:"
foreach sienastats { pgrey vcsf GREY WHITE BRAIN } {
set r [ exec sh -c "grep $sienastats $OUTDIR/structural_sienax/report.sienax | awk '{ print \$2 }'" ]
set d [ exec sh -c "grep $sienastats $INDIR/structural_sienax/report.sienax | awk '{ print \$2 }'" ]
simpleperror $r $d 1600000
}
}
#}}}
#{{{ BET2
if { $feeds(bet2) } {
puts "\nStarting BET2 at [ exec date ]"
fsl:exec "/bin/cp $INDIR/head_t?.nii.gz $OUTDIR"
fsl:exec "cd $OUTDIR ; ${FSLDIR}/bin/bet head_t1 head_t1_brain -A2 head_t2"
puts "checking error on T1 brain extraction:"
perror head_t1_brain 0.05
puts "checking error on skull and scalp surfaces:"
perror head_t1_brain_inskull_mesh .01
perror head_t1_brain_outskull_mesh .01
perror head_t1_brain_outskin_mesh .01
}
#}}}
#{{{ FEAT
if { $feeds(feat) } {
puts "\nStarting FEAT at [ exec date ]"
# fix FEAT setup file to use INDIR, OUTDIR and FSLDIR
fsl:exec "cp ${INDIR}/fmri.feat/design.fsf ${OUTDIR}/design.fsf"
fsl:echo ${OUTDIR}/design.fsf "
set fmri(regstandard) ${FSLDIR}/data/standard/MNI152_T1_2mm_brain
set feat_files(1) ${INDIR}/fmri
set highres_files(1) ${INDIR}/structural_brain
set fmri(outputdir) ${OUTDIR}/fmri.feat"
# run FEAT
fsl:exec "${FSLDIR}/bin/feat ${OUTDIR}/design.fsf"
puts "checking error on filtered functional data:"
perror fmri.feat/filtered_func_data 0.1
puts "checking error on raw Z stat images:"
perror fmri.feat/stats/zstat1 0.02
perror fmri.feat/stats/zstat2 0.02
perror fmri.feat/stats/zfstat1 0.02
puts "checking error on thresholded Z stat images:"
perror fmri.feat/thresh_zstat1 0.02
perror fmri.feat/thresh_zstat2 0.02
perror fmri.feat/thresh_zfstat1 0.02
puts "checking error on registration images:"
perror fmri.feat/reg/example_func2highres 0.02
perror fmri.feat/reg/example_func2standard 0.02
#{{{ check error on largest cluster of Talairached zfstat1
puts "checking error on position of largest cluster of Talairached zfstat1:"
set iptr [ open ${INDIR}/fmri.feat/cluster_zfstat1_std.txt r ]
gets $iptr line
gets $iptr line
scan $line "%f %f %f %f %f %f %f %f %f" D(1) D(2) D(3) D(4) D(5) D(6) D(7) D(8) D(9)
close $iptr
set iptr [ open ${OUTDIR}/fmri.feat/cluster_zfstat1_std.txt r ]
gets $iptr line
gets $iptr line
scan $line "%f %f %f %f %f %f %f %f %f %f %f" R(1) R(2) R(3) R(4) R(5) R(6) R(7) R(8) R(9) R(10) R(11)
close $iptr
simpleperror $D(4) $R(6) 500
simpleperror $D(5) $R(7) 500
simpleperror $D(6) $R(8) 500
simpleperror $D(7) $R(9) 500
simpleperror $D(8) $R(10) 500
simpleperror $D(9) $R(11) 500
#}}}
}
#}}}
#{{{ MELODIC
if { $feeds(melodic) } {
puts "\nStarting MELODIC at [ exec date ]"
fsl:exec "${FSLDIR}/bin/melodic -i $INDIR/fmri -o $OUTDIR/fmri.ica --tr=3 --seed=2"
fsl:exec "${FSLDIR}/bin/fslcc $INDIR/fmri.ica/melodic_IC $OUTDIR/fmri.ica/melodic_IC > $OUTDIR/fmri.ica/fslcc.txt"
set MAXV 0
set MAXA 0
set iptr [ open $OUTDIR/fmri.ica/fslcc.txt r ]
while { ( [ gets $iptr line ] >= 0 ) } {
scan $line "%d %d %f" A B C
if { $A == 52 } {
if { $C > $MAXV } {
set MAXV $C
}
}
if { $A == 39 } {
if { $C > $MAXA } {
set MAXA $C
}
}
}
close $iptr
if { $MAXA < $MAXV } {
set MAXV $MAXA
}
simpleperror $MAXV 1 60
}
#}}}
#{{{ FIRST
if { $feeds(first) } {
puts "\nStarting FIRST at [ exec date ]"
fsl:exec "${FSLDIR}/bin/first_flirt $INDIR/structural $OUTDIR/structural_to_std_sub"
fsl:exec "${FSLDIR}/bin/run_first -i $INDIR/structural -t $OUTDIR/structural_to_std_sub.mat -n 20 -o $OUTDIR/structural_first_L_Hipp -m ${FSLDIR}/data/first/models_336_bin/L_Hipp_bin.bmv"
perror structural_first_L_Hipp 0.01
}
#}}}
#{{{ FDT
if { $feeds(fdt) } {
puts "\nStarting FDT (bedpost) at [ exec date ]"
fsl:exec "cp -r $INDIR/fdt_subj1 $OUTDIR"
fsl:exec "unset FSLMACHINELIST; ${FSLDIR}/bin/bedpostx $OUTDIR/fdt_subj1 -n 1"
puts "checking error on bedpost output:"
perror fdt_subj1.bedpostX/dyads1 .005
perror fdt_subj1.bedpostX/mean_f1samples .005
perror fdt_subj1.bedpostX/mean_ph1samples .003
perror fdt_subj1.bedpostX/mean_th1samples .002
perror fdt_subj1.bedpostX/merged_f1samples .005
perror fdt_subj1.bedpostX/merged_ph1samples .003
perror fdt_subj1.bedpostX/merged_th1samples .002
}
if { $feeds(fnirt) } {
puts "\nStarting FNIRT at [ exec date ]"
fsl:exec "cp -r $INDIR/feeds_fnirt* $OUTDIR"
fsl:exec "cp -r $INDIR/sad* $OUTDIR"
fsl:exec "cp -r $INDIR/happy* $OUTDIR"
set cwd [ pwd ]
cd $OUTDIR
fsl:exec "${FSLDIR}/bin/fnirt --config=feeds_fnirt"
cd $cwd
perror feeds_fnirt_sad2happy 0.1
}
#}}}
#{{{ finish up
puts "\nend time = [ exec date ]\n"
if { $MAXPERROR < $PTHRESH } {
puts "\nAll tests passed"
exit 0
} else {
puts "\nWarning - not all tests passed"
exit 1
}
#}}}
#!/usr/bin/env fslpython
import os
import sys
import subprocess as sp
import numpy as np
import nibabel as nib
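# Regression test for fslchpixdim: create a random image, change its pixel
# dimensions on the command line, then check that the shape, pixdims and voxel
# data are as expected afterwards.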
def create_image(shape, pixdim):
    pixdim = list(pixdim)
    data = np.random.random(shape).astype(np.float32)
    hdr = nib.Nifti1Header()
    hdr.set_data_dtype(np.float32)
    hdr.set_data_shape(shape)
    hdr.set_zooms(pixdim[:len(shape)])
    return nib.Nifti1Image(data, np.eye(4), hdr)

def check_image(origimg, changedimg, exppixdim):
    gotshape = changedimg.shape
    gotpixdim = list(changedimg.header.get_zooms())
    gotdata = changedimg.get_fdata()
    assert origimg.shape == gotshape
    assert gotpixdim == exppixdim
    assert np.all(origimg.get_fdata() == gotdata)

def run_tests():
    # (image shape, command, exppixdims)
    tests = [
        ((5, 5, 5), '2 2 2', (2, 2, 2)),
        ((5, 5, 5), '2 2 2 2', (2, 2, 2)),
        ((5, 5, 5, 5), '2 2 2', (2, 2, 2, 1)),
        ((5, 5, 5, 5), '2 2 2 2', (2, 2, 2, 2)),
    ]
    for shape, cmd, exppixdim in tests:
        imgfile = 'image.nii.gz'
        img = create_image(shape, [1] * len(shape))
        img.to_filename(imgfile)
        try:
            sp.run(['fslchpixdim', imgfile] + list(cmd.split()))
            check_image(img, nib.load(imgfile), list(exppixdim))
        finally:
            os.remove(imgfile)

if __name__ == '__main__':
    sys.exit(run_tests())
#!/usr/bin/env fslpython
import os
import os.path as op
import sys
import subprocess as sp
import numpy as np
import nibabel as nib
THISDIR = op.dirname(op.abspath(__file__))
# 2=char, 4=short, 8=int, 16=float, 64=double
DTYPE_MAPPING = {
2 : np.uint8,
4 : np.int16,
8 : np.int32,
16 : np.float32,
64 : np.float64
}
MNI152_2MM_AFFINE = np.array([[-2, 0, 0, 90],
[ 0, 2, 0, -126],
[ 0, 0, 2, -72],
[ 0, 0, 0, 1]])
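# Check that an image produced or modified by fslcreatehd has the expected
# shape, pixel dimensions, origin, data type, and affine.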
def validate_result(imgfile, expshape, exppixdim, exporigin, expdtype):
    img = nib.load(imgfile)
    hdr = img.header
    expshape = list(expshape)
    exppixdim = list(exppixdim)
    exporigin = list(exporigin)
    if (len(expshape) == 3) or \
       expshape[3] in (0, 1):
        expndims = 3
    else:
        expndims = 4
    expaffine = np.diag(exppixdim[:3] + [1])
    expaffine[0, 0] *= -1
    expaffine[:3, 3] = exporigin
    expaffine[0, 3] *= exppixdim[0]
    expaffine[1, 3] *= -exppixdim[1]
    expaffine[2, 3] *= -exppixdim[2]
    assert len(img.shape) == expndims
    assert list(img.shape) == expshape[ :expndims]
    assert list(hdr.get_zooms()) == exppixdim[:expndims]
    assert img.get_data_dtype() == expdtype
    assert np.all(np.isclose(img.affine, expaffine))

def create_image(shape, pixdim, origin, dtype):
    pixdim = list(pixdim)
    affine = np.diag(pixdim[:3] + [1])
    affine[:3, 3] = origin
    data = np.random.randint(1, 100, shape).astype(dtype)
    hdr = nib.Nifti1Header()
    hdr.set_data_dtype(dtype)
    hdr.set_data_shape(shape)
    hdr.set_zooms(pixdim[:len(shape)])
    return nib.Nifti1Image(data, affine, hdr)
def test_new_file():
    tests = [
        # 4th dim of size 0 or 1 should
        # result in a 3D image (this
        # is coded in validate_result)
        ' 5 5 5 0 2 2 2 0 0 0 0 2',
        ' 5 5 5 0 2 2 2 1 0 0 0 2',
        ' 5 5 5 1 2 2 2 1 0 0 0 2',
        ' 5 5 5 1 2 2 2 1 0 0 0 4',
        ' 5 5 5 1 2 2 2 1 0 0 0 8',
        ' 5 5 5 1 2 2 2 1 0 0 0 16',
        ' 5 5 5 1 2 2 2 1 1 2 3 64',
        ' 5 5 5 1 0.5 1.5 1.25 1 0 0 0 2',
        ' 5 5 5 1 0.5 1.5 1.25 1.5 0 0 0 2',
        ' 5 5 5 1 0.5 1.5 1.25 0.5 0 0 0 2',
        '30 30 30 5 5 10 3 5 10 20 10 2',
    ]
    for test in tests:
        args = list(test.split())
        imgfile = 'image.nii.gz'
        try:
            os.remove(imgfile)
        except:
            pass
        print(['fslcreatehd'] + args + [imgfile])
        sp.run(['fslcreatehd'] + args + [imgfile])
        args = [float(a) for a in args]
        expshape = args[:4]
        exppixdim = args[4:8]
        exporigin = args[8:11]
        expdtype = DTYPE_MAPPING[args[11]]
        try:
            validate_result(
                imgfile, expshape, exppixdim, exporigin, expdtype)
        finally:
            os.remove(imgfile)
def test_existing_file():
    # each test is a tuple containing:
    # - dtype_of_existing_image
    # - shape_of_existing_image
    # - exp_shape (set to None if image should not be overwritten)
    # - fslcreatehd_args
    tests = [
        # 4th dim of 0 or 1 should result in a 3D image
        (2, (5, 5, 5), None, '5 5 5 0 2 2 2 1 0 0 0 2'),
        (2, (5, 5, 5), None, '5 5 5 0 2 2 2 1 1 2 3 2'),
        (2, (5, 5, 5), None, '5 5 5 1 2 2 2 2 0 0 0 2'),
        # dtype arg should be ignored for existing images
        (2, (5, 5, 5), None, '5 5 5 0 2 2 2 1 0 0 0 4'),
        (4, (5, 5, 5), None, '5 5 5 0 2 2 2 1 0 0 0 2'),
        # data should be overwritten if any
        # of the first 3 dims are different,
        # or if a 4th dim is specified
        (2, (5, 5, 5), (5, 3, 5), '5 3 5 0 2 2 2 2 0 0 0 2'),
        (2, (5, 5, 5), (5, 3, 5), '5 3 5 0 2 2 2 2 0 0 0 4'),
        (2, (5, 5, 5), (5, 3, 5), '5 3 5 1 2 2 2 2 0 0 0 2'),
        (2, (5, 5, 5), (5, 3, 5), '5 3 5 1 2 2 2 2 1 2 3 2'),
        (2, (5, 5, 5), (5, 3, 5, 5), '5 3 5 5 2 2 2 2 0 0 0 2'),
        (2, (5, 5, 5), (5, 3, 5, 5), '5 3 5 5 2 2 2 2 1 2 3 2'),
        # 4D - same rules apply
        (2, (5, 5, 5, 5), None, '5 5 5 5 2 2 2 2 0 0 0 2'),
        (2, (5, 5, 5, 5), None, '5 5 5 5 2 2 2 2 0 0 0 2'),
        (2, (5, 5, 5, 5), None, '5 5 5 5 2 2 2 2 0 0 0 4'),
        (4, (5, 5, 5, 5), None, '5 5 5 5 2 2 2 2 0 0 0 2'),
        (2, (5, 5, 5, 5), None, '5 5 5 5 2 2 2 2 1 2 3 2'),
        # data overwritten if nelements
        # change
        (2, (5, 5, 5, 5), (5, 5, 5), '5 5 5 0 2 2 2 2 0 0 0 2'),
        (2, (5, 5, 5, 5), (5, 5, 5), '5 5 5 1 2 2 2 2 0 0 0 2'),
        (2, (5, 5, 5, 5), (5, 3, 5), '5 3 5 0 2 2 2 2 0 0 0 2'),
        (2, (5, 5, 5, 5), (5, 3, 5), '5 3 5 1 2 2 2 2 0 0 0 2'),
        (2, (5, 5, 5, 5), (5, 3, 5), '5 3 5 1 2 2 2 2 1 2 3 2'),
        (2, (5, 5, 5, 5), (5, 5, 5, 2), '5 5 5 2 2 2 2 2 0 0 0 2'),
        (2, (5, 5, 5, 5), (5, 3, 5, 5), '5 3 5 5 2 2 2 2 0 0 0 2'),
        (2, (5, 5, 5, 5), (5, 3, 5, 5), '5 3 5 5 2 2 2 2 1 2 3 2'),
    ]
    for (dtype, shape, expshape, args) in tests:
        imgfile = 'image.nii.gz'
        dtype = DTYPE_MAPPING[dtype]
        args = list(args.split())
        img = create_image(shape, (1, 1, 1, 1), (0, 0, 0), dtype)
        img.to_filename(imgfile)
        sp.run(['fslcreatehd'] + args + [imgfile])
        checkdata = expshape is None
        if expshape is None:
            expshape = shape
        args = [float(a) for a in args]
        exppixdim = args[4:8]
        exporigin = args[8:11]
        try:
            validate_result(imgfile, expshape, exppixdim, exporigin, dtype)
            if checkdata:
                data = np.asanyarray(img.dataobj)
                datacopy = np.asanyarray(nib.load(imgfile).dataobj)
                assert np.all(data == datacopy)
        finally:
            os.remove(imgfile)
def test_new_file_xml():
    xmlfile = op.join(THISDIR, 'mni2mm.xml')
    sp.run(['fslcreatehd', xmlfile, 'mni.nii.gz'])
    img = nib.load('mni.nii.gz')
    assert img.shape == (91, 109, 91)
    assert img.header.get_zooms()[:3] == (2.0, 2.0, 2.0)
    assert img.header['intent_code'] == 10
    assert img.header['intent_p1'] == 20
    assert img.header['intent_p2'] == 30
    assert img.header['intent_p3'] == 40

def test_existing_file_xml_same_shape():
    xmlfile = op.join(THISDIR, 'mni2mm.xml')
    img = create_image((91, 109, 91), (3, 3, 3), (5, 5, 5), np.float32)
    img.to_filename('image.nii.gz')
    sp.run(['fslcreatehd', xmlfile, 'image.nii.gz'])
    result = nib.load('image.nii.gz')
    assert result.shape == (91, 109, 91)
    assert result.header.get_zooms()[:3] == (2.0, 2.0, 2.0)
    # bug in FSL<=6.0.4 caused intent codes to not be set
    assert result.header['intent_code'] == 10
    assert result.header['intent_p1'] == 20
    assert result.header['intent_p2'] == 30
    assert result.header['intent_p3'] == 40
    assert np.all(result.affine == MNI152_2MM_AFFINE)
    # data should be preserved
    assert np.all(result.get_fdata() == img.get_fdata())

def test_existing_file_xml_different_shape():
    xmlfile = op.join(THISDIR, 'mni2mm.xml')
    img = create_image((20, 20, 20), (3, 3, 3), (5, 5, 5), np.float32)
    img.to_filename('image.nii.gz')
    sp.run(['fslcreatehd', xmlfile, 'image.nii.gz'])
    result = nib.load('image.nii.gz')
    assert result.shape == (91, 109, 91)
    assert result.header.get_zooms()[:3] == (2.0, 2.0, 2.0)
    assert result.header['intent_code'] == 10
    assert result.header['intent_p1'] == 20
    assert result.header['intent_p2'] == 30
    assert result.header['intent_p3'] == 40
    assert np.all(result.affine == MNI152_2MM_AFFINE)
    # data should be cleared
    assert np.all(result.get_fdata() == 0)

def main():
    outdir = sys.argv[1]
    os.chdir(outdir)
    test_new_file()
    test_existing_file()
    test_new_file_xml()
    test_existing_file_xml_same_shape()
    test_existing_file_xml_different_shape()

if __name__ == '__main__':
    sys.exit(main())
<nifti_image
image_offset = '352'
ndim = '3'
nx = '91'
ny = '109'
nz = '91'
nt = '1'
dx = '2'
dy = '2'
dz = '2'
dt = '1'
datatype = '4'
nvox = '902629'
nbyper = '2'
scl_slope = '1'
scl_inter = '0'
intent_code = '10'
intent_p1 = '20'
intent_p2 = '30'
intent_p3 = '40'
intent_name = ''
toffset = '0'
xyz_units = '2'
time_units = '8'
freq_dim = '0'
phase_dim = '0'
slice_dim = '0'
descrip = 'FSL5.0'
aux_file = ''
qform_code = '4'
qfac = '-1'
quatern_b = '0'
quatern_c = '1'
quatern_d = '0'
qoffset_x = '90'
qoffset_y = '-126'
qoffset_z = '-72'
sform_code = '4'
sto_xyz_matrix = '-2 0 0 90 0 2 0 -126 0 0 2 -72 0 0 0 1'
slice_code = '0'
slice_start = '0'
slice_end = '0'
slice_duration = '0'
/>
@@ -132,7 +132,7 @@ def gen_data():
nib.Nifti1Image(S0, affine=affine).to_filename(f'{fdir}/ref_S0.nii.gz')
nib.Nifti1Image(data, affine=affine).to_filename(f'{fdir}/ref_data.nii.gz')
nib.Nifti1Image(np.ones(data.shape[:3], dtype=int), affine=affine).to_filename(f'{fdir}/nodif_brain_mask.nii.gz')
nib.Nifti1Image(np.ones(data.shape[:3], dtype=np.int32), affine=affine).to_filename(f'{fdir}/nodif_brain_mask.nii.gz')
tensor_components = diffusion_tensor[:, :, :, [0, 0, 0, 1, 1, 2], [0, 1, 2, 1, 2, 2]]
nib.Nifti1Image(tensor_components, affine=affine).to_filename(f'{fdir}/ref_tensor.nii.gz')
@@ -260,9 +260,3 @@ for directory, multi_shell, kurt in gen_data():
ref = nib.load(f'{directory}/ref_L1.nii.gz').get_fdata()
fit = nib.load(f'{base_output}_L1.nii.gz').get_fdata()
assert not np.allclose(ref, fit, rtol=1e-3, atol=1e-6)
#!/usr/bin/env fslpython
# test avscale with different input affines
import sys
import os
import fsl.utils.run as run
import numpy as np
PI = np.pi
PION2 = np.pi / 2
# Each test has the form (input-affine, expected-output)
# where expected-output comprises:
# - scales
# - translations
# - rotations
# - skews
# - L/R orientation (preserved or swapped)
tests = [
([[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]], [(1, 1, 1), (0, 0, 0), (0, 0, 0), (0, 0, 0), 'preserved']),
([[2, 0, 0, 0],
[0, 2, 0, 0],
[0, 0, 2, 0],
[0, 0, 0, 1]], [(2, 2, 2), (0, 0, 0), (0, 0, 0), (0, 0, 0), 'preserved']),
([[-2, 0, 0, 0],
[ 0, 2, 0, 0],
[ 0, 0, 2, 0],
[ 0, 0, 0, 1]], [(2, 2, 2), (0, 0, 0), (0, 0, PI), (0, 0, 0), 'swapped']),
([[0, 0, 1, 0],
[0, 1, 0, 0],
[1, 0, 0, 0],
[0, 0, 0, 1]], [(1, 1, 1), (0, 0, 0), (0, -PION2, 0), (0, 0, 0), 'swapped']),
([[ 0, 0, -1, 0],
[ 0, -1, 0, 0],
[-1, 0, 0, 0],
[ 0, 0, 0, 1]], [(1, 1, 1), (0, 0, 0), (-PI, PION2, 0), (0, 0, 0), 'preserved']),
([[0, 1, 0, 0],
[1, 0, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]], [(1, 1, 1), (0, 0, 0), (0, 0, PION2), (0, 0, 0), 'swapped']),
([[1, 0, 0, 0],
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 0, 0, 1]], [(1, 1, 1), (0, 0, 0), (PION2, 0, 0), (0, 0, 0), 'swapped']),
([[-2, 0, 0, 90],
[ 0, 2, 0, -126],
[ 0, 0, 2, -72],
[ 0, 0, 0, 1]], [(2, 2, 2), (90, -126, -72), (0, 0, PI), (0, 0, 0), 'swapped']),
]
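# Parse the scales/translations/rotations/skews and the L/R orientation line
# from the output of "avscale --allparams".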
def read_avscale_output(output):
    lines = output.split('\n')
    # scales
    # translations
    # rotations
    # skews
    # l/r orientation (preserved or swapped)
    scales = [l for l in lines if l.startswith('Scales ')] [0]
    translations = [l for l in lines if l.startswith('Translations ')] [0]
    rotations = [l for l in lines if l.startswith('Rotation Angles')][0]
    skews = [l for l in lines if l.startswith('Skews ')] [0]
    orient = [l for l in lines if l.startswith('Left-Right ')] [0]
    scales = [float(s) for s in scales .split()[-3:]]
    translations = [float(s) for s in translations.split()[-3:]]
    rotations = [float(s) for s in rotations .split()[-3:]]
    skews = [float(s) for s in skews .split()[-3:]]
    orient = orient.split()[-1]
    return {
        'scales' : np.array(scales),
        'translations' : np.array(translations),
        'rotations' : np.array(rotations),
        'skews' : np.array(skews),
        'orient' : orient.lower(),
    }
def test_avscale():
    for affine, expected in tests:
        affine = np.array(affine)
        np.savetxt('affine.mat', affine, '%0.6f')
        print(affine)
        output = run.runfsl('avscale --allparams affine.mat')
        output = read_avscale_output(output)
        scales, translations, rotations, skews, orient = expected
        try:
            assert np.all(np.isclose(scales, output['scales']))
            assert np.all(np.isclose(translations, output['translations']))
            assert np.all(np.isclose(rotations, output['rotations']))
            assert np.all(np.isclose(skews, output['skews']))
            assert orient == output['orient']
        except AssertionError:
            print(affine)
            print(output)
            raise

if __name__ == '__main__':
    if len(sys.argv) > 1:
        os.chdir(sys.argv[1])
    test_avscale()
#!/usr/bin/env fslpython
import os
import shlex
import sys
import subprocess as sp
import numpy as np
import nibabel as nib
from fsl.data.image import Image
import fsl.transform.affine as affine
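# Resample src onto the grid of ref using only the affines stored in the
# image headers (applywarp --usesqform), returning the resampled image.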
def applywarp(src, ref):
    cmd = f'applywarp --in={src} --ref={ref} --usesqform --out=out'
    sp.run(shlex.split(cmd), check=True)
    return Image('out')

def make_image(fname, data, sform, sform_code, qform, qform_code):
    hdr = nib.Nifti1Header()
    hdr.set_sform(sform, sform_code)
    hdr.set_qform(qform, qform_code)
    img = Image(data, header=hdr)
    img.save(fname)
    return img
# fsl/fugue!6
#
# Make sure that applywarp --usesqform
# will correctly use the sform or qform
#
# FSL affine precedence rules are as follows:
#
# 1. If sform_code != 0, use sform; else
# 2. If qform_code != 0, use qform; else
# 3. Use a scaling matrix with pixdims along the
# diagonal
def test_applywarp_uses_sqform_correctly():
    srcdata = np.zeros((20, 20, 20))
    refdata = np.zeros((20, 20, 20))
    # Data from the two images
    # aligned via their affines -
    # src should be resampled so
    # as to be identical to ref
    srcdata[ 6: 9, 6: 9, 6: 9] = 1
    refdata[16:19, 16:19, 16:19] = 1
    eye = np.eye(4)
    srcaff = affine.scaleOffsetXform(1, [ 5, 5, 5])
    refaff = affine.scaleOffsetXform(1, [-5, -5, -5])
    # aligned via sform
    src = make_image('src', srcdata, srcaff, 2, eye, 1)
    ref = make_image('ref', refdata, refaff, 2, eye, 1)
    result = applywarp('src', 'ref')
    assert np.all(np.isclose(result.data, ref.data))
    # aligned via sform again
    src = make_image('src', srcdata, srcaff, 2, eye, 0)
    ref = make_image('ref', refdata, refaff, 2, eye, 0)
    result = applywarp('src', 'ref')
    assert np.all(np.isclose(result.data, ref.data))
    # aligned via qform
    src = make_image('src', srcdata, eye, 0, srcaff, 2)
    ref = make_image('ref', refdata, eye, 0, refaff, 2)
    result = applywarp('src', 'ref')
    assert np.all(np.isclose(result.data, ref.data))
    # not aligned - should result in a scaling
    # matrix being used (an identity matrix here)
    src = make_image('src', srcdata, eye, 0, srcaff, 0)
    ref = make_image('ref', refdata, eye, 0, refaff, 0)
    result = applywarp('src', 'ref')
    assert not np.all(np.isclose(result.data, ref.data))
    assert np.all(np.isclose(result.data, src.data))

if __name__ == '__main__':
    os.chdir(sys.argv[1])
    test_applywarp_uses_sqform_correctly()
@@ -7,7 +7,7 @@ subdatadir=$datadir/fsl_course_data/seg_struc/vbm/stats
randomise_parallel -i $subdatadir/GM_mod_merg_s3 -o $outdir/output -m $subdatadir/GM_mask -d $subdatadir/design.mat -t $subdatadir/design.con -n 900 -x
fslpython -c "exit(0 if $(fslstats $outdir/output_tstat1 -M) == 0.613910 else 1)"
fslpython -c "exit(0 if $(fslstats $outdir/output_vox_corrp_tstat1 -M) == 0.084046 else 1)"
fslpython -c "exit(0 if $(fslstats $outdir/output_vox_corrp_tstat1 -M) == 0.057734 else 1)"
rm -rf "$outdir/output"*
include ${FSLCONFDIR}/default.mk
PROJNAME = utils_tests
XFILES = test_fslStartup
USRCXXFLAGS = -fopenmp
LIBS = -lfsl-utils
all: ${XFILES}
%: %.cc
	${CXX} ${CXXFLAGS} -o $@ $<

test_fslStartup: test_fslStartup.cc
	${CXX} ${CXXFLAGS} -o $@ $< ${LDFLAGS}
#!/usr/bin/env fslpython
# Note: this script requires a conda activate script to be on the PATH
import os
import shlex
import tempfile
import subprocess as sp
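# Build test_fslStartup (see the accompanying Makefile) and run it with
# different OpenMP/BLAS thread settings, checking that FSL's startup logic
# keeps BLAS single-threaded unless FSL_SKIP_GLOBAL=1 is set.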
def run(cmd, ompthreads=None, blasthreads=None, fslskipglobal=None):
    env = os.environ.copy()
    blacklist = ['OMP', 'GOTO', 'BLAS', 'FSL']
    for varname in list(env.keys()):
        if any(b in varname for b in blacklist):
            env.pop(varname)
    if ompthreads is not None: env['OMP_NUM_THREADS'] = str(ompthreads)
    if blasthreads is not None: env['BLAS_NUM_THREADS'] = str(blasthreads)
    if fslskipglobal is not None: env['FSL_SKIP_GLOBAL'] = str(fslskipglobal)
    result = sp.run(shlex.split(cmd), check=True, text=True,
                    stdout=sp.PIPE, stderr=sp.STDOUT, env=env)
    print(f'Called {cmd} {ompthreads} {blasthreads} {fslskipglobal}')
    print(f'  exit code: {result.returncode}')
    print(f'  stdout: {result.stdout.strip()}')
    return result.stdout.strip().split('\n')[-1]

def main():
    buildcmds = ['source activate $FSLDIR',
                 'source $FSLDIR/etc/fslconf/fsl-devel.sh',
                 'make']
    sp.run('; '.join(buildcmds), check=True, shell=True)
    # Default behaviour should be: OMP multi-threaded, BLAS single threaded.
    assert run('./test_fslStartup', 8, 8) == '8 1 8'
    assert run('./test_fslStartup', 4, 4) == '4 1 4'
    assert run('./test_fslStartup', 1, 1) == '1 1 1'
    # FSL_SKIP_GLOBAL=0 should be equivalent to default behaviour
    assert run('./test_fslStartup', 8, 8, 0) == '8 1 8'
    assert run('./test_fslStartup', 4, 4, 0) == '4 1 4'
    assert run('./test_fslStartup', 1, 1, 0) == '1 1 1'
    # With FSL_SKIP_GLOBAL=1, BLAS should be multi-threaded
    assert run('./test_fslStartup', 8, 8, 1) == '8 8 8'
    assert run('./test_fslStartup', 4, 4, 1) == '4 4 4'
    assert run('./test_fslStartup', 1, 1, 1) == '1 1 1'

if __name__ == '__main__':
    main()
#include <iostream>
#include "omp.h"
#include "cblas.h"
#include "utils/options.h"
using namespace std;
/*
* Interrogate and print out the number of threads used by OpenMP, and the
* number of threads used by OpenBLAS.
*
* The utils/fslStartup.cc file contains some global initialisation logic
* which controls the number of threads used by OpenBLAS. All we need to do
* to induce the fslStartup.cc code is link against libfsl-utils.so.
*/
int main(int argc, char *argv[]) {

  // Use something from libfsl-utils.so
  // to ensure that it gets linked.
  Utilities::OptionParser opts("test", "test");
  opts.usage();

  int omp_threads;
  int blas_threads;
  int sum[16];

  for (int i = 0; i < 16; i++) {
    sum[i] = 0;
  }

  // omp num threads should not be affected
  // by the FSL startup logic. Sum should
  // be equal to omp num threads.
  #pragma omp parallel
  {
    sum[omp_get_thread_num()] = 1;
    omp_threads = omp_get_num_threads();
  }

  // blas num threads should be controlled
  // by FSL startup logic.
  blas_threads = openblas_get_num_threads();

  for (int i = 1; i < 16; i++) {
    sum[0] += sum[i];
  }

  cout << omp_threads << " " << blas_threads << " " << sum[0] << endl;
}