#!/usr/bin/env bash
#
# This script is shared by all of the feedsRun.<eddytest>
# scripts. It runs eddy with a set of arguments, either
# directly, or by submitting to a cluster with fsl_sub,
# and then waits for eddy to finish. All eddy executables
# that are installed in $FSLDIR/bin/ are executed, and
# the outputs for each are directed to a different output
# prefix. All output prefixes are printed to standard
# output as the final line of output. The script returns
# an exit code of non-0 if something goes wrong.
#
# The eddy_cpu executable is executed twice - once with
# --nthr=1, and again with --nthr=6.
#
# Outputs of each eddy_<variant> executable is saved with
# prefix ${outdir}/eddyOutput_<variant>. For example, the
# output of eddy_cuda9.2 will be saved with prefix
# ${outdir}/eddyOutput_cuda9.2, and the output of
# eddy_cpu --nthr=1 will be saved with prefix
# ${outdir}/eddyOutput_cpu_nthr_1.

set -e

# First argument is directory in which to search
# for eddy executables (typically $FSLDIR/bin).
# Second argument is the output directory.
# All remaining arguments are to be passed to
# eddy. Don't pass the --out option, as that is
# added by this script.
exedir="$1"; shift
outdir="$1"; shift
eddy_args=("$@")

# Find all eddy_cuda* executables
cuda_exes=()
for cuda_exe in "${exedir}"/eddy_cuda*; do
  if [ -x "${cuda_exe}" ]; then
    cuda_exes+=("${cuda_exe}")
  fi
done

# Find all eddy_cpu executables. We run
# eddy_cpu twice - here, we add
# "eddy_cpu_nthr_X" to the list of
# executables to run. The name is unpacked
# in the loop below.
#
# We're using 6 threads here for the multi-
# threaded test, as it is the best option on
# the FMRIB cluster
cpu_exes=()
if [ -x "${exedir}/eddy_cpu" ]; then
  cpu_exes+=("${exedir}/eddy_cpu_nthr_1")
  cpu_exes+=("${exedir}/eddy_cpu_nthr_6")
fi

if [ "${#cuda_exes[@]}" -eq 0 ] && [ "${#cpu_exes[@]}" -eq 0 ]; then
  echo "Cannot find any eddy executables in ${exedir}!"
  exit 1
fi

# Launch both GPU and CPU versions.
# Store job IDs separately so we
# can qalter the openmp jobs below.
cuda_jids=""
cpu_jids=""
submitted=()

for exe_name in "${cuda_exes[@]}" "${cpu_exes[@]}"; do

  variant=$(basename "${exe_name}")
  variant="${variant#eddy_}"

  # Use a per-run copy of the eddy arguments.
  # NB: appending --nthr directly to eddy_args
  # (as the original code did) leaks the flag
  # into every subsequent loop iteration, so
  # e.g. the nthr_6 run would also receive
  # --nthr=1.
  run_args=("${eddy_args[@]}")

  # Build the submission command as an array so
  # paths containing spaces survive intact.
  if [[ "${exe_name}" == *"cuda"* ]]; then
    fsl_sub_cmd=(fsl_sub -l "${outdir}" --coprocessor=cuda)
    exe="${exe_name}"
  else
    # unpack eddy_cpu_nthr_N
    # into   eddy_cpu --nthr=N
    fsl_sub_cmd=(fsl_sub -l "${outdir}" -q long.q)
    exe="${exe_name%_nthr*}"
    nthr="${variant#cpu_nthr_}"
    run_args+=("--nthr=${nthr}")
    if [ "${nthr}" != "1" ]; then
      fsl_sub_cmd+=(-s "openmp,${nthr}")
    fi
  fi

  # fsl_sub will return an error (and not
  # output a job id) if we try to run a
  # cuda exe on a non-cuda-capable machine
  # or queue. So we allow it to fail.
  jid=$("${fsl_sub_cmd[@]}" "${exe}" "--out=${outdir}/eddyOutput_${variant}" "${run_args[@]}" || true)

  if [ "${jid}" == "" ]; then
    echo "Error submitting ${exe_name} - skipping"
    continue
  fi

  submitted+=("${exe_name}")

  if [[ "${exe}" == *"cuda"* ]]; then
    cuda_jids="${cuda_jids} ${jid}"
  else
    cpu_jids="${cpu_jids} ${jid}"
  fi
done

if [ "${#submitted[@]}" -eq 0 ]; then
  echo "Unable to submit any eddy executables!"
  exit 1
fi

# If running on a cluster, wait
# until all jobs have finished.
# If not running on a cluster,
# the above fsl_sub calls will
# have blocked until completion.
if [ -n "${SGE_ROOT:-}" ]; then

  # Ensure that slots are being reserved
  # on the queue for CPU jobs
  for jid in ${cpu_jids}; do
    qalter "${jid}" -R y
  done

  # Wait for all jobs to finish. qstat -j
  # produces no stdout once a job has left
  # the queue, so a job is "running" while
  # its qstat output is non-empty.
  while true; do
    running=0
    for jid in ${cuda_jids} ${cpu_jids}; do
      nlines=$(qstat -j "${jid}" 2>/dev/null | wc -l)
      if [ "${nlines}" -ne 0 ]; then
        running=1
        break
      fi
    done
    if [ "${running}" -eq 0 ]; then
      break
    fi
    sleep 1m
  done
fi

# Gather output prefixes for each run,
# and check that the main output file
# was created
outputs=""
for exe in "${submitted[@]}"; do
  variant=$(basename "${exe}")
  variant="${variant#eddy_}"
  prefix="${outdir}/eddyOutput_${variant}"
  outputs="${outputs} ${prefix}"
  # compgen -G safely tests whether the glob
  # matches anything - a plain [ -f prefix.nii* ]
  # errors out if more than one file matches.
  if ! compgen -G "${prefix}.nii*" > /dev/null; then
    echo "${prefix} is missing"
    exit 1
  fi
done

# Intentionally unquoted so the leading
# space is collapsed away by word splitting.
echo ${outputs}
exit 0