diff --git a/bip/main.py b/bip/main.py
index 9fa24b23c65ff12022f6cad4adae40d3ce0afbe1..55ccda19aa978e844f81a8dc3510c33c2a083069 100755
--- a/bip/main.py
+++ b/bip/main.py
@@ -16,17 +16,11 @@ from dataclasses import dataclass, field
 from file_tree import FileTree
 from pipe_tree import Pipeline, Ref
 import bip
-from bip.utils.log_utils           import setup_logging
-from bip.pipelines.struct_T1       import struct_T1
-from bip.pipelines.struct_T2_FLAIR import struct_T2_FLAIR
-from bip.pipelines.struct_FS       import struct_FS
-from bip.pipelines.struct_swMRI    import struct_swMRI
-from bip.pipelines.struct_asl      import struct_asl
-from bip.pipelines.dMRI_fieldmap   import dMRI_fieldmap
-from bip.pipelines.fMRI_task       import fMRI_task
-from bip.pipelines.fMRI_rest       import fMRI_rest
-from bip.pipelines.dMRI_diff       import dMRI_diff
-from bip.pipelines.IDPs_gen        import IDPs_gen
+from bip.utils.log_utils import setup_logging
+from bip.pipelines import struct_T1, struct_T2_FLAIR, struct_FS
+from bip.pipelines import struct_swMRI, struct_asl, IDPs_gen
+from bip.pipelines import dMRI_fieldmap, dMRI_diff, fMRI_task, fMRI_rest
+
 
 log = logging.getLogger('bip.main')
 
@@ -43,6 +37,7 @@ class Usage(Exception):
 
 @dataclass
 class Context:
+    subject:    str = ""
     BB_BIN_DIR: str = bip.__path__[0]
     FSLDIR:     str = os.environ['FSLDIR']
     gdc:        str = field(init=False)
@@ -332,19 +327,19 @@ def main():
     #pipe = Pipeline(default_submit=dict(logdir=Ref("logs_dir")))
     #ctx.save_context(tree.get('config_file'))
 
-    # This list will be filled with all desired outputs
+    # This list will be filled with all desired outputs later on
     targets = []
 
-    pipe, targets = struct_T1.add_to_pipeline(ctx, pipe, tree, targets)
-    pipe, targets = struct_T2_FLAIR.add_to_pipeline(ctx, pipe, tree, targets)
-    pipe, targets = struct_FS.add_to_pipeline(ctx, pipe, tree, targets)
-    pipe, targets = struct_swMRI.add_to_pipeline(ctx, pipe, tree, targets)
-    pipe, targets = struct_asl.add_to_pipeline(ctx, pipe, tree, targets)
-    pipe, targets = dMRI_fieldmap.add_to_pipeline(ctx, pipe, tree, targets)
-    pipe, targets = fMRI_task.add_to_pipeline(ctx, pipe, tree, targets)
-    pipe, targets = fMRI_rest.add_to_pipeline(ctx, pipe, tree, targets)
-    pipe, targets = dMRI_diff.add_to_pipeline(ctx, pipe, tree, targets)
-    pipe, targets = IDPs_gen.add_to_pipeline(ctx, pipe, tree, targets)
+    struct_T1.add_to_pipeline      (ctx, pipe, tree)
+    struct_T2_FLAIR.add_to_pipeline(ctx, pipe, tree)
+    struct_FS.add_to_pipeline      (ctx, pipe, tree)
+    struct_swMRI.add_to_pipeline   (ctx, pipe, tree)
+    struct_asl.add_to_pipeline     (ctx, pipe, tree)
+    dMRI_fieldmap.add_to_pipeline  (ctx, pipe, tree)
+    fMRI_task.add_to_pipeline      (ctx, pipe, tree)
+    fMRI_rest.add_to_pipeline      (ctx, pipe, tree)
+    dMRI_diff.add_to_pipeline      (ctx, pipe, tree)
+    IDPs_gen.add_to_pipeline       (ctx, pipe, tree)
 
     # The skip-missing flag deals with cases where the subject is missing
     # the data of some modalities. For more details, check:
diff --git a/bip/pipelines/IDPs_gen/IDP_SWI_T2star.py b/bip/pipelines/IDPs_gen/IDP_SWI_T2star.py
index 07fed14c353883122cabe299ea273ef37c4045e1..4226a1e1818d3a9d8af4d170e0b96fb7f6909e6d 100755
--- a/bip/pipelines/IDPs_gen/IDP_SWI_T2star.py
+++ b/bip/pipelines/IDPs_gen/IDP_SWI_T2star.py
@@ -14,7 +14,7 @@ import os
 import logging
 from fsl import wrappers
 from pipe_tree import In, Out, Ref
-from bip.utils.log_utils import redirect_logging
+from bip.utils.log_utils import redirect_logging, job_name
 
 log = logging.getLogger(__name__)
 
@@ -24,7 +24,7 @@ def run(ctx,
         logs_dir:                   Ref,
         IDP_SWI_T2star:             Out):
 
-    with redirect_logging('IDP_SWI_T2star', outdir=logs_dir):
+    with redirect_logging(job_name(run), outdir=logs_dir):
 
         result = ("NaN " * 14).strip()
 
diff --git a/bip/pipelines/IDPs_gen/IDP_T1_FIRST_vols.py b/bip/pipelines/IDPs_gen/IDP_T1_FIRST_vols.py
index f3ddee3e5806ceba1601cbfdfebb2ec815dbbf82..28d1fa2318836e91d8c94d541b5d3c87a438a808 100755
--- a/bip/pipelines/IDPs_gen/IDP_T1_FIRST_vols.py
+++ b/bip/pipelines/IDPs_gen/IDP_T1_FIRST_vols.py
@@ -14,7 +14,7 @@ import os
 import logging
 from fsl import wrappers
 from pipe_tree import In, Out, Ref
-from bip.utils.log_utils import redirect_logging
+from bip.utils.log_utils import redirect_logging, job_name
 
 log = logging.getLogger(__name__)
 
@@ -23,7 +23,7 @@ def run(ctx,
         logs_dir:                   Ref,
         IDP_T1_FIRST_vols:          Out):
 
-    with redirect_logging('IDP_T1_FIRST_vols', outdir=logs_dir):
+    with redirect_logging(job_name(run), outdir=logs_dir):
         result = ("NaN " * 15).strip()
 
         if os.path.exists(T1_first_all_fast_firstseg):
diff --git a/bip/pipelines/IDPs_gen/IDP_T1_GM_parcellation.py b/bip/pipelines/IDPs_gen/IDP_T1_GM_parcellation.py
index 9e52099d5aa6d2a325767d6e57c8a2a40498fd7f..d810c588faa7b18c35ba560688df14ef513ec172 100755
--- a/bip/pipelines/IDPs_gen/IDP_T1_GM_parcellation.py
+++ b/bip/pipelines/IDPs_gen/IDP_T1_GM_parcellation.py
@@ -12,7 +12,7 @@
 import logging
 from fsl import wrappers
 from pipe_tree import In, Out, Ref
-from bip.utils.log_utils import redirect_logging, tempdir
+from bip.utils.log_utils import redirect_logging, tempdir, job_name
 
 log = logging.getLogger(__name__)
 
@@ -24,7 +24,7 @@ def run(ctx,
         logs_dir:                Ref,
         IDP_T1_GM_parcellation:  Out):
 
-    with redirect_logging('IDP_T1_GM_parcellation', outdir=logs_dir),\
+    with redirect_logging(job_name(run), outdir=logs_dir),\
         tempdir(tmp_dir):
         result = ("NaN " * 139).strip()
 
diff --git a/bip/pipelines/IDPs_gen/IDP_T1_SIENAX.py b/bip/pipelines/IDPs_gen/IDP_T1_SIENAX.py
index 280594fe69cd578f1db48778fcffcda69a4fc458..02af2ef2a5f09e89d1655d0a99c86fd157da518d 100755
--- a/bip/pipelines/IDPs_gen/IDP_T1_SIENAX.py
+++ b/bip/pipelines/IDPs_gen/IDP_T1_SIENAX.py
@@ -14,7 +14,7 @@ import os
 import logging
 from shutil import copyfile
 from pipe_tree import In, Out, Ref
-from bip.utils.log_utils import redirect_logging
+from bip.utils.log_utils import redirect_logging, job_name
 
 log = logging.getLogger(__name__)
 
@@ -24,7 +24,7 @@ def run(ctx,
         logs_dir:         Ref,
         IDP_T1_SIENAX:    Out):
 
-    with redirect_logging('IDP_T1_SIENAX', outdir=logs_dir):
+    with redirect_logging(job_name(run), outdir=logs_dir):
 
         result = ("NaN " * 11).strip()
 
diff --git a/bip/pipelines/IDPs_gen/IDP_T1_align_to_std.py b/bip/pipelines/IDPs_gen/IDP_T1_align_to_std.py
index c37f63abaacdf0825ed9ea57ffaf3ed1d51cdba0..92f265dbe07badd13982fbd0cf2e67a1b9902af1 100755
--- a/bip/pipelines/IDPs_gen/IDP_T1_align_to_std.py
+++ b/bip/pipelines/IDPs_gen/IDP_T1_align_to_std.py
@@ -12,7 +12,7 @@
 import logging
 from fsl import wrappers
 from pipe_tree import In, Out, Ref
-from bip.utils.log_utils import redirect_logging, tempdir
+from bip.utils.log_utils import redirect_logging, tempdir, job_name
 
 log = logging.getLogger(__name__)
 
@@ -25,7 +25,7 @@ def run(ctx,
         logs_dir:             Ref,
         IDP_T1_align_to_std:  Out):
 
-    with redirect_logging('IDP_T1_align_to_std', outdir=logs_dir),\
+    with redirect_logging(job_name(run), outdir=logs_dir),\
         tempdir(tmp_dir):
 
         tmp_jac = tmp_dir + '/tmpjac.nii.gz'
diff --git a/bip/pipelines/IDPs_gen/IDP_T1_noise_ratio.py b/bip/pipelines/IDPs_gen/IDP_T1_noise_ratio.py
index f557f409604e88d306c42da03b1f7b8eee4f7303..eb8dc9e64ca33ce02e461dc455fb8c1d86a83222 100755
--- a/bip/pipelines/IDPs_gen/IDP_T1_noise_ratio.py
+++ b/bip/pipelines/IDPs_gen/IDP_T1_noise_ratio.py
@@ -13,7 +13,7 @@
 import logging
 from fsl import wrappers
 from pipe_tree import In, Out, Ref
-from bip.utils.log_utils import redirect_logging, tempdir
+from bip.utils.log_utils import redirect_logging, tempdir, job_name
 
 log = logging.getLogger(__name__)
 
@@ -24,7 +24,7 @@ def run(ctx,
         logs_dir:           Ref,
         IDP_T1_noise_ratio: Out):
 
-    with redirect_logging('IDP_T1_noise_ratio', outdir=logs_dir),\
+    with redirect_logging(job_name(run), outdir=logs_dir),\
         tempdir(tmp_dir):
 
         tmp_SNR = tmp_dir + '/tmp_SNR.nii.gz'
diff --git a/bip/pipelines/IDPs_gen/IDP_T2_FLAIR_WMH.py b/bip/pipelines/IDPs_gen/IDP_T2_FLAIR_WMH.py
index 9f7bfad173eec7eb42c4ef050af232cd6a6e966f..8c178034de9cb2ee86aa3c68c2265b4deb9f6701 100755
--- a/bip/pipelines/IDPs_gen/IDP_T2_FLAIR_WMH.py
+++ b/bip/pipelines/IDPs_gen/IDP_T2_FLAIR_WMH.py
@@ -13,7 +13,7 @@
 import logging
 from shutil import copyfile
 from pipe_tree import In, Out, Ref
-from bip.utils.log_utils import redirect_logging
+from bip.utils.log_utils import redirect_logging, job_name
 
 log = logging.getLogger(__name__)
 
@@ -22,5 +22,5 @@ def run(ctx,
         logs_dir:               Ref,
         IDP_T2_FLAIR_WMH:       Out):
 
-    with redirect_logging('IDP_T2_FLAIR_WMH', outdir=logs_dir):
+    with redirect_logging(job_name(run), outdir=logs_dir):
         copyfile(src=T2_FLAIR_bianca_volume, dst=IDP_T2_FLAIR_WMH)
diff --git a/bip/pipelines/IDPs_gen/IDP_all_align_to_T1.py b/bip/pipelines/IDPs_gen/IDP_all_align_to_T1.py
index bb86360760f33777333172e7fa6f993a76f3712e..ace1ba26773ca580a6a6c78cd5ba8cc1ef2813ae 100755
--- a/bip/pipelines/IDPs_gen/IDP_all_align_to_T1.py
+++ b/bip/pipelines/IDPs_gen/IDP_all_align_to_T1.py
@@ -13,7 +13,7 @@ import os
 import logging
 from fsl import wrappers
 from pipe_tree import In, Out, Ref
-from bip.utils.log_utils import redirect_logging, tempdir
+from bip.utils.log_utils import redirect_logging, tempdir, job_name
 
 log = logging.getLogger(__name__)
 
@@ -29,7 +29,7 @@ def run(ctx,
         logs_dir:                   Ref,
         IDP_all_align_to_T1:        Out):
 
-    with redirect_logging('IDP_all_align_to_T1', outdir=logs_dir),\
+    with redirect_logging(job_name(run), outdir=logs_dir),\
         tempdir(tmp_dir):
 
         tmp_mat = tmp_dir + '/tmp_mat.mat'
diff --git a/bip/pipelines/IDPs_gen/IDP_diff_TBSS.py b/bip/pipelines/IDPs_gen/IDP_diff_TBSS.py
index 5049ca2842660de75fbb775afcc655e2b103ec94..c5f60043317792461ff8072ad19524f38bd0b35f 100755
--- a/bip/pipelines/IDPs_gen/IDP_diff_TBSS.py
+++ b/bip/pipelines/IDPs_gen/IDP_diff_TBSS.py
@@ -13,7 +13,7 @@
 import os
 import logging
 from pipe_tree import In, Out, Ref
-from bip.utils.log_utils import redirect_logging
+from bip.utils.log_utils import redirect_logging, job_name
 
 log = logging.getLogger(__name__)
 
@@ -23,7 +23,7 @@ def run(ctx,
         JHUrois_prefix: Ref,
         IDP_diff_TBSS:  Out):
 
-    with redirect_logging('IDP_diff_TBSS', outdir=logs_dir):
+    with redirect_logging(job_name(run), outdir=logs_dir):
 
         nan_result = ("NaN " * 48).strip()
 
diff --git a/bip/pipelines/IDPs_gen/IDP_diff_autoPtx.py b/bip/pipelines/IDPs_gen/IDP_diff_autoPtx.py
index d0b6a7f12f6b6160ef886ece505f1e6956b8d42c..fece943161cd2957c74932bbd7f65dc5dc04a605 100755
--- a/bip/pipelines/IDPs_gen/IDP_diff_autoPtx.py
+++ b/bip/pipelines/IDPs_gen/IDP_diff_autoPtx.py
@@ -15,7 +15,7 @@ import logging
 import nibabel as nib
 from fsl import wrappers
 from pipe_tree import In, Out, Ref, Var
-from bip.utils.log_utils import redirect_logging, tempdir
+from bip.utils.log_utils import redirect_logging, tempdir, job_name
 
 log = logging.getLogger(__name__)
 
@@ -28,8 +28,8 @@ def run(ctx,
         autoptx_tract:      Var(no_iter=True),
         IDP_diff_autoPtx:   Out):
 
-    with redirect_logging('IDP_diff_autoPtx', outdir=logs_dir),\
-        tempdir(tmp_dir):
+    with redirect_logging(job_name(run), outdir=logs_dir),\
+        tempdir(tmp_dir + "/" + __name__):
 
         autoPtx_all = tmp_dir + '/autoPtx_all.nii.gz'
         autoPtx_tmp = tmp_dir + '/autoPtx_tmp.nii.gz'
diff --git a/bip/pipelines/IDPs_gen/IDP_diff_eddy_outliers.py b/bip/pipelines/IDPs_gen/IDP_diff_eddy_outliers.py
index 75f26b2bbce8f4ae6ed38a01729206e19266d924..6514adbfd733eb4d3a34e8136a03b0f5fbdfd3da 100755
--- a/bip/pipelines/IDPs_gen/IDP_diff_eddy_outliers.py
+++ b/bip/pipelines/IDPs_gen/IDP_diff_eddy_outliers.py
@@ -13,7 +13,7 @@
 import os
 import logging
 from pipe_tree import In, Out, Ref
-from bip.utils.log_utils import redirect_logging
+from bip.utils.log_utils import redirect_logging, job_name
 
 log = logging.getLogger(__name__)
 
@@ -22,7 +22,7 @@ def run(ctx,
         logs_dir:               Ref,
         IDP_diff_eddy_outliers: Out):
 
-    with redirect_logging('IDP_diff_eddy_outliers', outdir=logs_dir):
+    with redirect_logging(job_name(run), outdir=logs_dir):
 
         num_outliers = 0
 
diff --git a/bip/pipelines/IDPs_gen/IDP_func_TSNR.py b/bip/pipelines/IDPs_gen/IDP_func_TSNR.py
index 61143a055971ef213dff6bc3951b18362a2e2822..249417dbdfe78bd32abb077f78c4ec66edc2a40d 100755
--- a/bip/pipelines/IDPs_gen/IDP_func_TSNR.py
+++ b/bip/pipelines/IDPs_gen/IDP_func_TSNR.py
@@ -14,7 +14,7 @@ import os
 import logging
 from fsl import wrappers
 from pipe_tree import In, Out, Ref
-from bip.utils.log_utils import redirect_logging, tempdir
+from bip.utils.log_utils import redirect_logging, tempdir, job_name
 
 log = logging.getLogger(__name__)
 
@@ -26,7 +26,7 @@ def run(ctx,
         tmp_dir:                  Ref,
         IDP_func_TSNR:            Out):
 
-    with redirect_logging('IDP_func_TSNR', outdir=logs_dir),\
+    with redirect_logging(job_name(run), outdir=logs_dir),\
         tempdir(tmp_dir):
 
         fMRI_SNR = tmp_dir + '/fMRI_SNR.nii.gz'
diff --git a/bip/pipelines/IDPs_gen/IDP_func_head_motion.py b/bip/pipelines/IDPs_gen/IDP_func_head_motion.py
index 4fcd30a5e940496124fac9d12ae89d41389a3b70..8bf3a0c4fd827d6b148df4b5340cec67db44d4d3 100755
--- a/bip/pipelines/IDPs_gen/IDP_func_head_motion.py
+++ b/bip/pipelines/IDPs_gen/IDP_func_head_motion.py
@@ -14,7 +14,7 @@ import os
 import json
 import logging
 from pipe_tree import In, Out, Ref
-from bip.utils.log_utils import redirect_logging
+from bip.utils.log_utils import redirect_logging, job_name
 
 log = logging.getLogger(__name__)
 
@@ -24,7 +24,7 @@ def run(ctx,
         logs_dir:             Ref,
         IDP_func_head_motion: Out):
 
-    with redirect_logging('IDP_func_head_motion', outdir=logs_dir):
+    with redirect_logging(job_name(run), outdir=logs_dir):
 
         if os.path.exists(rfMRI_mc_rel_mean):
             with open(rfMRI_mc_rel_mean, "r", encoding="utf-8") as f:
diff --git a/bip/pipelines/IDPs_gen/IDP_func_task_activation.py b/bip/pipelines/IDPs_gen/IDP_func_task_activation.py
index d56aab7ca067ac5241ee7eeaec7b492f640ffbfd..d0263354325f6c4069882ac1b3525d1e34727bc6 100755
--- a/bip/pipelines/IDPs_gen/IDP_func_task_activation.py
+++ b/bip/pipelines/IDPs_gen/IDP_func_task_activation.py
@@ -14,7 +14,7 @@ import shutil
 import logging
 from fsl import wrappers
 from pipe_tree import In, Out, Ref
-from bip.utils.log_utils import redirect_logging
+from bip.utils.log_utils import redirect_logging, job_name
 
 log = logging.getLogger(__name__)
 
@@ -41,7 +41,7 @@ def run(ctx,
         tfMRI_featquery_5a_report: Out,
         IDP_func_task_activation:  Out):
 
-    with redirect_logging('IDP_func_task_activation', outdir=logs_dir):
+    with redirect_logging(job_name(run), outdir=logs_dir):
         if not os.path.exists(highres2standard_warp):
             os.symlink(src=os.getcwd() + "/" + T1_to_MNI_warp,
                        dst=highres2standard_warp)
diff --git a/bip/pipelines/IDPs_gen/IDP_subject_COG_table.py b/bip/pipelines/IDPs_gen/IDP_subject_COG_table.py
index 54fed6c2edfe233432c5d2121fdb81aed0704029..94ddb79cf2112de177eadad4cd50b93361594893 100755
--- a/bip/pipelines/IDPs_gen/IDP_subject_COG_table.py
+++ b/bip/pipelines/IDPs_gen/IDP_subject_COG_table.py
@@ -13,7 +13,7 @@
 import os
 import logging
 from pipe_tree import In, Out, Ref
-from bip.utils.log_utils import redirect_logging
+from bip.utils.log_utils import redirect_logging, job_name
 
 log = logging.getLogger(__name__)
 
@@ -28,7 +28,7 @@ def run(ctx,
         logs_dir:           Ref,
         IDP_subject_COG_table: Out):
 
-    with redirect_logging('IDP_subject_COG_table', outdir=logs_dir):
+    with redirect_logging(job_name(run), outdir=logs_dir):
 
         with open(T1_QC_COG, "r", encoding="utf-8") as f:
             COG  = f.read().strip()
diff --git a/bip/pipelines/IDPs_gen/IDP_subject_ID.py b/bip/pipelines/IDPs_gen/IDP_subject_ID.py
index aa3ed635b56fb38a99dcfb18be2ae105d6ddc44e..add6dd61954644977ac8f63152780ac24ea7dfd0 100755
--- a/bip/pipelines/IDPs_gen/IDP_subject_ID.py
+++ b/bip/pipelines/IDPs_gen/IDP_subject_ID.py
@@ -12,7 +12,7 @@
 
 import logging
 from pipe_tree import In, Out, Ref
-from bip.utils.log_utils import redirect_logging
+from bip.utils.log_utils import redirect_logging, job_name
 
 log = logging.getLogger(__name__)
 
@@ -21,6 +21,6 @@ def run(ctx,
         IDP_subject_ID: Out,
         logs_dir:       Ref):
 
-    with redirect_logging('IDP_subject_ID', outdir=logs_dir):
+    with redirect_logging(job_name(run), outdir=logs_dir):
         with open(IDP_subject_ID, 'wt', encoding="utf-8") as f:
             f.write(f'{ctx.subject}\n')
diff --git a/bip/pipelines/IDPs_gen/IDP_subject_centre.py b/bip/pipelines/IDPs_gen/IDP_subject_centre.py
index d81ec6182dfedddb34173e2aa39fd41a5d93d584..80a077c493d0f600336fcca22401b4cad052662b 100755
--- a/bip/pipelines/IDPs_gen/IDP_subject_centre.py
+++ b/bip/pipelines/IDPs_gen/IDP_subject_centre.py
@@ -13,7 +13,7 @@ import os
 import json
 import logging
 from pipe_tree import In, Out, Ref
-from bip.utils.log_utils import redirect_logging
+from bip.utils.log_utils import redirect_logging, job_name
 
 log = logging.getLogger(__name__)
 
@@ -27,7 +27,7 @@ def run(ctx,
         logs_dir:           Ref,
         IDP_subject_centre: Out):
 
-    with redirect_logging('IDP_subject_centre', outdir=logs_dir):
+    with redirect_logging(job_name(run), outdir=logs_dir):
 
         line = ""
         address_text = "(0008, 0081) Institution Address"
diff --git a/bip/pipelines/IDPs_gen/IDPs_gen.py b/bip/pipelines/IDPs_gen/IDPs_gen.py
deleted file mode 100755
index 52e4e13174e3f36d8d6061bf4be90aa29178e760..0000000000000000000000000000000000000000
--- a/bip/pipelines/IDPs_gen/IDPs_gen.py
+++ /dev/null
@@ -1,132 +0,0 @@
-#!/usr/bin/env python
-#
-# struct_T1.py - Pipeline with the T1w processing.
-#
-# Author: Fidel Alfaro Almagro <fidel.alfaroalmagro@ndcn.ox.ac.uk>
-# Author: Paul McCarthy <pauldmccarthy@gmail.com>
-# Author: Michiel Cottaar <michiel.cottaar@ndcn.ox.ac.uk>
-#
-# pylint: disable=C0103,E0602,C0114,C0115,C0116,R0913,R0914,R0915
-#
-
-import logging
-from bip.utils.log_utils    import redirect_logging
-from bip.pipelines.IDPs_gen import IDP_subject_ID
-from bip.pipelines.IDPs_gen import IDP_subject_centre
-from bip.pipelines.IDPs_gen import IDP_subject_COG_table
-from bip.pipelines.IDPs_gen import IDP_all_align_to_T1
-from bip.pipelines.IDPs_gen import IDP_T1_FIRST_vols
-from bip.pipelines.IDPs_gen import IDP_T1_SIENAX
-from bip.pipelines.IDPs_gen import IDP_T1_GM_parcellation
-from bip.pipelines.IDPs_gen import IDP_T1_align_to_std
-from bip.pipelines.IDPs_gen import IDP_T1_noise_ratio
-from bip.pipelines.IDPs_gen import IDP_T2_FLAIR_WMH
-from bip.pipelines.IDPs_gen import IDP_SWI_T2star
-from bip.pipelines.IDPs_gen import IDP_func_head_motion
-from bip.pipelines.IDPs_gen import IDP_func_TSNR
-from bip.pipelines.IDPs_gen import IDP_func_task_activation
-from bip.pipelines.IDPs_gen import IDP_diff_eddy_outliers
-from bip.pipelines.IDPs_gen import IDP_diff_TBSS
-from bip.pipelines.IDPs_gen import IDP_diff_autoPtx
-from bip.pipelines.IDPs_gen import IDPs_generator
-
-log = logging.getLogger(__name__)
-
-def add_to_pipeline(ctx, pipe, tree, targets):
-
-    logs_dir=tree.get('logs_dir')
-
-    subj = ctx.subject
-
-    with redirect_logging('pipe_IDPs_gen', outdir=logs_dir):
-        pipe(IDP_subject_ID.run,
-             submit=dict(jobtime=200, name="BIP_IDP_subject_ID_" + subj),
-             kwargs={'ctx' : ctx})
-        targets.append('IDP_subject_ID')
-
-        pipe(IDP_T1_align_to_std.run,
-             submit=dict(jobtime=200, name="BIP_IDP_T1_align_to_std_" + subj),
-             kwargs={'ctx' : ctx})
-        targets.append('IDP_T1_align_to_std')
-
-        pipe(IDP_T1_noise_ratio.run,
-             submit=dict(jobtime=200, name="BIP_IDP_T1_noise_ratio_" + subj),
-             kwargs={'ctx' : ctx})
-        targets.append('IDP_T1_noise_ratio')
-
-        pipe(IDP_all_align_to_T1.run,
-             submit=dict(jobtime=200, name="BIP_IDP_all_align_to_T1_" + subj),
-             kwargs={'ctx' : ctx})
-        targets.append('IDP_all_align_to_T1')
-
-        pipe(IDP_func_head_motion.run,
-             submit=dict(jobtime=200, name="BIP_IDP_func_head_motion_" + subj),
-             kwargs={'ctx' : ctx})
-        targets.append('IDP_func_head_motion')
-
-        pipe(IDP_func_TSNR.run,
-             submit=dict(jobtime=200, name="BIP_IDP_func_TSNR_" + subj),
-             kwargs={'ctx' : ctx})
-        targets.append('IDP_func_TSNR')
-
-        pipe(IDP_diff_eddy_outliers.run,
-             submit=dict(jobtime=200, name="BIP_IDP_diff_eddy_outliers_"+ subj),
-             kwargs={'ctx' : ctx})
-        targets.append('IDP_diff_eddy_outliers')
-
-        pipe(IDP_T1_SIENAX.run,
-             submit=dict(jobtime=200, name="BIP_IDP_T1_SIENAX_" + subj),
-             kwargs={'ctx' : ctx})
-        targets.append('IDP_T1_SIENAX')
-
-        pipe(IDP_T1_FIRST_vols.run,
-             submit=dict(jobtime=200, name="BIP_IDP_T1_FIRST_vols_" + subj),
-             kwargs={'ctx' : ctx})
-        targets.append('IDP_T1_FIRST_vols')
-
-        pipe(IDP_T1_GM_parcellation.run,
-             submit=dict(jobtime=200, name="BIP_IDP_T1_GM_parcellation_" +subj),
-             kwargs={'ctx' : ctx})
-        targets.append('IDP_T1_GM_parcellation')
-
-        pipe(IDP_T2_FLAIR_WMH.run,
-             submit=dict(jobtime=200, name="BIP_IDP_T2_FLAIR_WMH_" + subj),
-             kwargs={'ctx' : ctx})
-        targets.append('IDP_T2_FLAIR_WMH')
-
-        pipe(IDP_SWI_T2star.run,
-             submit=dict(jobtime=200, name="BIP_IDP_SWI_T2star_" + subj),
-             kwargs={'ctx' : ctx})
-        targets.append('IDP_SWI_T2star')
-
-        pipe(IDP_func_task_activation.run,
-             submit=dict(jobtime=200,name="BIP_IDP_func_task_activation_"+subj),
-             kwargs={'ctx' : ctx})
-        targets.append('IDP_func_task_activation')
-
-        pipe(IDP_diff_TBSS.run,
-             submit=dict(jobtime=200, name="BIP_IDP_diff_TBSS_"+ subj),
-             kwargs={'ctx' : ctx})
-        targets.append('IDP_diff_TBSS')
-
-        pipe(IDP_diff_autoPtx.run,
-             submit=dict(jobtime=200, name="BIP_IDP_diff_autoPtx_"+ subj),
-             kwargs={'ctx' : ctx})
-        targets.append('IDP_diff_autoPtx')
-
-        pipe(IDP_subject_COG_table.run,
-             submit=dict(jobtime=200, name="BIP_IDP_subject_COG_table_" + subj),
-             kwargs={'ctx' : ctx})
-        targets.append('IDP_subject_COG_table')
-
-        pipe(IDP_subject_centre.run,
-             submit=dict(jobtime=200, name="BIP_IDP_subject_centre_" + subj),
-             kwargs={'ctx' : ctx})
-        targets.append('IDP_subject_centre')
-
-        pipe(IDPs_generator.run,
-             submit=dict(jobtime=200, name="BIP_IDPs_generator_" + subj),
-             kwargs={'ctx' : ctx})
-        targets.append('IDPs')
-
-    return pipe, targets
diff --git a/bip/pipelines/IDPs_gen/IDPs_generator.py b/bip/pipelines/IDPs_gen/IDPs_generator.py
index bdaeb62356ce0601571f2d5515dcdd026a5b42fe..9e7c9ca0050b7ca14ad61ffc9e62556eaf8d0958 100755
--- a/bip/pipelines/IDPs_gen/IDPs_generator.py
+++ b/bip/pipelines/IDPs_gen/IDPs_generator.py
@@ -13,7 +13,7 @@ import os
 import json
 import logging
 from pipe_tree import In, Out, Ref
-from bip.utils.log_utils import redirect_logging
+from bip.utils.log_utils import redirect_logging, job_name
 
 log = logging.getLogger(__name__)
 
@@ -38,7 +38,7 @@ def run(ctx,
         logs_dir:                 Ref,
         IDPs:                     Out):
 
-    with redirect_logging('IDPs_generator', outdir=logs_dir):
+    with redirect_logging(job_name(run), outdir=logs_dir):
 
         result=""
         IDPs_json_file = ctx.get_data("IDPs/IDPs.json")
diff --git a/bip/pipelines/IDPs_gen/__init__.py b/bip/pipelines/IDPs_gen/__init__.py
new file mode 100755
index 0000000000000000000000000000000000000000..1a0bcbfee65f4430af2fcafaf5c16f86b1164df0
--- /dev/null
+++ b/bip/pipelines/IDPs_gen/__init__.py
@@ -0,0 +1,88 @@
+#!/usr/bin/env python
+#
+# __init__.py - Pipeline with the IDPs generation.
+#
+# Author: Fidel Alfaro Almagro <fidel.alfaroalmagro@ndcn.ox.ac.uk>
+# Author: Paul McCarthy <pauldmccarthy@gmail.com>
+# Author: Michiel Cottaar <michiel.cottaar@ndcn.ox.ac.uk>
+#
+# pylint: disable=C0103,E0602,C0114,C0115,C0116,R0913,R0914,R0915
+# pylint: disable=W0613
+#
+
+import logging
+from bip.utils.log_utils    import job_name
+from bip.pipelines.IDPs_gen import IDP_subject_ID, IDP_subject_centre
+from bip.pipelines.IDPs_gen import IDP_subject_COG_table, IDP_all_align_to_T1
+from bip.pipelines.IDPs_gen import IDP_T1_FIRST_vols, IDP_T1_SIENAX
+from bip.pipelines.IDPs_gen import IDP_T1_GM_parcellation, IDP_T1_align_to_std
+from bip.pipelines.IDPs_gen import IDP_T1_noise_ratio, IDP_T2_FLAIR_WMH
+from bip.pipelines.IDPs_gen import IDP_SWI_T2star, IDP_func_head_motion
+from bip.pipelines.IDPs_gen import IDP_func_TSNR, IDP_func_task_activation
+from bip.pipelines.IDPs_gen import IDP_diff_eddy_outliers, IDP_diff_TBSS
+from bip.pipelines.IDPs_gen import IDP_diff_autoPtx, IDPs_generator
+
+log = logging.getLogger(__name__)
+
+def add_to_pipeline(ctx, pipe, tree):
+
+    subj = ctx.subject
+
+    pipe(IDP_subject_ID.run,
+         submit=dict(jobtime=200, name=job_name(IDP_subject_ID.run, subj)),
+         kwargs={'ctx' : ctx})
+    pipe(IDP_T1_align_to_std.run,
+         submit=dict(jobtime=200, name=job_name(IDP_T1_align_to_std.run, subj)),
+         kwargs={'ctx' : ctx})
+    pipe(IDP_T1_noise_ratio.run,
+         submit=dict(jobtime=200, name=job_name(IDP_T1_noise_ratio.run, subj)),
+         kwargs={'ctx' : ctx})
+    pipe(IDP_all_align_to_T1.run,
+         submit=dict(jobtime=200, name=job_name(IDP_all_align_to_T1.run, subj)),
+         kwargs={'ctx' : ctx})
+    pipe(IDP_func_head_motion.run,
+         submit=dict(jobtime=200, name=job_name(IDP_func_head_motion.run,subj)),
+         kwargs={'ctx' : ctx})
+    pipe(IDP_func_TSNR.run,
+         submit=dict(jobtime=200, name=job_name(IDP_func_TSNR.run, subj)),
+         kwargs={'ctx' : ctx})
+    pipe(IDP_diff_eddy_outliers.run,
+         submit=dict(jobtime=200, name=job_name(IDP_diff_eddy_outliers.run,
+                                                subj)),
+         kwargs={'ctx' : ctx})
+    pipe(IDP_T1_SIENAX.run,
+         submit=dict(jobtime=200, name=job_name(IDP_T1_SIENAX.run, subj)),
+         kwargs={'ctx' : ctx})
+    pipe(IDP_T1_FIRST_vols.run,
+         submit=dict(jobtime=200, name=job_name(IDP_T1_FIRST_vols.run, subj)),
+         kwargs={'ctx' : ctx})
+    pipe(IDP_T1_GM_parcellation.run,
+         submit=dict(jobtime=200, name=job_name(IDP_T1_GM_parcellation.run,
+                                                subj)),
+         kwargs={'ctx' : ctx})
+    pipe(IDP_T2_FLAIR_WMH.run,
+         submit=dict(jobtime=200, name=job_name(IDP_T2_FLAIR_WMH.run,subj)),
+         kwargs={'ctx' : ctx})
+    pipe(IDP_SWI_T2star.run,
+         submit=dict(jobtime=200, name=job_name(IDP_SWI_T2star.run, subj)),
+         kwargs={'ctx' : ctx})
+    pipe(IDP_func_task_activation.run,
+         submit=dict(jobtime=200,name=job_name(IDP_func_task_activation.run,
+                                               subj)),
+         kwargs={'ctx' : ctx})
+    pipe(IDP_diff_TBSS.run,
+         submit=dict(jobtime=200, name=job_name(IDP_diff_TBSS.run, subj)),
+         kwargs={'ctx' : ctx})
+    pipe(IDP_diff_autoPtx.run,
+         submit=dict(jobtime=200, name=job_name(IDP_diff_autoPtx.run, subj)),
+         kwargs={'ctx' : ctx})
+    pipe(IDP_subject_COG_table.run,
+         submit=dict(jobtime=200, name=job_name(IDP_subject_COG_table.run,
+                                                subj)),
+         kwargs={'ctx' : ctx})
+    pipe(IDP_subject_centre.run,
+         submit=dict(jobtime=200, name=job_name(IDP_subject_centre.run, subj)),
+         kwargs={'ctx' : ctx})
+    pipe(IDPs_generator.run,
+         submit=dict(jobtime=200, name=job_name(IDPs_generator.run, subj)),
+         kwargs={'ctx' : ctx})
diff --git a/bip/pipelines/dMRI_diff/__init__.py b/bip/pipelines/dMRI_diff/__init__.py
old mode 100644
new mode 100755
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..5f7840c8c6270ca87158247ff24a4711e3e7ae91
--- a/bip/pipelines/dMRI_diff/__init__.py
+++ b/bip/pipelines/dMRI_diff/__init__.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+#
+# __init__.py - Pipeline with the dMRI processing.
+#
+# Author: Fidel Alfaro Almagro <fidel.alfaroalmagro@ndcn.ox.ac.uk>
+# Author: Paul McCarthy <pauldmccarthy@gmail.com>
+# Author: Michiel Cottaar <michiel.cottaar@ndcn.ox.ac.uk>
+#
+# pylint: disable=C0103,E0602,C0114,C0115,C0116,R0913,R0914,R0915
+# pylint: disable=W0613
+#
+
+import logging
+from bip.utils.log_utils     import job_name
+from bip.pipelines.dMRI_diff import diff_eddy, diff_dtifit, diff_noddi
+from bip.pipelines.dMRI_diff import diff_tbss, diff_bedpostx, diff_autoptx
+
+log = logging.getLogger(__name__)
+
+def add_to_pipeline(ctx, pipe, tree):
+
+    subj = ctx.subject
+
+    # TODO: Make this configurable
+    cuda_eddy_dict = dict(queue="short.qg@@win-uk-biobank-gpu",
+                          coprocessor="cuda",
+                          coprocessor_class="P",
+                          coprocessor_toolkit="8.0",
+                          name=job_name(diff_eddy.run, subj))
+
+    cuda_bedpostx_dict = dict(queue="short.qg@@win-uk-biobank-gpu",
+                              coprocessor="cuda",
+                              coprocessor_class="P",
+                              coprocessor_toolkit="6.5",
+                              name=job_name(diff_bedpostx.run, subj))
+    pipe(diff_eddy.run,
+         submit=cuda_eddy_dict,
+         kwargs={'ctx' : ctx})
+    pipe(diff_dtifit.run,
+         submit=dict(jobtime=200, name=job_name(diff_dtifit.run, subj)),
+         kwargs={'ctx' : ctx})
+    pipe(diff_noddi.run,
+         submit=dict(jobtime=200, name=job_name(diff_noddi.run, subj)),
+         kwargs={'ctx' : ctx})
+    pipe(diff_tbss.run,
+         submit=dict(jobtime=200, name=job_name(diff_tbss.run, subj)),
+         kwargs={'ctx' : ctx})
+    pipe(diff_bedpostx.run,
+         submit=cuda_bedpostx_dict,
+         kwargs={'ctx' : ctx})
+    pipe(diff_autoptx.run,
+         submit=dict(jobtime=200, name=job_name(diff_autoptx.run, subj)),
+         kwargs={'ctx' : ctx})
diff --git a/bip/pipelines/dMRI_diff/dMRI_diff.py b/bip/pipelines/dMRI_diff/dMRI_diff.py
deleted file mode 100755
index c93706305a0ff07de44ec041c40015bd0fd32ba4..0000000000000000000000000000000000000000
--- a/bip/pipelines/dMRI_diff/dMRI_diff.py
+++ /dev/null
@@ -1,68 +0,0 @@
-#!/usr/bin/env python
-#
-# dMRI_diff.py - Pipeline with the dfMRI processing.
-#
-# Author: Fidel Alfaro Almagro <fidel.alfaroalmagro@ndcn.ox.ac.uk>
-# Author: Paul McCarthy <pauldmccarthy@gmail.com>
-# Author: Michiel Cottaar <michiel.cottaar@ndcn.ox.ac.uk>
-#
-# pylint: disable=C0103,E0602,C0114,C0115,C0116,R0913,R0914,R0915
-#
-
-import logging
-from bip.utils.log_utils     import redirect_logging
-from bip.pipelines.dMRI_diff import diff_eddy
-from bip.pipelines.dMRI_diff import diff_dtifit
-from bip.pipelines.dMRI_diff import diff_noddi
-from bip.pipelines.dMRI_diff import diff_tbss
-from bip.pipelines.dMRI_diff import diff_bedpostx
-from bip.pipelines.dMRI_diff import diff_autoptx
-
-log = logging.getLogger(__name__)
-
-def add_to_pipeline(ctx, pipe, tree, targets):
-
-    logs_dir=tree.get('logs_dir')
-
-    subj = ctx.subject 
-
-    with redirect_logging('pipe_dMRI_fieldmap', outdir=logs_dir):
-
-        # TODO: Make this configurable
-        cuda_dict_1 = dict(queue="short.qg@@win-uk-biobank-gpu",
-                         coprocessor="cuda",
-                         coprocessor_class="P",
-                         coprocessor_toolkit="8.0",
-                         name="BIP_diff_eddy_" + subj)
-        cuda_dict_2 = dict(queue="short.qg@@win-uk-biobank-gpu",
-                         coprocessor="cuda",
-                         coprocessor_class="P",
-                         coprocessor_toolkit="6.5",
-                         name="BIP_diff_bedpostx_" + subj)
-        #pipe(diff_eddy.run, submit=dict(jobtime=200), kwargs={'ctx' : ctx})
-        pipe(diff_eddy.run,
-             submit=cuda_dict_1,
-             kwargs={'ctx' : ctx})
-        targets.append('eddy_data')
-        pipe(diff_dtifit.run,
-             submit=dict(jobtime=200, name="BIP_diff_dtifit_" + subj),
-             kwargs={'ctx' : ctx})
-        targets.append('FA')
-        pipe(diff_noddi.run,
-             submit=dict(jobtime=200, name="BIP_diff_noddi_" + subj),
-             kwargs={'ctx' : ctx})
-        targets.append('ISOVF')
-        pipe(diff_tbss.run,
-             submit=dict(jobtime=200, name="BIP_diff_tbss_" + subj),
-             kwargs={'ctx' : ctx})
-        targets.append('JHUrois_FA')
-        pipe(diff_bedpostx.run, 
-             submit=cuda_dict_2,
-             kwargs={'ctx' : ctx})
-        targets.append('bedpostx_eye')
-        pipe(diff_autoptx.run,
-             submit=dict(jobtime=200, name="BIP_diff_autoptx_" + subj),
-             kwargs={'ctx' : ctx})
-        targets.append('tractsNorm')
-
-    return pipe, targets
diff --git a/bip/pipelines/dMRI_diff/diff_autoptx.py b/bip/pipelines/dMRI_diff/diff_autoptx.py
index 017bd3538ffac8b749919c74623ddacdacccd76a..6b2629bee19917d9a5a6bf00bad267acc873398e 100755
--- a/bip/pipelines/dMRI_diff/diff_autoptx.py
+++ b/bip/pipelines/dMRI_diff/diff_autoptx.py
@@ -15,7 +15,7 @@ import random
 import logging
 from shutil import copyfile
 from fsl import wrappers
-from bip.utils.log_utils import redirect_logging
+from bip.utils.log_utils import redirect_logging, job_name
 from pipe_tree import In, Out, Ref, Var
 
 log = logging.getLogger(__name__)
@@ -42,7 +42,7 @@ def run(ctx,
         tractsNorm:                Out,
         aptx_tract_tmp:            Out):
 
-    with redirect_logging('diff_autoptx_' + autoptx_tract.value,
+    with redirect_logging(job_name(run, autoptx_tract.value),
                           outdir=logs_dir):
 
         t_name  = autoptx_tract.value
diff --git a/bip/pipelines/dMRI_diff/diff_bedpostx.py b/bip/pipelines/dMRI_diff/diff_bedpostx.py
index 3cf8ace1d575b2a4b8c20e27ebb33d1a1d950d8e..8b4aab4b60d3b31a17460361bd2d68e8e781c790 100755
--- a/bip/pipelines/dMRI_diff/diff_bedpostx.py
+++ b/bip/pipelines/dMRI_diff/diff_bedpostx.py
@@ -14,7 +14,7 @@ import os
 import logging
 from shutil import copyfile
 from fsl import wrappers
-from bip.utils.log_utils import redirect_logging
+from bip.utils.log_utils import redirect_logging, job_name
 from pipe_tree import In, Out, Ref
 
 log = logging.getLogger(__name__)
@@ -38,7 +38,7 @@ def run(ctx,
         bedpostx_eye:              Out,
         bp_logs_gpu_dir:           Out):
 
-    with redirect_logging('diff_bedpostx', outdir=logs_dir):
+    with redirect_logging(job_name(run), outdir=logs_dir):
 
         w = os.getcwd() + "/"
 
diff --git a/bip/pipelines/dMRI_diff/diff_dtifit.py b/bip/pipelines/dMRI_diff/diff_dtifit.py
index 43c6e6c597b4cc3dae2575b828da578c628031d3..ea4cc46f2611c0a43e9f5b3ba979cd0f24fb9b0c 100755
--- a/bip/pipelines/dMRI_diff/diff_dtifit.py
+++ b/bip/pipelines/dMRI_diff/diff_dtifit.py
@@ -14,7 +14,7 @@ import logging
 from shutil import copyfile
 import numpy as np
 from fsl import wrappers
-from bip.utils.log_utils import redirect_logging
+from bip.utils.log_utils import redirect_logging, job_name
 from pipe_tree import In, Out, Ref
 from gradunwarp.core.gradient_unwarp_apply import gradient_unwarp_apply
 
@@ -35,7 +35,7 @@ def run(ctx,
         eddy_data_ud_1_shell_bvec: Out,
         FA:                        Out):
 
-    with redirect_logging('diff_dtifit', outdir=logs_dir):
+    with redirect_logging(job_name(run), outdir=logs_dir):
 
 
         if ctx.gdc != '':
diff --git a/bip/pipelines/dMRI_diff/diff_eddy.py b/bip/pipelines/dMRI_diff/diff_eddy.py
index a4d286a22eef1c02dd90352c411d1d9c57c3b02c..a47794e9a85c59414fa8b71ae7c2357f0dd011cf 100755
--- a/bip/pipelines/dMRI_diff/diff_eddy.py
+++ b/bip/pipelines/dMRI_diff/diff_eddy.py
@@ -15,7 +15,7 @@ import logging
 from shutil import copyfile
 import nibabel as nib
 from fsl import wrappers
-from bip.utils.log_utils import redirect_logging
+from bip.utils.log_utils import redirect_logging, job_name
 from pipe_tree import In, Out, Ref
 
 log = logging.getLogger(__name__)
@@ -43,7 +43,7 @@ def run(ctx,
         eddy_outlier_report:            Out):
 
     #TODO: Do the __name__ for everything
-    with redirect_logging(__name__, outdir=logs_dir):
+    with redirect_logging(job_name(run), outdir=logs_dir):
 
         # Creates links
         # TODO: These links are NOT relative. This may cause future problems.
diff --git a/bip/pipelines/dMRI_diff/diff_noddi.py b/bip/pipelines/dMRI_diff/diff_noddi.py
index 8d36809bffb5b623a3496330968364208e0d8256..1ef59a233b8526bea52291ef5325d79eb1f7e84b 100755
--- a/bip/pipelines/dMRI_diff/diff_noddi.py
+++ b/bip/pipelines/dMRI_diff/diff_noddi.py
@@ -16,7 +16,7 @@ import zipfile
 import gzip
 import shutil
 import amico
-from bip.utils.log_utils import redirect_logging, tempdir
+from bip.utils.log_utils import redirect_logging, tempdir, job_name
 from pipe_tree import In, Out, Ref
 
 
@@ -38,7 +38,7 @@ def run(ctx,
         OD:                        Out,
         NODDI_dir_file:            Out):
 
-    with redirect_logging('diff_noddi', outdir=logs_dir),\
+    with redirect_logging(job_name(run), outdir=logs_dir),\
          tempdir(tmp_dir):
         amtmp      = tmp_dir + '/amtmp.nii'
         amtmp_mask = tmp_dir + '/amtmp_mask.nii'
diff --git a/bip/pipelines/dMRI_diff/diff_tbss.py b/bip/pipelines/dMRI_diff/diff_tbss.py
index ce77c626a5a2dd9bc3f1ca70ebad911c5c2407e3..db2dc5b6c9810a6cc32f5ba1b2f0fed35e48ed60 100755
--- a/bip/pipelines/dMRI_diff/diff_tbss.py
+++ b/bip/pipelines/dMRI_diff/diff_tbss.py
@@ -15,7 +15,7 @@ import logging
 from shutil import copyfile
 import nibabel as nib
 from fsl import wrappers
-from bip.utils.log_utils import redirect_logging, tempdir
+from bip.utils.log_utils import redirect_logging, tempdir, job_name
 from pipe_tree import In, Out, Ref
 
 log = logging.getLogger(__name__)
@@ -51,7 +51,7 @@ def run(ctx,
         TBSS_all_FA_skeletonised:    Out,
         JHUrois_FA:                  Out):
 
-    with redirect_logging('diff_tbss', outdir=logs_dir),\
+    with redirect_logging(job_name(run), outdir=logs_dir),\
          tempdir(tmp_dir):
 
         TBSS_FA_tmp = tmp_dir + '/TBSS_FA_tmp.nii.gz'
diff --git a/bip/pipelines/dMRI_fieldmap/__init__.py b/bip/pipelines/dMRI_fieldmap/__init__.py
old mode 100644
new mode 100755
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..102289e779b89b1a78448ec8ae959f95a0445029
--- a/bip/pipelines/dMRI_fieldmap/__init__.py
+++ b/bip/pipelines/dMRI_fieldmap/__init__.py
@@ -0,0 +1,32 @@
+#!/usr/bin/env python
+#
+# dMRI_fieldmap.py - Pipeline with the fieldmap processing.
+#
+# Author: Fidel Alfaro Almagro <fidel.alfaroalmagro@ndcn.ox.ac.uk>
+# Author: Paul McCarthy <pauldmccarthy@gmail.com>
+# Author: Michiel Cottaar <michiel.cottaar@ndcn.ox.ac.uk>
+#
+# pylint: disable=C0103,E0602,C0114,C0115,C0116,R0913,R0914,R0915
+# pylint: disable=W0613
+#
+
+import logging
+from bip.utils.log_utils         import job_name
+from bip.pipelines.dMRI_fieldmap import fieldmap_pre_topup, fieldmap_topup
+from bip.pipelines.dMRI_fieldmap import fieldmap_post_topup
+
+log = logging.getLogger(__name__)
+
+def add_to_pipeline(ctx, pipe, tree):
+
+    subj = ctx.subject
+
+    pipe(fieldmap_pre_topup.run,
+         submit=dict(jobtime=200, name=job_name(fieldmap_pre_topup.run, subj)),
+         kwargs={'ctx' : ctx})
+    pipe(fieldmap_topup.run,
+         submit=dict(jobtime=200, name=job_name(fieldmap_topup.run, subj)),
+         kwargs={'ctx' : ctx})
+    pipe(fieldmap_post_topup.run,
+         submit=dict(jobtime=200, name=job_name(fieldmap_post_topup.run, subj)),
+         kwargs={'ctx' : ctx})
diff --git a/bip/pipelines/dMRI_fieldmap/dMRI_fieldmap.py b/bip/pipelines/dMRI_fieldmap/dMRI_fieldmap.py
deleted file mode 100644
index dc64d40f3f59efc5c77db7ef32382d3e11803575..0000000000000000000000000000000000000000
--- a/bip/pipelines/dMRI_fieldmap/dMRI_fieldmap.py
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/usr/bin/env python
-#
-# dMRI_fieldmap.py - Pipeline with the fieldmap processing.
-#
-# Author: Fidel Alfaro Almagro <fidel.alfaroalmagro@ndcn.ox.ac.uk>
-# Author: Paul McCarthy <pauldmccarthy@gmail.com>
-# Author: Michiel Cottaar <michiel.cottaar@ndcn.ox.ac.uk>
-#
-# pylint: disable=C0103,E0602,C0114,C0115,C0116,R0913,R0914,R0915
-#
-
-import logging
-from bip.utils.log_utils         import redirect_logging
-from bip.pipelines.dMRI_fieldmap import fieldmap_pre_topup
-from bip.pipelines.dMRI_fieldmap import fieldmap_topup
-from bip.pipelines.dMRI_fieldmap import fieldmap_post_topup
-
-log = logging.getLogger(__name__)
-
-def add_to_pipeline(ctx, pipe, tree, targets):
-
-    logs_dir=tree.get('logs_dir')
-
-    subj = ctx.subject 
-
-    with redirect_logging('pipe_dMRI_fieldmap', outdir=logs_dir):
-        pipe(fieldmap_pre_topup.run,
-             submit=dict(jobtime=200, name="BIP_fieldmap_pre_topup_" + subj),
-             kwargs={'ctx' : ctx})
-        targets.append('total_B0_PA')
-        pipe(fieldmap_topup.run,
-             submit=dict(jobtime=200, name="BIP_fieldmap_topup_" + subj),
-             kwargs={'ctx' : ctx})
-        targets.append('fieldmap_fout')
-        pipe(fieldmap_post_topup.run,
-             submit=dict(jobtime=200, name="BIP_fieldmap_post_topup_" + subj),
-             kwargs={'ctx' : ctx})
-        targets.append('fieldmap_mask')
-
-    return pipe, targets
diff --git a/bip/pipelines/dMRI_fieldmap/fieldmap_post_topup.py b/bip/pipelines/dMRI_fieldmap/fieldmap_post_topup.py
index 745eb55bb8c84cc1c333342705c787f875855f9a..89631af1fd229709d7e99af8e9357af70919b483 100755
--- a/bip/pipelines/dMRI_fieldmap/fieldmap_post_topup.py
+++ b/bip/pipelines/dMRI_fieldmap/fieldmap_post_topup.py
@@ -14,7 +14,7 @@ from shutil import copyfile
 import numpy as np
 import nibabel as nib
 from fsl import wrappers
-from bip.utils.log_utils import redirect_logging, tempdir
+from bip.utils.log_utils import redirect_logging, tempdir, job_name
 from pipe_tree import In, Out, Ref
 from gradunwarp.core.gradient_unwarp_apply import gradient_unwarp_apply
 
@@ -48,7 +48,7 @@ def run(ctx,
         fieldmap_mask_ud:               Out,
         fieldmap_iout_mean_ud_inv_warp: Out):
 
-    with redirect_logging('fieldmap_post_topup', outdir=logs_dir),\
+    with redirect_logging(job_name(run), outdir=logs_dir),\
                tempdir(tmp_dir):
 
         B0_AP_corr_tmp  = tmp_dir + '/B0_AP_corr_tmp.nii.gz'
diff --git a/bip/pipelines/dMRI_fieldmap/fieldmap_pre_topup.py b/bip/pipelines/dMRI_fieldmap/fieldmap_pre_topup.py
index 0473ec10efd07bc6855239c427f86c1032c3bdc3..6dd197045615526fa68e9dbfa326d2eec9dc7b4a 100755
--- a/bip/pipelines/dMRI_fieldmap/fieldmap_pre_topup.py
+++ b/bip/pipelines/dMRI_fieldmap/fieldmap_pre_topup.py
@@ -16,7 +16,7 @@ from shutil import copyfile
 import numpy as np
 import nibabel as nib
 from fsl import wrappers
-from bip.utils.log_utils       import redirect_logging, tempdir
+from bip.utils.log_utils       import redirect_logging, tempdir, job_name
 from bip.utils.get_b0s         import get_b0s
 from bip.utils.get_dwell_time  import get_dwell_time
 from bip.utils.read_json_field import read_json_field
@@ -181,7 +181,7 @@ def run(ctx,
         logs_dir:         Ref,
         tmp_dir:          Ref):
 
-    with redirect_logging('fieldmap_pre_topup', outdir=logs_dir),\
+    with redirect_logging(job_name(run), outdir=logs_dir),\
          tempdir(tmp_dir):
 
         AP_tmp = tmp_dir + '/AP_tmp_'
diff --git a/bip/pipelines/dMRI_fieldmap/fieldmap_topup.py b/bip/pipelines/dMRI_fieldmap/fieldmap_topup.py
index f06c097df12b0468402bfc4ffd896dcd8664e29b..68487a0057a778fc5094168ba9f4529f6e6de1de 100755
--- a/bip/pipelines/dMRI_fieldmap/fieldmap_topup.py
+++ b/bip/pipelines/dMRI_fieldmap/fieldmap_topup.py
@@ -12,7 +12,7 @@
 
 import logging
 from fsl import wrappers
-from bip.utils.log_utils import redirect_logging
+from bip.utils.log_utils import redirect_logging, job_name
 from pipe_tree import In, Out, Ref
 
 log = logging.getLogger(__name__)
@@ -25,7 +25,7 @@ def run(ctx,
         fieldmap_jacout:     Ref,
         fieldmap_fout:       Out):
 
-    with redirect_logging('fieldmap_topup', outdir=logs_dir):
+    with redirect_logging(job_name(run), outdir=logs_dir):
         wrappers.topup(imain=B0_AP_PA,
                        datain=acqparams,
                        config="b02b0.cnf",
diff --git a/bip/pipelines/fMRI_rest/__init__.py b/bip/pipelines/fMRI_rest/__init__.py
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..5f61a65c668cd70c7294980f86c301dd210983da 100755
--- a/bip/pipelines/fMRI_rest/__init__.py
+++ b/bip/pipelines/fMRI_rest/__init__.py
@@ -0,0 +1,35 @@
+#!/usr/bin/env python
+#
+# fMRI_rest.py - Pipeline with the resting state fMRI processing.
+#
+# Author: Fidel Alfaro Almagro <fidel.alfaroalmagro@ndcn.ox.ac.uk>
+# Author: Paul McCarthy <pauldmccarthy@gmail.com>
+# Author: Michiel Cottaar <michiel.cottaar@ndcn.ox.ac.uk>
+#
+# pylint: disable=C0103,E0602,C0114,C0115,C0116,R0913,R0914,R0915
+# pylint: disable=W0613
+#
+
+import logging
+from bip.utils.log_utils     import job_name
+from bip.pipelines.fMRI_rest import rfMRI_prepare, rfMRI_melodic
+from bip.pipelines.fMRI_rest import rfMRI_fix, rfMRI_netmats
+
+log = logging.getLogger(__name__)
+
+def add_to_pipeline(ctx, pipe, tree):
+
+    subj = ctx.subject
+
+    pipe(rfMRI_prepare.run,
+         submit=dict(jobtime=200, name=job_name(rfMRI_prepare.run, subj)),
+         kwargs={'ctx' : ctx})
+    pipe(rfMRI_melodic.run,
+         submit=dict(jobtime=200, name=job_name(rfMRI_melodic.run, subj)),
+         kwargs={'ctx' : ctx})
+    pipe(rfMRI_fix.run,
+         submit=dict(jobtime=200, name=job_name(rfMRI_fix.run, subj)),
+         kwargs={'ctx' : ctx})
+    pipe(rfMRI_netmats.run,
+         submit=dict(jobtime=200, name=job_name(rfMRI_netmats.run, subj)),
+         kwargs={'ctx' : ctx})
diff --git a/bip/pipelines/fMRI_rest/fMRI_rest.py b/bip/pipelines/fMRI_rest/fMRI_rest.py
deleted file mode 100755
index 34be1e6aa92a0d6003b3da284788c5b4b6963a69..0000000000000000000000000000000000000000
--- a/bip/pipelines/fMRI_rest/fMRI_rest.py
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/usr/bin/env python
-#
-# fMRI_rest.py - Pipeline with the resting state fMRI processing.
-#
-# Author: Fidel Alfaro Almagro <fidel.alfaroalmagro@ndcn.ox.ac.uk>
-# Author: Paul McCarthy <pauldmccarthy@gmail.com>
-# Author: Michiel Cottaar <michiel.cottaar@ndcn.ox.ac.uk>
-#
-# pylint: disable=C0103,E0602,C0114,C0115,C0116,R0913,R0914,R0915
-#
-
-import logging
-from bip.utils.log_utils     import redirect_logging
-from bip.pipelines.fMRI_rest import rfMRI_prepare
-from bip.pipelines.fMRI_rest import rfMRI_melodic
-from bip.pipelines.fMRI_rest import rfMRI_fix
-from bip.pipelines.fMRI_rest import rfMRI_netmats
-
-log = logging.getLogger(__name__)
-
-def add_to_pipeline(ctx, pipe, tree, targets):
-
-    logs_dir=tree.get('logs_dir')
-
-    subj = ctx.subject 
-
-    with redirect_logging('pipe_fMRI_task', outdir=logs_dir):
-        pipe(rfMRI_prepare.run,
-             submit=dict(jobtime=200, name="BIP_rfMRI_prepare_" + subj),
-             kwargs={'ctx' : ctx})
-        targets.append('rfMRI_fsf')
-        pipe(rfMRI_melodic.run,
-             submit=dict(jobtime=200, name="BIP_rfMRI_melodic_" + subj),
-             kwargs={'ctx' : ctx})
-        targets.append('rfMRI_ica')
-        pipe(rfMRI_fix.run,
-             submit=dict(jobtime=200, name="BIP_rfMRI_fix_" + subj),
-             kwargs={'ctx' : ctx})
-        targets.append('filtered_func_data_clean_stdmask')
-        pipe(rfMRI_netmats.run,
-             submit=dict(jobtime=200, name="BIP_rfMRI_netmats_" + subj),
-             kwargs={'ctx' : ctx})
-        targets.append('node_amplitudes')
-
-    return pipe, targets
diff --git a/bip/pipelines/fMRI_rest/rfMRI_netmats_fnc.py b/bip/pipelines/fMRI_rest/rfMRI_netmats_fnc.py
old mode 100644
new mode 100755
diff --git a/bip/pipelines/fMRI_task/__init__.py b/bip/pipelines/fMRI_task/__init__.py
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..6f6d7605aedeef25fdf6caca4515b9623db390c7 100755
--- a/bip/pipelines/fMRI_task/__init__.py
+++ b/bip/pipelines/fMRI_task/__init__.py
@@ -0,0 +1,28 @@
+#!/usr/bin/env python
+#
+# fMRI_task.py - Pipeline with the task fMRI processing.
+#
+# Author: Fidel Alfaro Almagro <fidel.alfaroalmagro@ndcn.ox.ac.uk>
+# Author: Paul McCarthy <pauldmccarthy@gmail.com>
+# Author: Michiel Cottaar <michiel.cottaar@ndcn.ox.ac.uk>
+#
+# pylint: disable=C0103,E0602,C0114,C0115,C0116,R0913,R0914,R0915
+# pylint: disable=W0613
+#
+
+import logging
+from bip.utils.log_utils     import job_name
+from bip.pipelines.fMRI_task import tfMRI_prepare, tfMRI_feat
+
+log = logging.getLogger(__name__)
+
+def add_to_pipeline(ctx, pipe, tree):
+
+    subj = ctx.subject
+
+    pipe(tfMRI_prepare.run,
+         submit=dict(jobtime=200, name=job_name(tfMRI_prepare.run, subj)),
+         kwargs={'ctx' : ctx})
+    pipe(tfMRI_feat.run,
+         submit=dict(jobtime=200, name=job_name(tfMRI_feat.run, subj)),
+         kwargs={'ctx' : ctx})
diff --git a/bip/pipelines/fMRI_task/fMRI_task.py b/bip/pipelines/fMRI_task/fMRI_task.py
deleted file mode 100755
index 00a34c4f0ae37eab9ecac87a452d4992ee52daee..0000000000000000000000000000000000000000
--- a/bip/pipelines/fMRI_task/fMRI_task.py
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/usr/bin/env python
-#
-# fMRI_task.py - Pipeline with the task fMRI processing.
-#
-# Author: Fidel Alfaro Almagro <fidel.alfaroalmagro@ndcn.ox.ac.uk>
-# Author: Paul McCarthy <pauldmccarthy@gmail.com>
-# Author: Michiel Cottaar <michiel.cottaar@ndcn.ox.ac.uk>
-#
-# pylint: disable=C0103,E0602,C0114,C0115,C0116,R0913,R0914,R0915
-#
-
-import logging
-from bip.utils.log_utils     import redirect_logging
-from bip.pipelines.fMRI_task import tfMRI_prepare
-from bip.pipelines.fMRI_task import tfMRI_feat
-
-log = logging.getLogger(__name__)
-
-def add_to_pipeline(ctx, pipe, tree, targets):
-
-    logs_dir=tree.get('logs_dir')
-
-    subj = ctx.subject 
-
-    with redirect_logging('pipe_fMRI_task', outdir=logs_dir):
-        pipe(tfMRI_prepare.run,
-             submit=dict(jobtime=200, name="BIP_tfMRI_prepare_" + subj),
-             kwargs={'ctx' : ctx})
-        targets.append('tfMRI_fsf')
-        pipe(tfMRI_feat.run,
-             submit=dict(jobtime=200, name="BIP_tfMRI_feat_" + subj),
-             kwargs={'ctx' : ctx})
-        targets.append('tfMRI_feat')
-
-    return pipe, targets
diff --git a/bip/pipelines/struct_FS/FS_get_IDPs.py b/bip/pipelines/struct_FS/FS_get_IDPs.py
index 3a14ccf4e66f4078c34c6066b3c94741aa8741c8..3dee9ab76100572fc21b351892586b1b76247682 100755
--- a/bip/pipelines/struct_FS/FS_get_IDPs.py
+++ b/bip/pipelines/struct_FS/FS_get_IDPs.py
@@ -15,537 +15,18 @@ import sys
 import logging
 import argparse
 from pipe_tree import In, Out, Ref
-from bip.utils.log_utils import redirect_logging,  run_command
+from bip.pipelines.struct_FS.FS_get_IDPs_fnc import bb_FS_get_IDPs
+from bip.utils.log_utils import redirect_logging, job_name
 
 log = logging.getLogger(__name__)
 
-class MyParser(argparse.ArgumentParser):
-    def error(self, message):
-        sys.stderr.write(f'error: {message}\n')
-        self.print_help()
-        sys.exit(2)
-
-class Usage(Exception):
-    def __init__(self, msg):
-        self.msg = msg
-
-def check_and_create_dir(dirName):
-    try:
-        os.stat(dirName)
-    except:
-        os.mkdir(dirName)
-
-def read_file(fileName):
-    result=[]
-    with open(fileName, 'r', encoding="utf-8") as f:
-        for line in f.readlines():
-            line=line.replace('\t',' ')
-            result.append([x.replace("\n",'') for x in line.split(' ')])
-    return result
-
-
-def generate_FS_IDP_files(SUBJECTS_DIR, subject_ID, subject, dataDir,
-                          headersDir, ctx, env):
-
-    statsDir = SUBJECTS_DIR + '/' + subject_ID + '/stats/'
-
-    #TODO: Include a pre-requisite that python2.7 must be availble in the system
-    if os.path.isfile(statsDir + 'aseg.stats'):
-        run_command(log, 'asegstats2table  '+
-           ' -m volume --all-segs --tablefile ' + dataDir + 'aseg_1.txt ' +
-           ' --subjects ' + subject_ID + ' --skip', env)
-        run_command(log, 'asegstats2table  '+
-            ' -m mean --all-segs --tablefile ' + dataDir +
-            'aseg_intensity.txt ' +' --subjects ' + subject_ID + ' --skip', env)
-
-    if os.path.isfile(statsDir + 'lh.w-g.pct.stats'):
-        run_command(log, 'asegstats2table ' +
-            ' -m mean --all-segs --stats=lh.w-g.pct.stats ' +
-            ' --tablefile ' + dataDir + 'wg_lh_mean.txt ' +
-            ' --subjects ' + subject_ID + ' --skip', env)
-
-    if os.path.isfile(statsDir + 'rh.w-g.pct.stats'):
-        run_command(log, 'asegstats2table ' +
-            ' -m mean --all-segs --stats=rh.w-g.pct.stats ' +
-            ' --tablefile ' + dataDir + 'wg_rh_mean.txt ' +
-            ' --subjects ' + subject_ID + ' --skip', env)
-
-    for hemi in ["lh", "rh"]:
-        for value in ["volume", "area", "thickness"]:
-            for atlas in ["BA_exvivo", "aparc.DKTatlas", "aparc.a2009s",
-                          "aparc"]:
-                outFileName = dataDir + atlas + '_' + hemi + '_' + value + '.txt'
-                if os.path.isfile(statsDir + hemi + "." + atlas + '.stats'):
-                    run_command(log, 'aparcstats2table '+
-                        ' -m ' + value + ' --hemi=' + hemi +
-                        ' --tablefile ' + outFileName +
-                        ' --subjects ' + subject_ID + ' --skip -p ' + atlas,env)
-
-    atlas="aparc.pial"
-    value="area"
-    for hemi in ["lh", "rh"]:
-        outFileName = dataDir + atlas + '_' + hemi + '_' + value + '.txt'
-        if os.path.isfile(statsDir + hemi + '.aparc.pial.stats'):
-            run_command(log, 'aparcstats2table '+
-                ' -m ' + value + ' --hemi=' + hemi +
-                ' --tablefile ' + outFileName +
-                ' --subjects ' + subject_ID + ' --skip -p ' + atlas, env)
-
-    with open(ctx.get_data('FS/FS_initial_files.txt'), encoding="utf-8") as f:
-        files_generated = [x.replace('\n','').split(" ") for x in f.readlines()]
-
-    data_dict={}
-
-    for file_generated in files_generated:
-        if os.path.isfile(dataDir + file_generated[0] + '.txt'):
-            data = read_file(dataDir + file_generated[0] + '.txt')
-        else:
-            data = read_file(ctx.get_data('FS/FS_data_ex/' +
-                                          file_generated[0] + '.txt'))
-        data_dict[file_generated[0]] = data
-
-    data_dict['ID'] = [['ID'], [subject]]
-
-    return data_dict
-
-
-#Quick consistency check
-def check_consistency(data_dict, SUBJECTS_DIR, ctx):
-
-    for file_generated in data_dict.keys():
-        if len(data_dict[file_generated])>2:
-            print("Error in " + file_generated + ': File has more than 2 lines')
-            save_data_NaNs(SUBJECTS_DIR, ctx)
-            sys.exit(-1)
-
-        len0=len(data_dict[file_generated][0])
-        len1=len(data_dict[file_generated][1])
-
-        if len0 != len1:
-            print("Error in " + file_generated + ': Inconsistent # of features')
-            save_data_NaNs(SUBJECTS_DIR, ctx)
-            sys.exit(-1)
-
-
-def fix_aseg_data(data_dict, subjectDir):
-
-    #Split aseg_1 into aseg_global and aseg_volume
-    data_dict['aseg_global'] = [[],[]]
-    data_dict['aseg_global'][0] = [data_dict['aseg_1'][0][0]] + data_dict['aseg_1'][0][46:]
-    data_dict['aseg_global'][1] = [data_dict['aseg_1'][1][0]] + data_dict['aseg_1'][1][46:]
-
-
-    #Variables not needed
-    vars_to_delete = ['CortexVol', 'CerebralWhiteMatterVol', \
-                      'SupraTentorialVolNotVentVox', 'MaskVol', 'SurfaceHoles']
-    ind_to_delete = []
-    for i in range(len(data_dict['aseg_global'][0])):
-        if not data_dict['aseg_global'][0][i] in vars_to_delete:
-            ind_to_delete.append(i)
-
-    data_dict['aseg_global'][0] = [data_dict['aseg_global'][0][x] for x in ind_to_delete]
-    data_dict['aseg_global'][1] = [data_dict['aseg_global'][1][x] for x in ind_to_delete]
-
-    # For some reason, the VentricleChoroidVol is not caught by asegstats2table
-    try:
-        with open(subjectDir + '/stats/aseg.stats', 'r', encoding="utf-8") as f:
-            val=[x.split(',')[3].strip() for x in f.readlines() if 'VentricleChoroidVol' in x]
-    except:
-        val=["NaN"]
-
-    data_dict['aseg_global'][0].append('VentricleChoroidVol')
-    data_dict['aseg_global'][1].append(val[0])
-
-    data_dict['aseg_volume'] = [[],[]]
-    data_dict['aseg_volume'][0] = data_dict['aseg_1'][0][0:46]
-    data_dict['aseg_volume'][1] = data_dict['aseg_1'][1][0:46]
-
-    del data_dict['aseg_1']
-
-    #Remove the WM-hypointensities. No value in any subject
-    cols_to_remove = ['Left-WM-hypointensities', 'Right-WM-hypointensities',
-                  'Left-non-WM-hypointensities', 'Right-non-WM-hypointensities']
-
-    for key in list(data_dict.keys()):
-        if key.startswith('aseg'):
-            sub_keys_to_remove = []
-            for sub_key in data_dict[key][0]:
-                for col in cols_to_remove:
-                    if col in sub_key:
-                        sub_keys_to_remove.append(sub_key)
-
-            for sub_key in sub_keys_to_remove:
-                ind = data_dict[key][0].index(sub_key)
-                del data_dict[key][0][ind]
-                del data_dict[key][1][ind]
-
-    return data_dict
-
-
-def gen_aparc_special(data_dict, subjectDir):
-
-    struct_data = []
-
-    struct_data.append(['aparc.pial_lh_area','TotalSurface','lh.aparc.pial.stats',  'PialSurfArea'])
-    struct_data.append(['aparc.pial_rh_area','TotalSurface','rh.aparc.pial.stats',  'PialSurfArea'])
-    struct_data.append(['aparc_lh_area',     'TotalSurface','lh.aparc.stats',       'WhiteSurfArea'])
-    struct_data.append(['aparc_rh_area',     'TotalSurface','rh.aparc.stats',       'WhiteSurfArea'])
-    struct_data.append(['aparc_lh_thickness',     'GlobalMeanThickness','lh.aparc.stats','MeanThickness'])
-    struct_data.append(['aparc_rh_thickness',     'GlobalMeanThickness','rh.aparc.stats','MeanThickness'])
-
-    for elem in struct_data:
-        data_dict[elem[0]][0].append(elem[1])
-        try:
-            with open(subjectDir + '/stats/' + elem[2], 'r',
-                      encoding="utf-8") as f:
-                v = [x.split(',')[3].strip() for x in f.readlines() if elem[3] in x]
-                data_dict[elem[0]][1].append(v[0])
-        except:
-            data_dict[elem[0]][1].append('NaN')
-
-    return data_dict
-
-
-def bool_FLAIR(data_dict, subjectDir):
-    if os.path.isfile(subjectDir + '/mri/FLAIR.mgz'):
-        data_dict['FLAIR'] = [['Use-T2-FLAIR-for-FreeSurfer'],['1']]
-    else:
-        data_dict['FLAIR'] = [['Use-T2-FLAIR-for-FreeSurfer'],['0']]
-
-    return data_dict
-
-
-def gen_subsegmentation(data_dict, subjectDir, subject, ctx):
-    struct_data = {}
-    struct_data['Brainstem_global'] = [['brainstemSsVolumes.v12.txt'],5]
-    struct_data['ThalamNuclei']     = [['ThalamicNuclei.v10.T1.volumes.txt'],52]
-    struct_data['AmygNuclei_lh']    = [['lh.amygNucVolumes-T1-AN.v21.txt',
-                                        'lh.amygNucVolumes-T1.v21.txt'],10]
-    struct_data['AmygNuclei_rh']    = [['rh.amygNucVolumes-T1-AN.v21.txt',
-                                        'rh.amygNucVolumes-T1.v21.txt'],10]
-    struct_data['HippSubfield_lh']  = [['lh.hippoSfVolumes-T1-AN.v21.txt',
-                                        'lh.hippoSfVolumes-T1.v21.txt'],22]
-    struct_data['HippSubfield_rh']  = [['rh.hippoSfVolumes-T1-AN.v21.txt',
-                                        'rh.hippoSfVolumes-T1.v21.txt'],22]
-
-    for struct in struct_data.keys():
-        found = False
-        data_dict[struct] = [[],[]]
-        for fil in struct_data[struct][0]:
-            final_fil = subjectDir + '/FreeSurfer/mri/' + fil
-            if os.path.isfile(final_fil):
-                with open(final_fil, 'r',encoding="utf-8") as f:
-                    for lin in f.readlines():
-                        lin = lin.replace('\n','').split(' ')
-                        data_dict[struct][0].append(lin[0])
-                        data_dict[struct][1].append(lin[1])
-                found = True
-                break
-
-        if not found:
-            with open(ctx.get_data('FS/FS_sub_headers/' +
-                      struct + '.txt'), encoding="utf-8") as f:
-                data_dict[struct][0] = [x.replace('\n','') for x in f.readlines()]
-                data_dict[struct][0] = ['NaN'] * len(data_dict[struct][0])
-        data_dict[struct][0]=['ID'] + data_dict[struct][0]
-        data_dict[struct][1]=[subject] + data_dict[struct][1]
-    return data_dict
-
-def fix_aparc_data(data_dict, subjectDir):
-
-    # Remove the column "temporalpole" in aparc files.
-    # Unreliable measure: Very few subjects have that measure.
-    for key in list(data_dict.keys()):
-        if key.startswith('aparc'):
-            sub_keys_to_remove = []
-            for sub_key in data_dict[key][0]:
-                if 'temporalpole' in sub_key:
-                    sub_keys_to_remove.append(sub_key)
-            for sub_key in sub_keys_to_remove:
-                ind = data_dict[key][0].index(sub_key)
-
-                if ind != -1:
-                    del data_dict[key][0][ind]
-                    del data_dict[key][1][ind]
-
-
-    #Remove ETIV and BrainSegVolNotVent (They are global)
-    for key in list(data_dict.keys()):
-        if key.startswith('aparc') or key.startswith('BA_exvivo'):
-            sub_keys_to_remove = []
-            for sub_key in  data_dict[key][0]:
-                if sub_key in ['BrainSegVolNotVent' , 'eTIV']:
-                    sub_keys_to_remove.append(sub_key)
-            for sub_key in sub_keys_to_remove:
-                ind = data_dict[key][0].index(sub_key)
-                del data_dict[key][0][ind]
-                del data_dict[key][1][ind]
-
-    #Removing last colum for thickness in aparc
-    for key in list(data_dict.keys()):
-        if key.startswith('aparc') and key.endswith('thickness'):
-            sub_keys_to_remove = []
-            for sub_key in data_dict[key][0]:
-                if sub_key.endswith('MeanThickness_thickness'):
-                    sub_keys_to_remove.append(sub_key)
-            for sub_key in sub_keys_to_remove:
-                ind = data_dict[key][0].index(sub_key)
-                del data_dict[key][0][ind]
-                del data_dict[key][1][ind]
-
-    #Removing the last column for areas, also in BA
-    for key in list(data_dict.keys()):
-        if key.endswith('area'):
-            sub_keys_to_remove = []
-            for sub_key in data_dict[key][0]:
-                if sub_key.endswith('WhiteSurfArea_area'):
-                    sub_keys_to_remove.append(sub_key)
-            for sub_key in sub_keys_to_remove:
-                ind = data_dict[key][0].index(sub_key)
-                del data_dict[key][0][ind]
-                del data_dict[key][1][ind]
-
-
-    return data_dict
-
-#Remove first feature in case it is the subject ID (except for ID itself)
-def remove_first_feature(data_dict, subject):
-    for key in list(data_dict.keys()):
-        if key != "ID":
-            if (data_dict[key][1][0] == subject) or \
-               (data_dict[key][1][0] == ('FS_' + subject)) or \
-               (data_dict[key][1][0] == ''):
-                del data_dict[key][0][0]
-                del data_dict[key][1][0]
-    return data_dict
-
-def fix_headers(data_dict):
-
-    #Applying some general replacing rules for the categories
-    replace_rules = [['.', '-'],
-                     ['BA_exvivo', 'BA-exvivo'],
-                     ['AmygNuclei_lh', 'AmygNuclei_lh_volume'],
-                     ['AmygNuclei_rh', 'AmygNuclei_rh_volume'],
-                     ['Brainstem_global', 'Brainstem_global_volume'],
-                     ['HippSubfield_lh', 'HippSubfield_lh_volume'],
-                     ['HippSubfield_rh', 'HippSubfield_rh_volume'],
-                     ['wg_lh_mean','wg_lh_intensity-contrast'],
-                     ['wg_rh_mean','wg_rh_intensity-contrast'],
-                     ['aparc_lh', 'aparc-Desikan_lh'],
-                     ['aparc_rh', 'aparc-Desikan_rh'],
-                     ['FLAIR','Use-T2-FLAIR-for-FreeSurfer']]
-
-    for key in list(data_dict.keys()):
-        new_key=key
-        for rule in replace_rules:
-            if rule[0] in new_key:
-                new_key = new_key.replace(rule[0],rule[1])
-                data_dict[new_key] = data_dict.pop(key)
-
-    #Renaming some special cases
-    structs = [['aseg_global', 'lhSurfaceHoles', 'aseg_lh_number', 'HolesBeforeFixing'],
-               ['aseg_global', 'rhSurfaceHoles', 'aseg_rh_number', 'HolesBeforeFixing'],
-               ['aseg_global', 'BrainSegVol-to-eTIV', 'aseg_global_volume-ratio', 'BrainSegVol-to-eTIV'],
-               ['aseg_global', 'MaskVol-to-eTIV', 'aseg_global_volume-ratio', 'MaskVol-to-eTIV']]
-
-    for struct in structs:
-        index = data_dict[struct[0]][0].index(struct[1])
-        if struct[2] not in data_dict.keys():
-            data_dict[struct[2]] = [struct[3]],[data_dict[struct[0]][1][index]]
-        else:
-            data_dict[struct[2]][0].append(struct[3])
-            data_dict[struct[2]][1].append(data_dict[struct[0]][1][index])
-        del data_dict[struct[0]][0][index]
-        del data_dict[struct[0]][1][index]
-
-
-
-    for metric in ['volume', 'intensity']:
-        old_key = 'aseg_' + metric
-        for i in range(len(data_dict[old_key][0])):
-            if 'Left-' in data_dict[old_key][0][i]:
-                new_name = data_dict[old_key][0][i].replace('Left-','')
-                new_key = 'aseg_lh_' + metric
-
-                if new_key not in data_dict.keys():
-                    data_dict[new_key] = [[],[]]
-                data_dict[new_key][0].append(new_name)
-                data_dict[new_key][1].append(data_dict[old_key][1][i])
-            elif 'Right-' in data_dict[old_key][0][i]:
-                new_name = data_dict[old_key][0][i].replace('Right-','')
-                new_key = 'aseg_rh_' + metric
-
-                if new_key not in data_dict.keys():
-                    data_dict[new_key] = [[],[]]
-                data_dict[new_key][0].append(new_name)
-                data_dict[new_key][1].append(data_dict[old_key][1][i])
-            else:
-                new_name = data_dict[old_key][0][i]
-                new_key = 'aseg_global_' + metric
-                if new_key not in data_dict.keys():
-                    data_dict[new_key] = [[],[]]
-                data_dict[new_key][0].append(new_name)
-                data_dict[new_key][1].append(data_dict[old_key][1][i])
-
-        del data_dict[old_key]
-
-    for i in range(len(data_dict['aseg_global'][0])):
-        if data_dict['aseg_global'][0][i].startswith('lh'):
-            new_name = data_dict['aseg_global'][0][i].replace('lh','').replace('Vol','')
-            data_dict['aseg_lh_volume'][0].append(new_name)
-            data_dict['aseg_lh_volume'][1].append(data_dict['aseg_global'][1][i])
-        elif data_dict['aseg_global'][0][i].startswith('rh'):
-            new_name = data_dict['aseg_global'][0][i].replace('rh','').replace('Vol','')
-            data_dict['aseg_rh_volume'][0].append(new_name)
-            data_dict['aseg_rh_volume'][1].append(data_dict['aseg_global'][1][i])
-        else:
-            new_name = data_dict['aseg_global'][0][i].replace('Vol','')
-            data_dict['aseg_global_volume'][0].append(new_name)
-            data_dict['aseg_global_volume'][1].append(data_dict['aseg_global'][1][i])
-
-    del data_dict['aseg_global']
-
-    # Split ThalamNuclei into Left and Right
-    data_dict['ThalamNuclei_lh_volume'] = [[],[]]
-    data_dict['ThalamNuclei_rh_volume'] = [[],[]]
-    for i in range(len(data_dict['ThalamNuclei'][0])):
-        if "Left" in data_dict['ThalamNuclei'][0][i]:
-            new_name = data_dict['ThalamNuclei'][0][i].replace('Left-','')
-            data_dict['ThalamNuclei_lh_volume'][0].append(new_name)
-            data_dict['ThalamNuclei_lh_volume'][1].append(data_dict['ThalamNuclei'][1][i])
-
-        elif "Right" in data_dict['ThalamNuclei'][0][i]:
-            new_name = data_dict['ThalamNuclei'][0][i].replace('Right-','')
-            data_dict['ThalamNuclei_rh_volume'][0].append(new_name)
-            data_dict['ThalamNuclei_rh_volume'][1].append(data_dict['ThalamNuclei'][1][i])
-    del data_dict['ThalamNuclei']
-
-    #Removing redundant prefix and sufix in BA_excvivo
-    BAs = ['_exvivo_area','_exvivo_thickness', '_exvivo_volume']
-    BA_keys = [key for key in list(data_dict.keys()) if key.startswith('BA-exvivo')]
-    for key in BA_keys:
-        for i in range(len(data_dict[key][0])):
-            for BA in BAs:
-                data_dict[key][0][i] = data_dict[key][0][i].replace(BA,'').replace('rh_','').replace('lh_','')
-
-    #Removing redundant prefix and sufix in aparc
-    aparcs = ['_area','_thickness', '_volume','-area','-thickness', '-volume']
-    aparc_keys = [key for key in list(data_dict.keys()) if key.startswith('aparc')]
-    for key in aparc_keys:
-        for i in range(len(data_dict[key][0])):
-            for aparc in aparcs:
-                data_dict[key][0][i] = data_dict[key][0][i].replace(aparc,'').replace('rh_','').replace('lh_','')
-
-    #Changing weird and inconsistent characters
-    for key in list(data_dict.keys()):
-        for i in range(len(data_dict[key][0])):
-            data_dict[key][0][i] = data_dict[key][0][i].replace('_','-').replace('&','+')
-
-    return data_dict
-
-
-def save_data_NaNs(SUBJECTS_DIR, ctx):
-
-    with open(ctx.get_data('FS/FS_headers.txt'), encoding="utf-8") as f:
-        final_headers = [x.replace('\n','') for x in f.readlines()]
-
-    num_NaNs = len(final_headers) - 1
-
-    with open(SUBJECTS_DIR+'/IDP_files/FS_IDPs.txt','w',encoding="utf-8") as f:
-        values = ['NaN'] * num_NaNs
-        values_str = SUBJECTS_DIR + " " + " ".join(values)
-        f.write(f"{values_str}\n")
-        f.close()
-
-
-def save_data(data_dict, SUBJECTS_DIR, ctx):
-
-    with open(ctx.get_data('FS/FS_headers.txt'), encoding="utf-8") as f:
-        final_headers = [x.replace('\n','') for x in f.readlines()]
-
-    temp_headers={}
-
-    for key in list(data_dict.keys()):
-        if key in ['ID', 'Use-T2-FLAIR-for-FreeSurfer']:
-            temp_headers[key] = data_dict[key][1][0]
-        else:
-            for i in range(len(data_dict[key][0])):
-                temp_headers[key+"_"+data_dict[key][0][i]] = data_dict[key][1][i]
-
-    for x in final_headers:
-        if x not in temp_headers.keys():
-            temp_headers[x] = "NaN"
-
-    with open(SUBJECTS_DIR+'/IDP_files/FS_IDPs.txt','w',encoding="utf-8") as f:
-        values = [temp_headers[x] for x in final_headers]
-        values_str = " ".join(values)
-        f.write(f"{values_str}\n")
-        f.close()
-
-
-def save_headers_info(data_dict, SUBJECTS_DIR, ctx):
-
-    with open(ctx.get_data('FS/FS_final_headers.txt'), encoding="utf-8") as f:
-        final_headers = [x.replace('\n','') for x in f.readlines()]
-
-    temp_headers={}
-
-    for key in list(data_dict.keys()):
-        if key in ['ID', 'Use-T2-FLAIR-for-FreeSurfer']:
-            temp_headers[key] = data_dict[key][1][0]
-        else:
-            for i in range(len(data_dict[key][0])):
-                temp_headers[key+"_"+data_dict[key][0][i]] = data_dict[key][1][i]
-
-    for x in final_headers:
-        if x not in temp_headers.keys():
-            temp_headers[x] = "NaN"
-
-    with open(SUBJECTS_DIR + '/IDP_files/FS_headers_info.txt','w',
-              encoding="utf-8") as f:
-        values = [temp_headers[x] for x in final_headers]
-        values_str = " ".join(values)
-        f.write(f"{values_str}\n")
-
-        f.close()
-
-def bb_FS_get_IDPs(ctx, env):
-
-    subject    = ctx.subject
-    subject_ID = 'FreeSurfer'
-    subjectDir = env['SUBJECTS_DIR']
-    dataDir    = subjectDir + '/data/'
-    headersDir = subjectDir + '/headers/'
-
-    #TODO: Raise an exception
-    if not os.path.isdir(subjectDir):
-        print("Error: FreeSurfer has not been run on this subject")
-        sys.exit(-1)
-
-    check_and_create_dir(dataDir)
-    check_and_create_dir(headersDir)
-
-    data_dict = generate_FS_IDP_files(subjectDir, subject_ID, subject, dataDir,
-                                      headersDir, ctx, env)
-    data_dict = fix_aseg_data(data_dict, subjectDir)
-    data_dict = gen_aparc_special(data_dict, subjectDir)
-    data_dict = gen_subsegmentation(data_dict, subjectDir, subject, ctx)
-    data_dict = bool_FLAIR(data_dict, subjectDir)
-    data_dict = fix_aparc_data(data_dict,subjectDir)
-    data_dict = remove_first_feature(data_dict, subject)
-    data_dict = fix_headers(data_dict)
-
-    check_consistency(data_dict, subjectDir, ctx)
-    save_data(data_dict, subjectDir, ctx)
-
-
 def run(ctx,
         ThalamicNuclei:             In,
         rh_entorhinal_exvivo_label: In,
         logs_dir:                   Ref,
         FS_IDPs:                    Out):
 
-    with redirect_logging('FS_get_IDPs', outdir=logs_dir):
+    with redirect_logging(job_name(run), outdir=logs_dir):
 
         env = dict(os.environ, SUBJECTS_DIR=os.getcwd() + "/" + ctx.subject)
 
diff --git a/bip/pipelines/struct_FS/FS_get_IDPs_fnc.py b/bip/pipelines/struct_FS/FS_get_IDPs_fnc.py
new file mode 100755
index 0000000000000000000000000000000000000000..3a14ccf4e66f4078c34c6066b3c94741aa8741c8
--- /dev/null
+++ b/bip/pipelines/struct_FS/FS_get_IDPs_fnc.py
@@ -0,0 +1,552 @@
+#!/usr/bin/env python
+#
+# FS_get_IDPs_fnc.py - Sub-pipeline generating the IDPs of the FreeSurfer pipeline.
+#
+# Author: Fidel Alfaro Almagro <fidel.alfaroalmagro@ndcn.ox.ac.uk>
+# Author: Paul McCarthy <pauldmccarthy@gmail.com>
+# Author: Michiel Cottaar <michiel.cottaar@ndcn.ox.ac.uk>
+#
+# pylint: disable=C0103,E0602,C0114,C0115,C0116,R0913,R0914,R0915
+# pylint: disable=W0613,R1718,C0201,C0206,R0912,W0702,C0301
+#
+
+import os
+import sys
+import logging
+import argparse
+from pipe_tree import In, Out, Ref
+from bip.utils.log_utils import redirect_logging,  run_command
+
+log = logging.getLogger(__name__)
+
+class MyParser(argparse.ArgumentParser):
+    def error(self, message):
+        sys.stderr.write(f'error: {message}\n')
+        self.print_help()
+        sys.exit(2)
+
+class Usage(Exception):
+    def __init__(self, msg):
+        self.msg = msg
+
+def check_and_create_dir(dirName):
+    try:
+        os.stat(dirName)
+    except:
+        os.mkdir(dirName)
+
+def read_file(fileName):
+    result=[]
+    with open(fileName, 'r', encoding="utf-8") as f:
+        for line in f.readlines():
+            line=line.replace('\t',' ')
+            result.append([x.replace("\n",'') for x in line.split(' ')])
+    return result
+
+
+def generate_FS_IDP_files(SUBJECTS_DIR, subject_ID, subject, dataDir,
+                          headersDir, ctx, env):
+
+    statsDir = SUBJECTS_DIR + '/' + subject_ID + '/stats/'
+
+    #TODO: Include a pre-requisite that python2.7 must be available in the system
+    if os.path.isfile(statsDir + 'aseg.stats'):
+        run_command(log, 'asegstats2table  '+
+           ' -m volume --all-segs --tablefile ' + dataDir + 'aseg_1.txt ' +
+           ' --subjects ' + subject_ID + ' --skip', env)
+        run_command(log, 'asegstats2table  '+
+            ' -m mean --all-segs --tablefile ' + dataDir +
+            'aseg_intensity.txt ' +' --subjects ' + subject_ID + ' --skip', env)
+
+    if os.path.isfile(statsDir + 'lh.w-g.pct.stats'):
+        run_command(log, 'asegstats2table ' +
+            ' -m mean --all-segs --stats=lh.w-g.pct.stats ' +
+            ' --tablefile ' + dataDir + 'wg_lh_mean.txt ' +
+            ' --subjects ' + subject_ID + ' --skip', env)
+
+    if os.path.isfile(statsDir + 'rh.w-g.pct.stats'):
+        run_command(log, 'asegstats2table ' +
+            ' -m mean --all-segs --stats=rh.w-g.pct.stats ' +
+            ' --tablefile ' + dataDir + 'wg_rh_mean.txt ' +
+            ' --subjects ' + subject_ID + ' --skip', env)
+
+    for hemi in ["lh", "rh"]:
+        for value in ["volume", "area", "thickness"]:
+            for atlas in ["BA_exvivo", "aparc.DKTatlas", "aparc.a2009s",
+                          "aparc"]:
+                outFileName = dataDir + atlas + '_' + hemi + '_' + value + '.txt'
+                if os.path.isfile(statsDir + hemi + "." + atlas + '.stats'):
+                    run_command(log, 'aparcstats2table '+
+                        ' -m ' + value + ' --hemi=' + hemi +
+                        ' --tablefile ' + outFileName +
+                        ' --subjects ' + subject_ID + ' --skip -p ' + atlas,env)
+
+    atlas="aparc.pial"
+    value="area"
+    for hemi in ["lh", "rh"]:
+        outFileName = dataDir + atlas + '_' + hemi + '_' + value + '.txt'
+        if os.path.isfile(statsDir + hemi + '.aparc.pial.stats'):
+            run_command(log, 'aparcstats2table '+
+                ' -m ' + value + ' --hemi=' + hemi +
+                ' --tablefile ' + outFileName +
+                ' --subjects ' + subject_ID + ' --skip -p ' + atlas, env)
+
+    with open(ctx.get_data('FS/FS_initial_files.txt'), encoding="utf-8") as f:
+        files_generated = [x.replace('\n','').split(" ") for x in f.readlines()]
+
+    data_dict={}
+
+    for file_generated in files_generated:
+        if os.path.isfile(dataDir + file_generated[0] + '.txt'):
+            data = read_file(dataDir + file_generated[0] + '.txt')
+        else:
+            data = read_file(ctx.get_data('FS/FS_data_ex/' +
+                                          file_generated[0] + '.txt'))
+        data_dict[file_generated[0]] = data
+
+    data_dict['ID'] = [['ID'], [subject]]
+
+    return data_dict
+
+
+#Quick consistency check
+def check_consistency(data_dict, SUBJECTS_DIR, ctx):
+
+    for file_generated in data_dict.keys():
+        if len(data_dict[file_generated])>2:
+            print("Error in " + file_generated + ': File has more than 2 lines')
+            save_data_NaNs(SUBJECTS_DIR, ctx)
+            sys.exit(-1)
+
+        len0=len(data_dict[file_generated][0])
+        len1=len(data_dict[file_generated][1])
+
+        if len0 != len1:
+            print("Error in " + file_generated + ': Inconsistent # of features')
+            save_data_NaNs(SUBJECTS_DIR, ctx)
+            sys.exit(-1)
+
+
+def fix_aseg_data(data_dict, subjectDir):
+
+    #Split aseg_1 into aseg_global and aseg_volume
+    data_dict['aseg_global'] = [[],[]]
+    data_dict['aseg_global'][0] = [data_dict['aseg_1'][0][0]] + data_dict['aseg_1'][0][46:]
+    data_dict['aseg_global'][1] = [data_dict['aseg_1'][1][0]] + data_dict['aseg_1'][1][46:]
+
+
+    #Variables not needed
+    vars_to_delete = ['CortexVol', 'CerebralWhiteMatterVol', \
+                      'SupraTentorialVolNotVentVox', 'MaskVol', 'SurfaceHoles']
+    ind_to_delete = []
+    for i in range(len(data_dict['aseg_global'][0])):
+        if not data_dict['aseg_global'][0][i] in vars_to_delete:
+            ind_to_delete.append(i)
+
+    data_dict['aseg_global'][0] = [data_dict['aseg_global'][0][x] for x in ind_to_delete]
+    data_dict['aseg_global'][1] = [data_dict['aseg_global'][1][x] for x in ind_to_delete]
+
+    # For some reason, the VentricleChoroidVol is not caught by asegstats2table
+    try:
+        with open(subjectDir + '/stats/aseg.stats', 'r', encoding="utf-8") as f:
+            val=[x.split(',')[3].strip() for x in f.readlines() if 'VentricleChoroidVol' in x]
+    except:
+        val=["NaN"]
+
+    data_dict['aseg_global'][0].append('VentricleChoroidVol')
+    data_dict['aseg_global'][1].append(val[0])
+
+    data_dict['aseg_volume'] = [[],[]]
+    data_dict['aseg_volume'][0] = data_dict['aseg_1'][0][0:46]
+    data_dict['aseg_volume'][1] = data_dict['aseg_1'][1][0:46]
+
+    del data_dict['aseg_1']
+
+    #Remove the WM-hypointensities. No value in any subject
+    cols_to_remove = ['Left-WM-hypointensities', 'Right-WM-hypointensities',
+                  'Left-non-WM-hypointensities', 'Right-non-WM-hypointensities']
+
+    for key in list(data_dict.keys()):
+        if key.startswith('aseg'):
+            sub_keys_to_remove = []
+            for sub_key in data_dict[key][0]:
+                for col in cols_to_remove:
+                    if col in sub_key:
+                        sub_keys_to_remove.append(sub_key)
+
+            for sub_key in sub_keys_to_remove:
+                ind = data_dict[key][0].index(sub_key)
+                del data_dict[key][0][ind]
+                del data_dict[key][1][ind]
+
+    return data_dict
+
+
+def gen_aparc_special(data_dict, subjectDir):
+
+    struct_data = []
+
+    struct_data.append(['aparc.pial_lh_area','TotalSurface','lh.aparc.pial.stats',  'PialSurfArea'])
+    struct_data.append(['aparc.pial_rh_area','TotalSurface','rh.aparc.pial.stats',  'PialSurfArea'])
+    struct_data.append(['aparc_lh_area',     'TotalSurface','lh.aparc.stats',       'WhiteSurfArea'])
+    struct_data.append(['aparc_rh_area',     'TotalSurface','rh.aparc.stats',       'WhiteSurfArea'])
+    struct_data.append(['aparc_lh_thickness',     'GlobalMeanThickness','lh.aparc.stats','MeanThickness'])
+    struct_data.append(['aparc_rh_thickness',     'GlobalMeanThickness','rh.aparc.stats','MeanThickness'])
+
+    for elem in struct_data:
+        data_dict[elem[0]][0].append(elem[1])
+        try:
+            with open(subjectDir + '/stats/' + elem[2], 'r',
+                      encoding="utf-8") as f:
+                v = [x.split(',')[3].strip() for x in f.readlines() if elem[3] in x]
+                data_dict[elem[0]][1].append(v[0])
+        except:
+            data_dict[elem[0]][1].append('NaN')
+
+    return data_dict
+
+
+def bool_FLAIR(data_dict, subjectDir):
+    if os.path.isfile(subjectDir + '/mri/FLAIR.mgz'):
+        data_dict['FLAIR'] = [['Use-T2-FLAIR-for-FreeSurfer'],['1']]
+    else:
+        data_dict['FLAIR'] = [['Use-T2-FLAIR-for-FreeSurfer'],['0']]
+
+    return data_dict
+
+
+def gen_subsegmentation(data_dict, subjectDir, subject, ctx):
+    struct_data = {}
+    struct_data['Brainstem_global'] = [['brainstemSsVolumes.v12.txt'],5]
+    struct_data['ThalamNuclei']     = [['ThalamicNuclei.v10.T1.volumes.txt'],52]
+    struct_data['AmygNuclei_lh']    = [['lh.amygNucVolumes-T1-AN.v21.txt',
+                                        'lh.amygNucVolumes-T1.v21.txt'],10]
+    struct_data['AmygNuclei_rh']    = [['rh.amygNucVolumes-T1-AN.v21.txt',
+                                        'rh.amygNucVolumes-T1.v21.txt'],10]
+    struct_data['HippSubfield_lh']  = [['lh.hippoSfVolumes-T1-AN.v21.txt',
+                                        'lh.hippoSfVolumes-T1.v21.txt'],22]
+    struct_data['HippSubfield_rh']  = [['rh.hippoSfVolumes-T1-AN.v21.txt',
+                                        'rh.hippoSfVolumes-T1.v21.txt'],22]
+
+    for struct in struct_data.keys():
+        found = False
+        data_dict[struct] = [[],[]]
+        for fil in struct_data[struct][0]:
+            final_fil = subjectDir + '/FreeSurfer/mri/' + fil
+            if os.path.isfile(final_fil):
+                with open(final_fil, 'r',encoding="utf-8") as f:
+                    for lin in f.readlines():
+                        lin = lin.replace('\n','').split(' ')
+                        data_dict[struct][0].append(lin[0])
+                        data_dict[struct][1].append(lin[1])
+                found = True
+                break
+
+        if not found:
+            with open(ctx.get_data('FS/FS_sub_headers/' +
+                      struct + '.txt'), encoding="utf-8") as f:
+                data_dict[struct][0] = [x.replace('\n','') for x in f.readlines()]
+                data_dict[struct][0] = ['NaN'] * len(data_dict[struct][0])
+        data_dict[struct][0]=['ID'] + data_dict[struct][0]
+        data_dict[struct][1]=[subject] + data_dict[struct][1]
+    return data_dict
+
+def fix_aparc_data(data_dict, subjectDir):
+
+    # Remove the column "temporalpole" in aparc files.
+    # Unreliable measure: Very few subjects have that measure.
+    for key in list(data_dict.keys()):
+        if key.startswith('aparc'):
+            sub_keys_to_remove = []
+            for sub_key in data_dict[key][0]:
+                if 'temporalpole' in sub_key:
+                    sub_keys_to_remove.append(sub_key)
+            for sub_key in sub_keys_to_remove:
+                ind = data_dict[key][0].index(sub_key)
+
+                if ind != -1:
+                    del data_dict[key][0][ind]
+                    del data_dict[key][1][ind]
+
+
+    #Remove ETIV and BrainSegVolNotVent (They are global)
+    for key in list(data_dict.keys()):
+        if key.startswith('aparc') or key.startswith('BA_exvivo'):
+            sub_keys_to_remove = []
+            for sub_key in  data_dict[key][0]:
+                if sub_key in ['BrainSegVolNotVent' , 'eTIV']:
+                    sub_keys_to_remove.append(sub_key)
+            for sub_key in sub_keys_to_remove:
+                ind = data_dict[key][0].index(sub_key)
+                del data_dict[key][0][ind]
+                del data_dict[key][1][ind]
+
+    #Removing last column for thickness in aparc
+    for key in list(data_dict.keys()):
+        if key.startswith('aparc') and key.endswith('thickness'):
+            sub_keys_to_remove = []
+            for sub_key in data_dict[key][0]:
+                if sub_key.endswith('MeanThickness_thickness'):
+                    sub_keys_to_remove.append(sub_key)
+            for sub_key in sub_keys_to_remove:
+                ind = data_dict[key][0].index(sub_key)
+                del data_dict[key][0][ind]
+                del data_dict[key][1][ind]
+
+    #Removing the last column for areas, also in BA
+    for key in list(data_dict.keys()):
+        if key.endswith('area'):
+            sub_keys_to_remove = []
+            for sub_key in data_dict[key][0]:
+                if sub_key.endswith('WhiteSurfArea_area'):
+                    sub_keys_to_remove.append(sub_key)
+            for sub_key in sub_keys_to_remove:
+                ind = data_dict[key][0].index(sub_key)
+                del data_dict[key][0][ind]
+                del data_dict[key][1][ind]
+
+
+    return data_dict
+
+#Remove first feature in case it is the subject ID (except for ID itself)
+def remove_first_feature(data_dict, subject):
+    for key in list(data_dict.keys()):
+        if key != "ID":
+            if (data_dict[key][1][0] == subject) or \
+               (data_dict[key][1][0] == ('FS_' + subject)) or \
+               (data_dict[key][1][0] == ''):
+                del data_dict[key][0][0]
+                del data_dict[key][1][0]
+    return data_dict
+
+def fix_headers(data_dict):
+
+    #Applying some general replacing rules for the categories
+    replace_rules = [['.', '-'],
+                     ['BA_exvivo', 'BA-exvivo'],
+                     ['AmygNuclei_lh', 'AmygNuclei_lh_volume'],
+                     ['AmygNuclei_rh', 'AmygNuclei_rh_volume'],
+                     ['Brainstem_global', 'Brainstem_global_volume'],
+                     ['HippSubfield_lh', 'HippSubfield_lh_volume'],
+                     ['HippSubfield_rh', 'HippSubfield_rh_volume'],
+                     ['wg_lh_mean','wg_lh_intensity-contrast'],
+                     ['wg_rh_mean','wg_rh_intensity-contrast'],
+                     ['aparc_lh', 'aparc-Desikan_lh'],
+                     ['aparc_rh', 'aparc-Desikan_rh'],
+                     ['FLAIR','Use-T2-FLAIR-for-FreeSurfer']]
+
+    for key in list(data_dict.keys()):
+        new_key=key
+        for rule in replace_rules:
+            if rule[0] in new_key:
+                new_key = new_key.replace(rule[0],rule[1])
+                data_dict[new_key] = data_dict.pop(key)
+
+    #Renaming some special cases
+    structs = [['aseg_global', 'lhSurfaceHoles', 'aseg_lh_number', 'HolesBeforeFixing'],
+               ['aseg_global', 'rhSurfaceHoles', 'aseg_rh_number', 'HolesBeforeFixing'],
+               ['aseg_global', 'BrainSegVol-to-eTIV', 'aseg_global_volume-ratio', 'BrainSegVol-to-eTIV'],
+               ['aseg_global', 'MaskVol-to-eTIV', 'aseg_global_volume-ratio', 'MaskVol-to-eTIV']]
+
+    for struct in structs:
+        index = data_dict[struct[0]][0].index(struct[1])
+        if struct[2] not in data_dict.keys():
+            data_dict[struct[2]] = [struct[3]],[data_dict[struct[0]][1][index]]
+        else:
+            data_dict[struct[2]][0].append(struct[3])
+            data_dict[struct[2]][1].append(data_dict[struct[0]][1][index])
+        del data_dict[struct[0]][0][index]
+        del data_dict[struct[0]][1][index]
+
+
+
+    for metric in ['volume', 'intensity']:
+        old_key = 'aseg_' + metric
+        for i in range(len(data_dict[old_key][0])):
+            if 'Left-' in data_dict[old_key][0][i]:
+                new_name = data_dict[old_key][0][i].replace('Left-','')
+                new_key = 'aseg_lh_' + metric
+
+                if new_key not in data_dict.keys():
+                    data_dict[new_key] = [[],[]]
+                data_dict[new_key][0].append(new_name)
+                data_dict[new_key][1].append(data_dict[old_key][1][i])
+            elif 'Right-' in data_dict[old_key][0][i]:
+                new_name = data_dict[old_key][0][i].replace('Right-','')
+                new_key = 'aseg_rh_' + metric
+
+                if new_key not in data_dict.keys():
+                    data_dict[new_key] = [[],[]]
+                data_dict[new_key][0].append(new_name)
+                data_dict[new_key][1].append(data_dict[old_key][1][i])
+            else:
+                new_name = data_dict[old_key][0][i]
+                new_key = 'aseg_global_' + metric
+                if new_key not in data_dict.keys():
+                    data_dict[new_key] = [[],[]]
+                data_dict[new_key][0].append(new_name)
+                data_dict[new_key][1].append(data_dict[old_key][1][i])
+
+        del data_dict[old_key]
+
+    for i in range(len(data_dict['aseg_global'][0])):
+        if data_dict['aseg_global'][0][i].startswith('lh'):
+            new_name = data_dict['aseg_global'][0][i].replace('lh','').replace('Vol','')
+            data_dict['aseg_lh_volume'][0].append(new_name)
+            data_dict['aseg_lh_volume'][1].append(data_dict['aseg_global'][1][i])
+        elif data_dict['aseg_global'][0][i].startswith('rh'):
+            new_name = data_dict['aseg_global'][0][i].replace('rh','').replace('Vol','')
+            data_dict['aseg_rh_volume'][0].append(new_name)
+            data_dict['aseg_rh_volume'][1].append(data_dict['aseg_global'][1][i])
+        else:
+            new_name = data_dict['aseg_global'][0][i].replace('Vol','')
+            data_dict['aseg_global_volume'][0].append(new_name)
+            data_dict['aseg_global_volume'][1].append(data_dict['aseg_global'][1][i])
+
+    del data_dict['aseg_global']
+
+    # Split ThalamNuclei into Left and Right
+    data_dict['ThalamNuclei_lh_volume'] = [[],[]]
+    data_dict['ThalamNuclei_rh_volume'] = [[],[]]
+    for i in range(len(data_dict['ThalamNuclei'][0])):
+        if "Left" in data_dict['ThalamNuclei'][0][i]:
+            new_name = data_dict['ThalamNuclei'][0][i].replace('Left-','')
+            data_dict['ThalamNuclei_lh_volume'][0].append(new_name)
+            data_dict['ThalamNuclei_lh_volume'][1].append(data_dict['ThalamNuclei'][1][i])
+
+        elif "Right" in data_dict['ThalamNuclei'][0][i]:
+            new_name = data_dict['ThalamNuclei'][0][i].replace('Right-','')
+            data_dict['ThalamNuclei_rh_volume'][0].append(new_name)
+            data_dict['ThalamNuclei_rh_volume'][1].append(data_dict['ThalamNuclei'][1][i])
+    del data_dict['ThalamNuclei']
+
+    #Removing redundant prefix and suffix in BA_exvivo
+    BAs = ['_exvivo_area','_exvivo_thickness', '_exvivo_volume']
+    BA_keys = [key for key in list(data_dict.keys()) if key.startswith('BA-exvivo')]
+    for key in BA_keys:
+        for i in range(len(data_dict[key][0])):
+            for BA in BAs:
+                data_dict[key][0][i] = data_dict[key][0][i].replace(BA,'').replace('rh_','').replace('lh_','')
+
+    #Removing redundant prefix and suffix in aparc
+    aparcs = ['_area','_thickness', '_volume','-area','-thickness', '-volume']
+    aparc_keys = [key for key in list(data_dict.keys()) if key.startswith('aparc')]
+    for key in aparc_keys:
+        for i in range(len(data_dict[key][0])):
+            for aparc in aparcs:
+                data_dict[key][0][i] = data_dict[key][0][i].replace(aparc,'').replace('rh_','').replace('lh_','')
+
+    #Changing weird and inconsistent characters
+    for key in list(data_dict.keys()):
+        for i in range(len(data_dict[key][0])):
+            data_dict[key][0][i] = data_dict[key][0][i].replace('_','-').replace('&','+')
+
+    return data_dict
+
+
+def save_data_NaNs(SUBJECTS_DIR, ctx):
+
+    with open(ctx.get_data('FS/FS_headers.txt'), encoding="utf-8") as f:
+        final_headers = [x.replace('\n','') for x in f.readlines()]
+
+    num_NaNs = len(final_headers) - 1
+
+    with open(SUBJECTS_DIR+'/IDP_files/FS_IDPs.txt','w',encoding="utf-8") as f:
+        values = ['NaN'] * num_NaNs
+        values_str = SUBJECTS_DIR + " " + " ".join(values)
+        f.write(f"{values_str}\n")
+        f.close()
+
+
+def save_data(data_dict, SUBJECTS_DIR, ctx):
+
+    with open(ctx.get_data('FS/FS_headers.txt'), encoding="utf-8") as f:
+        final_headers = [x.replace('\n','') for x in f.readlines()]
+
+    temp_headers={}
+
+    for key in list(data_dict.keys()):
+        if key in ['ID', 'Use-T2-FLAIR-for-FreeSurfer']:
+            temp_headers[key] = data_dict[key][1][0]
+        else:
+            for i in range(len(data_dict[key][0])):
+                temp_headers[key+"_"+data_dict[key][0][i]] = data_dict[key][1][i]
+
+    for x in final_headers:
+        if x not in temp_headers.keys():
+            temp_headers[x] = "NaN"
+
+    with open(SUBJECTS_DIR+'/IDP_files/FS_IDPs.txt','w',encoding="utf-8") as f:
+        values = [temp_headers[x] for x in final_headers]
+        values_str = " ".join(values)
+        f.write(f"{values_str}\n")
+        f.close()
+
+
+def save_headers_info(data_dict, SUBJECTS_DIR, ctx):
+
+    with open(ctx.get_data('FS/FS_final_headers.txt'), encoding="utf-8") as f:
+        final_headers = [x.replace('\n','') for x in f.readlines()]
+
+    temp_headers={}
+
+    for key in list(data_dict.keys()):
+        if key in ['ID', 'Use-T2-FLAIR-for-FreeSurfer']:
+            temp_headers[key] = data_dict[key][1][0]
+        else:
+            for i in range(len(data_dict[key][0])):
+                temp_headers[key+"_"+data_dict[key][0][i]] = data_dict[key][1][i]
+
+    for x in final_headers:
+        if x not in temp_headers.keys():
+            temp_headers[x] = "NaN"
+
+    with open(SUBJECTS_DIR + '/IDP_files/FS_headers_info.txt','w',
+              encoding="utf-8") as f:
+        values = [temp_headers[x] for x in final_headers]
+        values_str = " ".join(values)
+        f.write(f"{values_str}\n")
+
+        f.close()
+
+def bb_FS_get_IDPs(ctx, env):
+
+    subject    = ctx.subject
+    subject_ID = 'FreeSurfer'
+    subjectDir = env['SUBJECTS_DIR']
+    dataDir    = subjectDir + '/data/'
+    headersDir = subjectDir + '/headers/'
+
+    #TODO: Raise an exception
+    if not os.path.isdir(subjectDir):
+        print("Error: FreeSurfer has not been run on this subject")
+        sys.exit(-1)
+
+    check_and_create_dir(dataDir)
+    check_and_create_dir(headersDir)
+
+    data_dict = generate_FS_IDP_files(subjectDir, subject_ID, subject, dataDir,
+                                      headersDir, ctx, env)
+    data_dict = fix_aseg_data(data_dict, subjectDir)
+    data_dict = gen_aparc_special(data_dict, subjectDir)
+    data_dict = gen_subsegmentation(data_dict, subjectDir, subject, ctx)
+    data_dict = bool_FLAIR(data_dict, subjectDir)
+    data_dict = fix_aparc_data(data_dict,subjectDir)
+    data_dict = remove_first_feature(data_dict, subject)
+    data_dict = fix_headers(data_dict)
+
+    check_consistency(data_dict, subjectDir, ctx)
+    save_data(data_dict, subjectDir, ctx)
+
+
+def run(ctx,
+        ThalamicNuclei:             In,
+        rh_entorhinal_exvivo_label: In,
+        logs_dir:                   Ref,
+        FS_IDPs:                    Out):
+
+    with redirect_logging('FS_get_IDPs', outdir=logs_dir):
+
+        env = dict(os.environ, SUBJECTS_DIR=os.getcwd() + "/" + ctx.subject)
+
+        bb_FS_get_IDPs(ctx, env)
diff --git a/bip/pipelines/struct_FS/FS_proc.py b/bip/pipelines/struct_FS/FS_proc.py
index a4bb840b7b3d0993a7d37c1fab21f36afe86f9de..7e3f834b4ea26bccf3a485328ff1e0a55641ff25 100755
--- a/bip/pipelines/struct_FS/FS_proc.py
+++ b/bip/pipelines/struct_FS/FS_proc.py
@@ -26,7 +26,7 @@ def run(ctx,
         fsaverage:                  Ref,
         rh_entorhinal_exvivo_label: Out):
 
-    with redirect_logging('FS_proc', outdir=logs_dir):
+    with redirect_logging(job_name(run), outdir=logs_dir):
 
         # We need to delete the folder because otherwise, FreeSurfer complains
         if os.path.exists(FreeSurfer_dir):
diff --git a/bip/pipelines/struct_FS/FS_segm.py b/bip/pipelines/struct_FS/FS_segm.py
index 27edcade7dc11a6b7ff15a27d2ce9b2d56243db7..0e646224685d391394ca4f0749992b9200356673 100755
--- a/bip/pipelines/struct_FS/FS_segm.py
+++ b/bip/pipelines/struct_FS/FS_segm.py
@@ -25,7 +25,7 @@ def run(ctx,
         FreeSurfer_dir:             Ref,
         ThalamicNuclei:             Out):
 
-    with redirect_logging('FS_segm', outdir=logs_dir):
+    with redirect_logging(job_name(run), outdir=logs_dir):
 
         env = dict(os.environ, SUBJECTS_DIR=os.getcwd() + "/" + ctx.subject)
 
diff --git a/bip/pipelines/struct_FS/__init__.py b/bip/pipelines/struct_FS/__init__.py
new file mode 100755
index 0000000000000000000000000000000000000000..dde0405e44debc0b4b67431c1b3747e734dbdd66
--- /dev/null
+++ b/bip/pipelines/struct_FS/__init__.py
@@ -0,0 +1,31 @@
+#!/usr/bin/env python
+#
+# struct_FS.py - Pipeline with the FreeSurfer processing.
+#
+# Author: Fidel Alfaro Almagro <fidel.alfaroalmagro@ndcn.ox.ac.uk>
+# Author: Paul McCarthy <pauldmccarthy@gmail.com>
+# Author: Michiel Cottaar <michiel.cottaar@ndcn.ox.ac.uk>
+#
+# pylint: disable=C0103,E0602,C0114,C0115,C0116,R0913,R0914,R0915
+# pylint: disable=W0613
+#
+
+import logging
+from bip.utils.log_utils     import job_name
+from bip.pipelines.struct_FS import FS_proc, FS_segm, FS_get_IDPs
+
+log = logging.getLogger(__name__)
+
+def add_to_pipeline(ctx, pipe, tree):
+
+    subj = ctx.subject
+
+    pipe(FS_proc.run,
+         submit=dict(jobtime=200, name=job_name(FS_proc.run, subj)),
+         kwargs={'ctx' : ctx})
+    pipe(FS_segm.run,
+         submit=dict(jobtime=200, name=job_name(FS_segm.run, subj)),
+         kwargs={'ctx' : ctx})
+    pipe(FS_get_IDPs.run,
+         submit=dict(jobtime=200, name=job_name(FS_get_IDPs.run, subj)),
+         kwargs={'ctx' : ctx})
diff --git a/bip/pipelines/struct_FS/struct_FS.py b/bip/pipelines/struct_FS/struct_FS.py
deleted file mode 100755
index 03f4172d0051a41140ee4df081f61918ab5b9c61..0000000000000000000000000000000000000000
--- a/bip/pipelines/struct_FS/struct_FS.py
+++ /dev/null
@@ -1,38 +0,0 @@
-#!/usr/bin/env python
-#
-# struct_FS.py - Pipeline with the FreeSurfer processing.
-#
-# Author: Fidel Alfaro Almagro <fidel.alfaroalmagro@ndcn.ox.ac.uk>
-# Author: Paul McCarthy <pauldmccarthy@gmail.com>
-# Author: Michiel Cottaar <michiel.cottaar@ndcn.ox.ac.uk>
-#
-# pylint: disable=C0103,E0602,C0114,C0115,C0116,R0913,R0914,R0915
-#
-
-import logging
-from bip.utils.log_utils     import redirect_logging
-from bip.pipelines.struct_FS import FS_proc, FS_segm, FS_get_IDPs
-
-log = logging.getLogger(__name__)
-
-def add_to_pipeline(ctx, pipe, tree, targets):
-
-    logs_dir=tree.get('logs_dir')
-
-    subj = ctx.subject 
-
-    with redirect_logging('pipe_struct_FS', outdir=logs_dir):
-        pipe(FS_proc.run,
-             submit=dict(jobtime=200, name="BIP_FS_proc_" + subj),
-             kwargs={'ctx' : ctx})
-        targets.append('rh_entorhinal_exvivo_label')
-        pipe(FS_segm.run,
-             submit=dict(jobtime=200, name="BIP_FS_segm_" + subj),
-             kwargs={'ctx' : ctx})
-        targets.append('ThalamicNuclei')
-        pipe(FS_get_IDPs.run, 
-             submit=dict(jobtime=200, name="BIP_FS_get_IDPs_" + subj), 
-             kwargs={'ctx' : ctx})
-        targets.append('FS_IDPs')
-
-    return pipe, targets
diff --git a/bip/pipelines/struct_T1/T1_QC_CNR_corners.py b/bip/pipelines/struct_T1/T1_QC_CNR_corners.py
index aa1144ab3a1cd49d1704aef64a3512e0966802a6..dd3d960c1f8c464f42f9534c57fc54bfad7ba486 100755
--- a/bip/pipelines/struct_T1/T1_QC_CNR_corners.py
+++ b/bip/pipelines/struct_T1/T1_QC_CNR_corners.py
@@ -30,7 +30,7 @@ def run(ctx,
         logs_dir:                Ref,
         tmp_dir:                 Ref):
 
-    with redirect_logging('T1_QC_CNR_corners', outdir=logs_dir),\
+    with redirect_logging(job_name(run), outdir=logs_dir),\
          tempdir(tmp_dir):
 
         roi_1_1     = tmp_dir + '/roi_1_1.nii.gz'
diff --git a/bip/pipelines/struct_T1/T1_QC_CNR_eyes.py b/bip/pipelines/struct_T1/T1_QC_CNR_eyes.py
index d4a3eabc100d2d897528ab3ff22546faa04556eb..fac3844700b7552653e4761bc4592de8421802af 100755
--- a/bip/pipelines/struct_T1/T1_QC_CNR_eyes.py
+++ b/bip/pipelines/struct_T1/T1_QC_CNR_eyes.py
@@ -40,7 +40,7 @@ def run(ctx,
         logs_dir:              Ref,
         tmp_dir:               Ref):
 
-    with redirect_logging('T1_QC_CNR_eyes', outdir=logs_dir),\
+    with redirect_logging(job_name(run), outdir=logs_dir),\
         tempdir(tmp_dir):
 
         T1_brain_GM_mask_orig = tmp_dir + '/T1_brain_GM_mask_orig.nii.gz'
diff --git a/bip/pipelines/struct_T1/T1_QC_COG.py b/bip/pipelines/struct_T1/T1_QC_COG.py
index 3e48b4e572ede426a4c9b2cb8e455f86bac7a69d..7236b28349407422b7780455aea77591b9e6a6fe 100755
--- a/bip/pipelines/struct_T1/T1_QC_COG.py
+++ b/bip/pipelines/struct_T1/T1_QC_COG.py
@@ -24,7 +24,7 @@ def run(ctx,
         logs_dir:      Ref,
         tmp_dir:       Ref):
 
-    with redirect_logging('T1_QC_COG', outdir=logs_dir),\
+    with redirect_logging(job_name(run), outdir=logs_dir),\
          tempdir(tmp_dir):
 
         T1_tmp_5 = tmp_dir + '/T1_tmp_5.nii.gz'
diff --git a/bip/pipelines/struct_T1/T1_brain_extract.py b/bip/pipelines/struct_T1/T1_brain_extract.py
index 749e59acb6ca8fa28fc933dbaa64623d4df5df06..c068ef1413328765a0825c2567d30866a6f5703b 100755
--- a/bip/pipelines/struct_T1/T1_brain_extract.py
+++ b/bip/pipelines/struct_T1/T1_brain_extract.py
@@ -40,7 +40,7 @@ def run(ctx,
         T1_to_MNI_warp_coef:     Out,
         T1_to_MNI_warp_jac:      Out):
 
-    with redirect_logging('T1_brain_extract', outdir=logs_dir),tempdir(tmp_dir):
+    with redirect_logging(job_name(run), outdir=logs_dir), tempdir(tmp_dir):
 
         T1_tmp_1                  = tmp_dir + '/T1_tmp_1.nii.gz'
         T1_tmp_2                  = tmp_dir + '/T1_tmp_2.nii.gz'
diff --git a/bip/pipelines/struct_T1/T1_defacing.py b/bip/pipelines/struct_T1/T1_defacing.py
index 67a38c1e69cfca947428597e7e70d6551b272944..129c20e919f607883ee0515197de5cf47faf87a8 100755
--- a/bip/pipelines/struct_T1/T1_defacing.py
+++ b/bip/pipelines/struct_T1/T1_defacing.py
@@ -28,7 +28,7 @@ def run(ctx,
         logs_dir:                          Ref,
         tmp_dir:                           Ref):
 
-    with redirect_logging('T1_defacing', outdir=logs_dir),\
+    with redirect_logging(job_name(run), outdir=logs_dir),\
          tempdir(tmp_dir):
 
         T1_tmp_4                  = tmp_dir + '/T1_tmp_4.nii.gz'
diff --git a/bip/pipelines/struct_T1/T1_fast.py b/bip/pipelines/struct_T1/T1_fast.py
index c80d298a34a127ff84149a437d376e2689861602..db37b28f528ba73384cfe7cc304193f5a698f42b 100755
--- a/bip/pipelines/struct_T1/T1_fast.py
+++ b/bip/pipelines/struct_T1/T1_fast.py
@@ -33,7 +33,7 @@ def run(ctx,
         T1_fast_WM_mask:    Out,
         T1_fast_brain_bias: Out):
 
-    with redirect_logging('T1_fast', outdir=logs_dir):
+    with redirect_logging(job_name(run), outdir=logs_dir):
 
         #Run fast
         wrappers.fast(T1_brain, out = T1_fast_dir + '/T1_brain', b=True)
diff --git a/bip/pipelines/struct_T1/T1_first.py b/bip/pipelines/struct_T1/T1_first.py
index 71f15d69f89a704b878513a3052c46ae82749b17..cb39f91e9efabc785e123cdcfefe3714efe3e4f9 100755
--- a/bip/pipelines/struct_T1/T1_first.py
+++ b/bip/pipelines/struct_T1/T1_first.py
@@ -27,7 +27,7 @@ def run(ctx,
         logs_dir:                   Ref,
         T1_first_all_fast_firstseg: Out):
 
-    with redirect_logging('T1_first', outdir=logs_dir):
+    with redirect_logging(job_name(run), outdir=logs_dir):
 
         # Creates a link inside T1_first to T1_unbiased_brain.nii.gz
         if not os.path.exists(T1_first_unbiased_brain):
diff --git a/bip/pipelines/struct_T1/T1_gdc.py b/bip/pipelines/struct_T1/T1_gdc.py
index 35cc6bd5e18b527ab08f4c296b2d7054c893142a..a39aed50a2be5e69ef45e5797af419e4ef25da75 100755
--- a/bip/pipelines/struct_T1/T1_gdc.py
+++ b/bip/pipelines/struct_T1/T1_gdc.py
@@ -12,7 +12,7 @@
 import logging
 from shutil import copyfile
 from pipe_tree import In, Out, Ref
-from bip.utils.log_utils import redirect_logging
+from bip.utils.log_utils import redirect_logging, job_name
 from gradunwarp.core.gradient_unwarp_apply import gradient_unwarp_apply
 
 log = logging.getLogger(__name__)
@@ -24,7 +24,7 @@ def run(ctx,
         logs_dir:                Ref,
         T1_GDC:                  Ref):
 
-    with redirect_logging('T1_gdc', outdir=logs_dir):
+    with redirect_logging(job_name(run), outdir=logs_dir):
 
         if ctx.gdc != '':
             #Calculate and apply the Gradient Distortion Unwarp
diff --git a/bip/pipelines/struct_T1/T1_sienax.py b/bip/pipelines/struct_T1/T1_sienax.py
index ba31328cef99d145e7fe05c48b951db99772a151..6d2be868341c150b52c21f877bbbc97bc2fc8397 100755
--- a/bip/pipelines/struct_T1/T1_sienax.py
+++ b/bip/pipelines/struct_T1/T1_sienax.py
@@ -39,7 +39,7 @@ def run(ctx,
         logs_dir:                            Ref,
         tmp_dir:                             Ref):
 
-    with redirect_logging('T1_sienax', outdir=logs_dir),\
+    with redirect_logging(job_name(run), outdir=logs_dir),\
          tempdir(tmp_dir):
 
         T1_tmp_mat_1 = tmp_dir + '/tmp_mat_1.mat'
diff --git a/bip/pipelines/struct_T1/__init__.py b/bip/pipelines/struct_T1/__init__.py
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..1aef23511c22b724e83a0d080e177f5388164963 100755
--- a/bip/pipelines/struct_T1/__init__.py
+++ b/bip/pipelines/struct_T1/__init__.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python
+#
+# struct_T1.py - Pipeline with the T1w processing.
+#
+# Author: Fidel Alfaro Almagro <fidel.alfaroalmagro@ndcn.ox.ac.uk>
+# Author: Paul McCarthy <pauldmccarthy@gmail.com>
+# Author: Michiel Cottaar <michiel.cottaar@ndcn.ox.ac.uk>
+#
+# pylint: disable=C0103,E0602,C0114,C0115,C0116,R0913,R0914,R0915
+# pylint: disable=W0613
+#
+
+import logging
+from bip.utils.log_utils     import job_name
+from bip.pipelines.struct_T1 import T1_gdc, T1_brain_extract, T1_defacing
+from bip.pipelines.struct_T1 import T1_fast, T1_first, T1_sienax, T1_QC_COG
+from bip.pipelines.struct_T1 import T1_QC_CNR_corners, T1_QC_CNR_eyes
+
+log = logging.getLogger(__name__)
+
+def add_to_pipeline(ctx, pipe, tree):
+
+    subj = ctx.subject
+
+    pipe(T1_gdc.run,
+         submit=dict(jobtime=200, name=job_name(T1_gdc.run, subj)),
+         kwargs={'ctx' : ctx})
+    pipe(T1_brain_extract.run,
+         submit=dict(jobtime=200, name=job_name(T1_brain_extract.run, subj)),
+         kwargs={'ctx' : ctx})
+    pipe(T1_defacing.run,
+         submit=dict(jobtime=200, name=job_name(T1_defacing.run, subj)),
+         kwargs={'ctx' : ctx})
+    pipe(T1_fast.run,
+         submit=dict(jobtime=200, name=job_name(T1_fast.run, subj)),
+         kwargs={'ctx' : ctx})
+    pipe(T1_first.run,
+         submit=dict(jobtime=200, name=job_name(T1_first.run, subj)),
+         kwargs={'ctx' : ctx})
+    pipe(T1_sienax.run,
+         submit=dict(jobtime=200, name=job_name(T1_sienax.run, subj)),
+         kwargs={'ctx' : ctx})
+    pipe(T1_QC_COG.run,
+         submit=dict(jobtime=200, name=job_name(T1_QC_COG.run, subj)),
+         kwargs={'ctx' : ctx})
+    pipe(T1_QC_CNR_corners.run,
+         submit=dict(jobtime=200, name=job_name(T1_QC_CNR_corners.run, subj)),
+         kwargs={'ctx' : ctx})
+    pipe(T1_QC_CNR_eyes.run,
+         submit=dict(jobtime=200, name=job_name(T1_QC_CNR_eyes.run, subj)),
+         kwargs={'ctx' : ctx})
diff --git a/bip/pipelines/struct_T1/struct_T1.py b/bip/pipelines/struct_T1/struct_T1.py
deleted file mode 100755
index b4c6a77740fcb9956053293c1d0e29b39a4ea8e1..0000000000000000000000000000000000000000
--- a/bip/pipelines/struct_T1/struct_T1.py
+++ /dev/null
@@ -1,64 +0,0 @@
-#!/usr/bin/env python
-#
-# struct_T1.py - Pipeline with the T1w processing.
-#
-# Author: Fidel Alfaro Almagro <fidel.alfaroalmagro@ndcn.ox.ac.uk>
-# Author: Paul McCarthy <pauldmccarthy@gmail.com>
-# Author: Michiel Cottaar <michiel.cottaar@ndcn.ox.ac.uk>
-#
-# pylint: disable=C0103,E0602,C0114,C0115,C0116,R0913,R0914,R0915
-#
-
-import logging
-from bip.utils.log_utils     import redirect_logging
-from bip.pipelines.struct_T1 import T1_gdc, T1_brain_extract, T1_defacing
-from bip.pipelines.struct_T1 import T1_fast, T1_first, T1_sienax, T1_QC_COG
-from bip.pipelines.struct_T1 import T1_QC_CNR_corners, T1_QC_CNR_eyes
-
-log = logging.getLogger(__name__)
-
-def add_to_pipeline(ctx, pipe, tree, targets):
-
-    logs_dir=tree.get('logs_dir')
-
-    subj = ctx.subject 
-
-    with redirect_logging('pipe_struct_T1', outdir=logs_dir):
-        pipe(T1_gdc.run,
-             submit=dict(jobtime=200, name="BIP_T1_gdc_" + subj),
-             kwargs={'ctx' : ctx})
-        targets.append('T1_orig_ud_warp')
-        pipe(T1_brain_extract.run,
-             submit=dict(jobtime=200, name="BIP_T1_brain_extract_" + subj),
-             kwargs={'ctx' : ctx})
-        targets.append('T1_brain')
-        pipe(T1_defacing.run,
-             submit=dict(jobtime=200, name="BIP_T1_defacing_" + subj),
-             kwargs={'ctx' : ctx})
-        targets.append('T1_QC_face_mask_inside_brain_mask')
-        pipe(T1_fast.run,
-             submit=dict(jobtime=200, name="BIP_T1_fast_" + subj),
-             kwargs={'ctx' : ctx})
-        targets.append('T1_unbiased_brain')
-        pipe(T1_first.run,
-             submit=dict(jobtime=200, name="BIP_T1_first_" + subj),
-             kwargs={'ctx' : ctx})
-        targets.append('T1_first_all_fast_firstseg')
-        pipe(T1_sienax.run,
-             submit=dict(jobtime=200, name="BIP_T1_sienax_" + subj),
-             kwargs={'ctx' : ctx})
-        targets.append('T1_sienax_report')
-        pipe(T1_QC_COG.run,
-             submit=dict(jobtime=200, name="BIP_T1_QC_COG_" + subj),
-             kwargs={'ctx' : ctx})
-        targets.append('T1_QC_COG')
-        pipe(T1_QC_CNR_corners.run,
-             submit=dict(jobtime=200, name="BIP_T1_QC_CNR_corners_" + subj),
-             kwargs={'ctx' : ctx})
-        targets.append('T1_notNorm_QC_CNR_upper')
-        pipe(T1_QC_CNR_eyes.run,
-             submit=dict(jobtime=200, name="BIP_T1_QC_CNR_eyes_" + subj),
-             kwargs={'ctx' : ctx})
-        targets.append('T1_QC_CNR_eyes')
-
-    return pipe, targets
diff --git a/bip/pipelines/struct_T2_FLAIR/T2_FLAIR_apply_bfc.py b/bip/pipelines/struct_T2_FLAIR/T2_FLAIR_apply_bfc.py
index 5c80b1d277c81cde8e96c664115e4e9737a934ce..a9ba82cccefde53aac5f4dfee04525362e464e54 100755
--- a/bip/pipelines/struct_T2_FLAIR/T2_FLAIR_apply_bfc.py
+++ b/bip/pipelines/struct_T2_FLAIR/T2_FLAIR_apply_bfc.py
@@ -27,7 +27,7 @@ def run(ctx,
         T2_FLAIR_unbiased:       Out,
         T2_FLAIR_unbiased_brain: Out):
 
-    with redirect_logging('T2_FLAIR_apply_bfc', outdir=logs_dir):
+    with redirect_logging(job_name(run), outdir=logs_dir):
 
         #Apply bias field correction to T2_FLAIR warped
         if os.path.isfile(T1_fast_brain_bias):
diff --git a/bip/pipelines/struct_T2_FLAIR/T2_FLAIR_bianca.py b/bip/pipelines/struct_T2_FLAIR/T2_FLAIR_bianca.py
index ef881a9bbaa2ff957f786658a26a690e15ac05aa..d85c2745e45bc6a7b8b619bc58e3a80a0fb3e96c 100755
--- a/bip/pipelines/struct_T2_FLAIR/T2_FLAIR_bianca.py
+++ b/bip/pipelines/struct_T2_FLAIR/T2_FLAIR_bianca.py
@@ -44,7 +44,7 @@ def run(ctx,
         T2_FLAIR_bianca_tot_pvent_deep_volume: Out,
         T2_FLAIR_bianca_volume:                Out):
 
-    with redirect_logging('T2_FLAIR_bianca', outdir=logs_dir):
+    with redirect_logging(job_name(run), outdir=logs_dir):
 
         # Create bianca mask
         wrappers.make_bianca_mask(T1_unbiased, T1_fast_pve_0,
diff --git a/bip/pipelines/struct_T2_FLAIR/T2_FLAIR_brain_extract.py b/bip/pipelines/struct_T2_FLAIR/T2_FLAIR_brain_extract.py
index 62dda36448c77d1aea894c1560f2d5cec43933f9..1fba52cd703e7275591c1bcc2cde5461ba46ca71 100755
--- a/bip/pipelines/struct_T2_FLAIR/T2_FLAIR_brain_extract.py
+++ b/bip/pipelines/struct_T2_FLAIR/T2_FLAIR_brain_extract.py
@@ -38,7 +38,7 @@ def run(ctx,
         logs_dir:                           Ref,
         tmp_dir:                            Ref):
 
-    with redirect_logging('T2_FLAIR_brain_extract', outdir=logs_dir),\
+    with redirect_logging(job_name(run), outdir=logs_dir),\
          tempdir(tmp_dir):
 
         T2_FLAIR_tmp_1_mat = tmp_dir + '/T2_FLAIR_tmp_1.mat'
diff --git a/bip/pipelines/struct_T2_FLAIR/T2_FLAIR_defacing.py b/bip/pipelines/struct_T2_FLAIR/T2_FLAIR_defacing.py
index aa64af2806a7a8fce0076d98a095266026191f4c..d06e5f2aa1b1a8bf19da9ab71cd42f7da7161923 100755
--- a/bip/pipelines/struct_T2_FLAIR/T2_FLAIR_defacing.py
+++ b/bip/pipelines/struct_T2_FLAIR/T2_FLAIR_defacing.py
@@ -31,7 +31,7 @@ def run(ctx,
         logs_dir:                         Ref,
         tmp_dir:                          Ref):
 
-    with redirect_logging('T2_FLAIR_defacing', outdir=logs_dir),\
+    with redirect_logging(job_name(run), outdir=logs_dir),\
          tempdir(tmp_dir):
 
         T2_FLAIR_tmp_1    = tmp_dir + '/T2_FLAIR_tmp_1nii.gz'
diff --git a/bip/pipelines/struct_T2_FLAIR/T2_FLAIR_gdc.py b/bip/pipelines/struct_T2_FLAIR/T2_FLAIR_gdc.py
index d772bd1ca1db6495aa20ce5dfc2a5edfc1081338..7d1fed84d9a2baf2897a3dc0bc8c4078cb65c8c2 100755
--- a/bip/pipelines/struct_T2_FLAIR/T2_FLAIR_gdc.py
+++ b/bip/pipelines/struct_T2_FLAIR/T2_FLAIR_gdc.py
@@ -25,7 +25,7 @@ def run(ctx,
         logs_dir:              Ref,
         T2_FLAIR_GDC:          Ref):
 
-    with redirect_logging('T2_FLAIR_gdc', outdir=logs_dir):
+    with redirect_logging(job_name(run), outdir=logs_dir):
 
         if ctx.gdc != '':
             #Calculate and apply the Gradient Distortion Unwarp
diff --git a/bip/pipelines/struct_T2_FLAIR/__init__.py b/bip/pipelines/struct_T2_FLAIR/__init__.py
old mode 100644
new mode 100755
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..cd379c1c117ab1f864c3afdd00a651b42c1e8d7a
--- a/bip/pipelines/struct_T2_FLAIR/__init__.py
+++ b/bip/pipelines/struct_T2_FLAIR/__init__.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python
+#
+# struct_T2_FLAIR.py - Pipeline with the T2 FLAIR processing.
+#
+# Author: Fidel Alfaro Almagro <fidel.alfaroalmagro@ndcn.ox.ac.uk>
+# Author: Paul McCarthy <pauldmccarthy@gmail.com>
+# Author: Michiel Cottaar <michiel.cottaar@ndcn.ox.ac.uk>
+#
+# pylint: disable=C0103,E0602,C0114,C0115,C0116,R0913,R0914,R0915
+# pylint: disable=W0613
+#
+
+import logging
+from bip.utils.log_utils           import job_name
+from bip.pipelines.struct_T2_FLAIR import T2_FLAIR_gdc, T2_FLAIR_brain_extract
+from bip.pipelines.struct_T2_FLAIR import T2_FLAIR_defacing, T2_FLAIR_apply_bfc
+from bip.pipelines.struct_T2_FLAIR import T2_FLAIR_bianca
+
+log = logging.getLogger(__name__)
+
+def add_to_pipeline(ctx, pipe, tree):
+
+    subj = ctx.subject
+
+    pipe(T2_FLAIR_gdc.run,
+         submit=dict(jobtime=200, name=job_name(T2_FLAIR_gdc.run, subj)),
+         kwargs={'ctx' : ctx})
+    pipe(T2_FLAIR_brain_extract.run,
+         submit=dict(jobtime=200, name=job_name(T2_FLAIR_brain_extract.run,
+                                                subj)),
+         kwargs={'ctx' : ctx})
+    pipe(T2_FLAIR_defacing.run,
+         submit=dict(jobtime=200, name=job_name(T2_FLAIR_defacing.run, subj)),
+         kwargs={'ctx' : ctx})
+    pipe(T2_FLAIR_apply_bfc.run,
+         submit=dict(jobtime=200, name=job_name(T2_FLAIR_apply_bfc.run, subj)),
+         kwargs={'ctx' : ctx})
+    pipe(T2_FLAIR_bianca.run,
+         submit=dict(jobtime=200, name=job_name(T2_FLAIR_bianca.run, subj)),
+         kwargs={'ctx' : ctx})
diff --git a/bip/pipelines/struct_T2_FLAIR/struct_T2_FLAIR.py b/bip/pipelines/struct_T2_FLAIR/struct_T2_FLAIR.py
deleted file mode 100755
index 79138dcf30ff43baa1bdff16a172c5cbef2eed82..0000000000000000000000000000000000000000
--- a/bip/pipelines/struct_T2_FLAIR/struct_T2_FLAIR.py
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/usr/bin/env python
-#
-# struct_T2_FLAIR.py - Pipeline with the T2 FLAIR processing.
-#
-# Author: Fidel Alfaro Almagro <fidel.alfaroalmagro@ndcn.ox.ac.uk>
-# Author: Paul McCarthy <pauldmccarthy@gmail.com>
-# Author: Michiel Cottaar <michiel.cottaar@ndcn.ox.ac.uk>
-#
-# pylint: disable=C0103,E0602,C0114,C0115,C0116,R0913,R0914,R0915
-#
-
-import logging
-from bip.utils.log_utils           import redirect_logging
-from bip.pipelines.struct_T2_FLAIR import T2_FLAIR_gdc, T2_FLAIR_brain_extract
-from bip.pipelines.struct_T2_FLAIR import T2_FLAIR_defacing, T2_FLAIR_apply_bfc
-from bip.pipelines.struct_T2_FLAIR import T2_FLAIR_bianca
-
-log = logging.getLogger(__name__)
-
-def add_to_pipeline(ctx, pipe, tree, targets):
-
-    logs_dir=tree.get('logs_dir')
-
-    subj = ctx.subject 
-
-    with redirect_logging('pipe_struct_T2_FLAIR', outdir=logs_dir):
-        pipe(T2_FLAIR_gdc.run,
-             submit=dict(jobtime=200, name="BIP_T2_FLAIR_gdc_" + subj),
-             kwargs={'ctx' : ctx})
-        targets.append('T2_FLAIR_orig_ud_warp')
-        pipe(T2_FLAIR_brain_extract.run,
-             submit=dict(jobtime=200, name="BIP_T2_FLAIR_brain_extract_" + subj),
-             kwargs={'ctx' : ctx})
-        targets.append('T2_FLAIR_brain')
-        pipe(T2_FLAIR_defacing.run,
-             submit=dict(jobtime=200, name="BIP_T2_FLAIR_defacing_" + subj),
-             kwargs={'ctx' : ctx})
-        targets.append('T2_FLAIR_defacing_mask')
-        pipe(T2_FLAIR_apply_bfc.run,
-             submit=dict(jobtime=200, name="BIP_T2_FLAIR_apply_bfc_" + subj),
-             kwargs={'ctx' : ctx})
-        targets.append('T2_FLAIR_unbiased_brain')
-        pipe(T2_FLAIR_bianca.run,
-             submit=dict(jobtime=200, name="BIP_T2_FLAIR_bianca_" + subj),
-             kwargs={'ctx' : ctx})
-        targets.append('T2_FLAIR_bianca_volume')
-
-
-    return pipe, targets
diff --git a/bip/pipelines/struct_asl/__init__.py b/bip/pipelines/struct_asl/__init__.py
new file mode 100755
index 0000000000000000000000000000000000000000..4a1cd4ef4f7bd775ca057265adb206ca460396a5
--- /dev/null
+++ b/bip/pipelines/struct_asl/__init__.py
@@ -0,0 +1,28 @@
+#!/usr/bin/env python
+#
+# struct_asl.py - Pipeline with the ASL processing.
+#
+# Author: Fidel Alfaro Almagro <fidel.alfaroalmagro@ndcn.ox.ac.uk>
+# Author: Paul McCarthy <pauldmccarthy@gmail.com>
+# Author: Michiel Cottaar <michiel.cottaar@ndcn.ox.ac.uk>
+#
+# pylint: disable=C0103,E0602,C0114,C0115,C0116,R0913,R0914,R0915
+# pylint: disable=W0613
+#
+
+import logging
+from bip.utils.log_utils      import job_name
+from bip.pipelines.struct_asl import asl_proc, asl_get_IDPs
+
+log = logging.getLogger(__name__)
+
+def add_to_pipeline(ctx, pipe, tree):
+
+    subj = ctx.subject
+
+    pipe(asl_proc.run,
+         submit=dict(jobtime=200, name=job_name(asl_proc.run, subj)),
+         kwargs={'ctx' : ctx})
+    pipe(asl_get_IDPs.run,
+         submit=dict(jobtime=200, name=job_name(asl_get_IDPs.run, subj)),
+         kwargs={'ctx' : ctx})
diff --git a/bip/pipelines/struct_asl/asl_get_IDPs.py b/bip/pipelines/struct_asl/asl_get_IDPs.py
index 361e25017f6754cd8e562077c94bb21f1384487b..0a1dd4ebc731e6662e51192d5c00f4773a55959d 100755
--- a/bip/pipelines/struct_asl/asl_get_IDPs.py
+++ b/bip/pipelines/struct_asl/asl_get_IDPs.py
@@ -22,7 +22,7 @@ def run(ctx,
         ASL_region_analysis_dir: Ref,
         ASL_IDPs:                Out):
 
-    with redirect_logging('asl_get_IDPs', outdir=logs_dir):
+    with redirect_logging(job_name(run), outdir=logs_dir):
 
         result = ctx.subject
 
diff --git a/bip/pipelines/struct_asl/asl_proc.py b/bip/pipelines/struct_asl/asl_proc.py
index c6eebed9d632db558302a8f1dee2346d7dcffeef..4f785d42d823ca8ab23c44585d33346ee22b280a 100755
--- a/bip/pipelines/struct_asl/asl_proc.py
+++ b/bip/pipelines/struct_asl/asl_proc.py
@@ -55,7 +55,7 @@ def run(ctx,
         ASL_std_ACBV_calib:            Out,
         region_analysis_gm_csv:        Out):
 
-    with redirect_logging('asl_proc', outdir=logs_dir):
+    with redirect_logging(job_name(run), outdir=logs_dir):
 
         BBASL_ROI_DIR = ctx.get_data("asl/ukb_rois/")
 
diff --git a/bip/pipelines/struct_asl/struct_asl.py b/bip/pipelines/struct_asl/struct_asl.py
deleted file mode 100755
index a62cbdd9d9d0efa7700d982c60541b0c1b7f8164..0000000000000000000000000000000000000000
--- a/bip/pipelines/struct_asl/struct_asl.py
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/usr/bin/env python
-#
-# struct_asl.py - Pipeline with the ASL processing.
-#
-# Author: Fidel Alfaro Almagro <fidel.alfaroalmagro@ndcn.ox.ac.uk>
-# Author: Paul McCarthy <pauldmccarthy@gmail.com>
-# Author: Michiel Cottaar <michiel.cottaar@ndcn.ox.ac.uk>
-#
-# pylint: disable=C0103,E0602,C0114,C0115,C0116,R0913,R0914,R0915
-#
-
-import logging
-from bip.utils.log_utils      import redirect_logging
-from bip.pipelines.struct_asl import asl_proc, asl_get_IDPs
-
-log = logging.getLogger(__name__)
-
-def add_to_pipeline(ctx, pipe, tree, targets):
-
-    logs_dir=tree.get('logs_dir')
-
-    subj = ctx.subject 
-
-    with redirect_logging('pipe_struct_asl', outdir=logs_dir):
-        pipe(asl_proc.run,
-             submit=dict(jobtime=200, name="BIP_asl_proc_" + subj),
-             kwargs={'ctx' : ctx})
-        targets.append('region_analysis_gm_csv')
-        pipe(asl_get_IDPs.run,
-             submit=dict(jobtime=200, name="BIP_asl_get_IDPs_" + subj),
-             kwargs={'ctx' : ctx})
-        targets.append('ASL_IDPs')
-
-    return pipe, targets
diff --git a/bip/pipelines/struct_swMRI/struct_swMRI.py b/bip/pipelines/struct_swMRI/__init__.py
similarity index 50%
rename from bip/pipelines/struct_swMRI/struct_swMRI.py
rename to bip/pipelines/struct_swMRI/__init__.py
index 882e8d7eadeb3a4153466a5c37a1af14b50c6643..42b806add51585602cb9a45a7bc048ce61b625ee 100755
--- a/bip/pipelines/struct_swMRI/struct_swMRI.py
+++ b/bip/pipelines/struct_swMRI/__init__.py
@@ -7,24 +7,19 @@
 # Author: Michiel Cottaar <michiel.cottaar@ndcn.ox.ac.uk>
 #
 # pylint: disable=C0103,E0602,C0114,C0115,C0116,R0913,R0914,R0915
+# pylint: disable=W0613
 #
 
 import logging
-from bip.utils.log_utils        import redirect_logging
+from bip.utils.log_utils        import job_name
 from bip.pipelines.struct_swMRI import swMRI_proc
 
 log = logging.getLogger(__name__)
 
-def add_to_pipeline(ctx, pipe, tree, targets):
+def add_to_pipeline(ctx, pipe, tree):
 
-    logs_dir=tree.get('logs_dir')
+    subj = ctx.subject
 
-    subj = ctx.subject 
-
-    with redirect_logging('pipe_struct_swMRI', outdir=logs_dir):
-        pipe(swMRI_proc.run,
-            submit=dict(jobtime=200, name="BIP_swMRI_proc_" + subj),
-            kwargs={'ctx' : ctx})
-        targets.append('SWI')
-
-    return pipe, targets
+    pipe(swMRI_proc.run,
+        submit=dict(jobtime=200, name=job_name(swMRI_proc.run, subj)),
+        kwargs={'ctx' : ctx})
diff --git a/bip/pipelines/struct_swMRI/swMRI_gdc.py b/bip/pipelines/struct_swMRI/swMRI_gdc.py
index 00aee31304f19da9d5173aa894e3066dcf16dc03..daa322d8a20573152ba90aa387fc07d18c6bfe0a 100755
--- a/bip/pipelines/struct_swMRI/swMRI_gdc.py
+++ b/bip/pipelines/struct_swMRI/swMRI_gdc.py
@@ -24,7 +24,7 @@ def run(ctx,
         logs_dir:                Ref,
         T1_GDC:                  Ref):
 
-    with redirect_logging('T1_gdc', outdir=logs_dir):
+    with redirect_logging(job_name(run), outdir=logs_dir):
 
         if ctx.gdc != '':
             #Calculate and apply the Gradient Distortion Unwarp
diff --git a/bip/pipelines/struct_swMRI/swMRI_proc.py b/bip/pipelines/struct_swMRI/swMRI_proc.py
index 5469cb4386a14428dec3fd91dc574a6dcdc92595..c75c4728d9ffaaa6a7115fd07240898808ccd71c 100755
--- a/bip/pipelines/struct_swMRI/swMRI_proc.py
+++ b/bip/pipelines/struct_swMRI/swMRI_proc.py
@@ -51,7 +51,7 @@ def run(ctx,
         T1_to_SWI_mat:              Out,
         filtered_phase:             Out):
 
-    with redirect_logging('swMRI_proc', outdir=logs_dir):
+    with redirect_logging(job_name(run), outdir=logs_dir):
 
         with open(SWI_num_coils, 'r', encoding="utf-8") as f:
             num_coils = int(f.read())
diff --git a/bip/utils/log_utils.py b/bip/utils/log_utils.py
index 1abf843c9848cdf7274fd57e55b8ea1b79e4a720..1ac4518d850d217372e583f615ef6b1dbdcca27e 100755
--- a/bip/utils/log_utils.py
+++ b/bip/utils/log_utils.py
@@ -22,13 +22,10 @@ from subprocess import check_output
 from fsl import wrappers
 
 
-def run_command(logger, command, **kwargs): This fails
-#def run_command(logger, command, env):
+def run_command(logger, command, **kwargs): 
     try:
         logger.info(command.strip())
-        # This fails
         job_output = check_output(command,shell=True,**kwargs).decode('UTF-8')
-        #job_output = check_output(command,shell=True,env=env).decode('UTF-8')
         logger.info('Output: \t' + job_output.strip())
 
     except Exception as e:
@@ -40,6 +37,10 @@ def run_command(logger, command, **kwargs): This fails
 
     return job_output.strip()
 
+def job_name(function, *args):
+    func = function.__module__.removeprefix('bip.pipelines.')
+    return '_'.join([func] + list(args))
+
 
 def create_formatter():
     """Create a logging.Formatter. """