Commit 5194a1ea authored by Paul McCarthy's avatar Paul McCarthy 🚵
Browse files

TEST: Little tweaks; update notebook

parent 966441e3
......@@ -161,7 +161,7 @@ def gen_test_data(num_vars,
table_headers = {
'variables' : 'ID\tType\tDescription\tDataCoding\tInstancing\tNAValues\tRawLevels\tNewLevels\tParentValues\tChildValues\tClean', # noqa
'variables' : 'ID\tType\tInternalType\tDescription\tDataCoding\tInstancing\tNAValues\tRawLevels\tNewLevels\tParentValues\tChildValues\tClean', # noqa
'datacodings' : 'ID\tNAValues\tRawLevels\tNewLevels',
'categories' : 'ID\tCategory\tVariables',
'types' : 'Type\tClean',
......@@ -169,7 +169,7 @@ table_headers = {
}
table_templates = {
'variables' : '{variable}\t{type}\t\t\t2\t\t\t\t\t\t',
'variables' : '{variable}\t{type}\t\t\t\t2\t\t\t\t\t\t',
'datacodings' : '',
'categories' : '',
'types' : '',
......
......@@ -2753,13 +2753,13 @@
" Cleaning functions: True\n",
" 3066: [parseSpirometryData[cleaner]()]\n",
" 10697: [parseSpirometryData[cleaner]()]\n",
" 40001: [codeToNumeric[cleaner](icd10)]\n",
" 40002: [codeToNumeric[cleaner](icd10)]\n",
" 40006: [codeToNumeric[cleaner](icd10)]\n",
" 40013: [codeToNumeric[cleaner](icd9)]\n",
" 41201: [codeToNumeric[cleaner](icd10)]\n",
" 41202: [codeToNumeric[cleaner](icd10)]\n",
" 41204: [codeToNumeric[cleaner](icd10)]\n",
" 40001: [codeToNumeric[cleaner]()]\n",
" 40002: [codeToNumeric[cleaner]()]\n",
" 40006: [codeToNumeric[cleaner]()]\n",
" 40013: [codeToNumeric[cleaner]()]\n",
" 41201: [codeToNumeric[cleaner]()]\n",
" 41202: [codeToNumeric[cleaner]()]\n",
" 41204: [codeToNumeric[cleaner]()]\n",
"\n",
" Child value replacement: True\n",
" 757: [v6142 == 1] -> [0.]\n",
......@@ -3288,52 +3288,52 @@
"Processing: True\n",
" 1: ('vids', [20001]) -> [binariseCategorical[processor](acrossVisits=True,acrossInstances=True)]\n",
" 2: ('vids', [20002]) -> [binariseCategorical[processor](acrossVisits=True,acrossInstances=True)]\n",
" 2: ('vids', [20003]) -> [binariseCategorical[processor](acrossVisits=True,acrossInstances=True)]\n",
" 3: ('vids', [20004]) -> [binariseCategorical[processor](acrossVisits=True,acrossInstances=True)]\n",
" 4: ('vids', [40001]) -> [binariseCategorical[processor](acrossVisits=True,acrossInstances=True)]\n",
" 5: ('vids', [40002]) -> [binariseCategorical[processor](acrossVisits=True,acrossInstances=True)]\n",
" 6: ('vids', [40006]) -> [binariseCategorical[processor](acrossVisits=True,acrossInstances=True)]\n",
" 7: ('vids', [40011]) -> [binariseCategorical[processor](acrossVisits=True,acrossInstances=True)]\n",
" 8: ('vids', [40012]) -> [binariseCategorical[processor](acrossVisits=True,acrossInstances=True)]\n",
" 9: ('vids', [40013]) -> [binariseCategorical[processor](acrossVisits=True,acrossInstances=True)]\n",
" 10: ('vids', [41200]) -> [binariseCategorical[processor](acrossVisits=True,acrossInstances=True)]\n",
" 11: ('vids', [41201]) -> [binariseCategorical[processor](acrossVisits=True,acrossInstances=True)]\n",
" 12: ('vids', [41202]) -> [binariseCategorical[processor](acrossVisits=True,acrossInstances=True)]\n",
" 13: ('vids', [41203]) -> [binariseCategorical[processor](acrossVisits=True,acrossInstances=True)]\n",
" 14: ('vids', [41204]) -> [binariseCategorical[processor](acrossVisits=True,acrossInstances=True)]\n",
" 15: ('vids', [41205]) -> [binariseCategorical[processor](acrossVisits=True,acrossInstances=True)]\n",
" 16: ('vids', [41210]) -> [binariseCategorical[processor](acrossVisits=True,acrossInstances=True)]\n",
" 17: ('vids', [41256]) -> [binariseCategorical[processor](acrossVisits=True,acrossInstances=True)]\n",
" 18: ('vids', [41258]) -> [binariseCategorical[processor](acrossVisits=True,acrossInstances=True)]\n",
" 19: ('vids', [41270]) -> [binariseCategorical[processor](acrossVisits=True,acrossInstances=True)]\n",
" 20: ('vids', [41271]) -> [binariseCategorical[processor](acrossVisits=True,acrossInstances=True)]\n",
" 21: ('vids', [41272]) -> [binariseCategorical[processor](acrossVisits=True,acrossInstances=True)]\n",
" 22: ('vids', [41273]) -> [binariseCategorical[processor](acrossVisits=True,acrossInstances=True)]\n",
" 23: ('all_independent_except', [20001, 20002, 20003, 20004, 40001, 40002, 40006, 40011, 40012, 40013, 41200, 41201, 41202, 41203, 41204, 41205, 41210, 41256, 41258, 41270, 41271, 41272, 41273]) -> [removeIfSparse[processor](minpres=51,maxcat=0.99,minstd=1e-06,abscat=False)]\n",
" 24: ('vids', [20001]) -> [removeIfSparse[processor](mincat=10)]\n",
" 25: ('vids', [20002]) -> [removeIfSparse[processor](mincat=10)]\n",
" 25: ('vids', [20003]) -> [removeIfSparse[processor](mincat=10)]\n",
" 26: ('vids', [20004]) -> [removeIfSparse[processor](mincat=10)]\n",
" 27: ('vids', [40001]) -> [removeIfSparse[processor](mincat=10)]\n",
" 28: ('vids', [40002]) -> [removeIfSparse[processor](mincat=10)]\n",
" 29: ('vids', [40006]) -> [removeIfSparse[processor](mincat=10)]\n",
" 30: ('vids', [40011]) -> [removeIfSparse[processor](mincat=10)]\n",
" 31: ('vids', [40012]) -> [removeIfSparse[processor](mincat=10)]\n",
" 32: ('vids', [40013]) -> [removeIfSparse[processor](mincat=10)]\n",
" 33: ('vids', [41200]) -> [removeIfSparse[processor](mincat=10)]\n",
" 34: ('vids', [41201]) -> [removeIfSparse[processor](mincat=10)]\n",
" 35: ('vids', [41202]) -> [removeIfSparse[processor](mincat=10)]\n",
" 36: ('vids', [41203]) -> [removeIfSparse[processor](mincat=10)]\n",
" 37: ('vids', [41204]) -> [removeIfSparse[processor](mincat=10)]\n",
" 38: ('vids', [41205]) -> [removeIfSparse[processor](mincat=10)]\n",
" 39: ('vids', [41210]) -> [removeIfSparse[processor](mincat=10)]\n",
" 40: ('vids', [41256]) -> [removeIfSparse[processor](mincat=10)]\n",
" 41: ('vids', [41258]) -> [removeIfSparse[processor](mincat=10)]\n",
" 42: ('vids', [41270]) -> [removeIfSparse[processor](mincat=10)]\n",
" 43: ('vids', [41271]) -> [removeIfSparse[processor](mincat=10)]\n",
" 44: ('vids', [41272]) -> [removeIfSparse[processor](mincat=10)]\n",
" 45: ('vids', [41273]) -> [removeIfSparse[processor](mincat=10)]\n",
" 46: ('all', []) -> [removeIfRedundant[processor](0.99,0.2)]\n"
" 3: ('vids', [20003]) -> [binariseCategorical[processor](acrossVisits=True,acrossInstances=True)]\n",
" 4: ('vids', [20004]) -> [binariseCategorical[processor](acrossVisits=True,acrossInstances=True)]\n",
" 5: ('vids', [40001]) -> [binariseCategorical[processor](acrossVisits=True,acrossInstances=True)]\n",
" 6: ('vids', [40002]) -> [binariseCategorical[processor](acrossVisits=True,acrossInstances=True)]\n",
" 7: ('vids', [40006]) -> [binariseCategorical[processor](acrossVisits=True,acrossInstances=True)]\n",
" 8: ('vids', [40011]) -> [binariseCategorical[processor](acrossVisits=True,acrossInstances=True)]\n",
" 9: ('vids', [40012]) -> [binariseCategorical[processor](acrossVisits=True,acrossInstances=True)]\n",
" 10: ('vids', [40013]) -> [binariseCategorical[processor](acrossVisits=True,acrossInstances=True)]\n",
" 11: ('vids', [41200]) -> [binariseCategorical[processor](acrossVisits=True,acrossInstances=True)]\n",
" 12: ('vids', [41201]) -> [binariseCategorical[processor](acrossVisits=True,acrossInstances=True)]\n",
" 13: ('vids', [41202]) -> [binariseCategorical[processor](acrossVisits=True,acrossInstances=True)]\n",
" 14: ('vids', [41203]) -> [binariseCategorical[processor](acrossVisits=True,acrossInstances=True)]\n",
" 15: ('vids', [41204]) -> [binariseCategorical[processor](acrossVisits=True,acrossInstances=True)]\n",
" 16: ('vids', [41205]) -> [binariseCategorical[processor](acrossVisits=True,acrossInstances=True)]\n",
" 17: ('vids', [41210]) -> [binariseCategorical[processor](acrossVisits=True,acrossInstances=True)]\n",
" 18: ('vids', [41256]) -> [binariseCategorical[processor](acrossVisits=True,acrossInstances=True)]\n",
" 19: ('vids', [41258]) -> [binariseCategorical[processor](acrossVisits=True,acrossInstances=True)]\n",
" 20: ('vids', [41270]) -> [binariseCategorical[processor](acrossVisits=True,acrossInstances=True)]\n",
" 21: ('vids', [41271]) -> [binariseCategorical[processor](acrossVisits=True,acrossInstances=True)]\n",
" 22: ('vids', [41272]) -> [binariseCategorical[processor](acrossVisits=True,acrossInstances=True)]\n",
" 23: ('vids', [41273]) -> [binariseCategorical[processor](acrossVisits=True,acrossInstances=True)]\n",
" 24: ('all_independent_except', [20001, 20002, 20003, 20004, 40001, 40002, 40006, 40011, 40012, 40013, 41200, 41201, 41202, 41203, 41204, 41205, 41210, 41256, 41258, 41270, 41271, 41272, 41273]) -> [removeIfSparse[processor](minpres=51,maxcat=0.99,minstd=1e-06,abscat=False)]\n",
" 25: ('vids', [20001]) -> [removeIfSparse[processor](mincat=10)]\n",
" 26: ('vids', [20002]) -> [removeIfSparse[processor](mincat=10)]\n",
" 27: ('vids', [20003]) -> [removeIfSparse[processor](mincat=10)]\n",
" 28: ('vids', [20004]) -> [removeIfSparse[processor](mincat=10)]\n",
" 29: ('vids', [40001]) -> [removeIfSparse[processor](mincat=10)]\n",
" 30: ('vids', [40002]) -> [removeIfSparse[processor](mincat=10)]\n",
" 31: ('vids', [40006]) -> [removeIfSparse[processor](mincat=10)]\n",
" 32: ('vids', [40011]) -> [removeIfSparse[processor](mincat=10)]\n",
" 33: ('vids', [40012]) -> [removeIfSparse[processor](mincat=10)]\n",
" 34: ('vids', [40013]) -> [removeIfSparse[processor](mincat=10)]\n",
" 35: ('vids', [41200]) -> [removeIfSparse[processor](mincat=10)]\n",
" 36: ('vids', [41201]) -> [removeIfSparse[processor](mincat=10)]\n",
" 37: ('vids', [41202]) -> [removeIfSparse[processor](mincat=10)]\n",
" 38: ('vids', [41203]) -> [removeIfSparse[processor](mincat=10)]\n",
" 39: ('vids', [41204]) -> [removeIfSparse[processor](mincat=10)]\n",
" 40: ('vids', [41205]) -> [removeIfSparse[processor](mincat=10)]\n",
" 41: ('vids', [41210]) -> [removeIfSparse[processor](mincat=10)]\n",
" 42: ('vids', [41256]) -> [removeIfSparse[processor](mincat=10)]\n",
" 43: ('vids', [41258]) -> [removeIfSparse[processor](mincat=10)]\n",
" 44: ('vids', [41270]) -> [removeIfSparse[processor](mincat=10)]\n",
" 45: ('vids', [41271]) -> [removeIfSparse[processor](mincat=10)]\n",
" 46: ('vids', [41272]) -> [removeIfSparse[processor](mincat=10)]\n",
" 47: ('vids', [41273]) -> [removeIfSparse[processor](mincat=10)]\n",
" 48: ('all', []) -> [removeIfRedundant[processor](0.99,0.2)]\n"
]
}
],
......
%% Cell type:markdown id: tags:
![image.png](attachment:image.png)
# `funpack`
> Paul McCarthy <paul.mccarthy@ndcn.ox.ac.uk> ([WIN@FMRIB](https://www.win.ox.ac.uk/))
`funpack` is a command-line program which you can use to extract data from UK BioBank (and other tabular) data.
You can give `funpack` one or more input files (e.g. `.csv`, `.tsv`), and it will merge them together, perform some preprocessing, and produce a single output file.
A large number of rules are built into `funpack` which are specific to the UK BioBank data set. But you can control and customise everything that `funpack` does to your data, including which rows and columns to extract, and which cleaning/processing steps to perform on each column.
The `funpack` source code is available at https://git.fmrib.ox.ac.uk/fsl/funpack. You can install `funpack` into a Python environment using `pip`:
pip install fmrib-unpack
Get command-line help by typing:
funpack -h
*The examples in this notebook assume that you have installed `funpack` 1.4.0 or newer.*
%% Cell type:code id: tags:
``` bash
funpack -V
```
%%%% Output: stream
funpack 1.4.0
%% Cell type:markdown id: tags:
### Contents
1. [Overview](#Overview)
1. [Import](#1.-Import)
2. [Cleaning](#2.-Cleaning)
3. [Processing](#3.-Processing)
4. [Export](#4.-Export)
2. [Examples](#Examples)
3. [Import examples](#Import-examples)
1. [Selecting variables (columns)](#Selecting-variables-(columns))
1. [Selecting individual variables](#Selecting-individual-variables)
2. [Selecting variable ranges](#Selecting-variable-ranges)
3. [Selecting variables with a file](#Selecting-variables-with-a-file)
4. [Selecting variables from pre-defined categories](#Selecting-variables-from-pre-defined-categories)
2. [Selecting subjects (rows)](#Selecting-subjects-(rows))
1. [Selecting individual subjects](#Selecting-individual-subjects)
2. [Selecting subject ranges](#Selecting-subject-ranges)
3. [Selecting subjects from a file](#Selecting-subjects-from-a-file)
4. [Selecting subjects by variable value](#Selecting-subjects-by-variable-value)
5. [Excluding subjects](#Excluding-subjects)
3. [Selecting visits](#Selecting-visits)
4. [Merging multiple input files](#Merging-multiple-input-files)
1. [Merging by subject](#Merging-by-subject)
2. [Merging by column](#Merging-by-column)
3. [Naive merging](#Naive-merging)
4. [Cleaning examples](#Cleaning-examples)
1. [NA insertion](#NA-insertion)
2. [Variable-specific cleaning functions](#Variable-specific-cleaning-functions)
3. [Categorical recoding](#Categorical-recoding)
4. [Child value replacement](#Child-value-replacement)
5. [Processing examples](#Processing-examples)
1. [Sparsity check](#Sparsity-check)
2. [Redundancy check](#Redundancy-check)
3. [Categorical binarisation](#Categorical-binarisation)
6. [Custom cleaning, processing and loading - funpack plugins](#Custom-cleaning,-processing-and-loading---funpack-plugins)
1. [Custom cleaning functions](#Custom-cleaning-functions)
2. [Custom processing functions](#Custom-processing-functions)
3. [Custom file loaders](#Custom-file-loaders)
7. [Miscellaneous topics](#Miscellaneous-topics)
1. [Non-numeric data](#Non-numeric-data)
2. [Dry run](#Dry-run)
3. [Built-in rules](#Built-in-rules)
4. [Using a configuration file](#Using-a-configuration-file)
5. [Reporting unknown variables](#Reporting-unknown-variables)
6. [Low-memory mode](#Low-memory-mode)
%% Cell type:markdown id: tags:
# Overview
`funpack` performs the following steps:
## 1. Import
All data files are loaded in, unwanted columns and subjects are dropped, and the data files are merged into a single table (a.k.a. data frame). Multiple files can be merged according to an index column (e.g. subject ID). Or, if the input files contain the same columns/subjects, they can be naively concatenated along rows or columns.
## 2. Cleaning
The following cleaning steps are applied to each column:
1. **NA value replacement:** Specific values for some columns are replaced with NA, for example, variables where a value of `-1` indicates *Do not know*.
2. **Variable-specific cleaning functions:** Certain columns are re-formatted - for example, the [ICD10](https://en.wikipedia.org/wiki/ICD-10) disease codes can be converted to integer representations.
3. **Categorical recoding:** Certain categorical columns are re-coded.
4. **Child value replacement:** NA values within some columns which are dependent upon other columns may have values inserted based on the values of their parent columns.
## 3. Processing
During the processing stage, columns may be removed, merged, or expanded into additional columns. For example, a categorical column may be expanded into a set of binary columns, one for each category.
A column may also be removed on the basis of being too sparse, or being redundant with respect to another column.
## 4. Export
The processed data can be saved as a `.csv`, `.tsv`, or `.hdf5` file.
%% Cell type:markdown id: tags:
# Examples
Throughout these examples, we are going to use a few command line options, which you will probably **not** normally want to use:
- `-ow` (short for `--overwrite`): This tells `funpack` not to complain if the output file already exists.
- `-q` (short for `--quiet`): This tells `funpack` to be quiet.
Without the `-q` option, `funpack` can be quite verbose, which can be annoying, but is very useful when things go wrong. A good strategy is to tell `funpack` to produce verbose output using the `--noisy` (`-n` for short) option, and to send all of its output to a log file with the `--log_file` (or `-lf`) option. For example:
funpack -n -n -n -lf log.txt out.tsv in.tsv
Here's the first example input data set, with UK BioBank-style column names:
%% Cell type:code id: tags:
``` bash
cat data_01.tsv
```
%%%% Output: stream
eid 1-0.0 2-0.0 3-0.0 4-0.0 5-0.0 6-0.0 7-0.0 8-0.0 9-0.0 10-0.0
1 31 65 10 11 84 22 56 65 90 12
2 56 52 52 42 89 35 3 65 50 67
3 45 84 20 84 93 36 96 62 48 59
4 7 46 37 48 80 20 18 72 37 27
5 8 86 51 68 80 84 11 28 69 10
6 6 29 85 59 7 46 14 60 73 80
7 24 49 41 46 92 23 39 68 7 63
8 80 92 97 30 92 83 98 36 6 23
9 84 59 89 79 16 12 95 73 2 62
10 23 96 67 41 8 20 97 57 59 23
%% Cell type:markdown id: tags:
The numbers in each column name typically represent:
1. The variable ID
2. The visit, for variables which were collected at multiple points in time.
3. The "instance", for multi-valued variables.
Note that one **variable** is typically associated with several **columns**, although we're keeping things simple for this first example - there is only one visit for each variable, and there are no multi-valued variables.
> _Most but not all_ variables in the UK BioBank contain data collected at different visits, the times that the participants visited a UK BioBank assessment centre. However there are some variables (e.g. [ICD10 diagnosis codes](https://biobank.ctsu.ox.ac.uk/crystal/field.cgi?id=41202)) for which this is not the case.
%% Cell type:markdown id: tags:
# Import examples
## Selecting variables (columns)
You can specify which variables you want to load in the following ways, using the `--variable` (`-v` for short) and `--category` (`-c` for short) command line options:
* By variable ID
* By variable ranges
* By a text file which contains the IDs you want to keep.
* By pre-defined variable categories
* By column name
### Selecting individual variables
Simply provide the IDs of the variables you want to extract:
%% Cell type:code id: tags:
``` bash
funpack -q -ow -v 1 -v 5 out.tsv data_01.tsv
cat out.tsv
```
%%%% Output: stream
eid 1-0.0 5-0.0
1 31 84.0
2 56 89.0
3 45 93.0
4 7 80.0
5 8 80.0
6 6 7.0
7 24 92.0
8 80 92.0
9 84 16.0
10 23 8.0
%% Cell type:markdown id: tags:
### Selecting variable ranges
The `--variable`/`-v` option accepts MATLAB-style ranges of the form `start:step:stop` (where the `stop` is inclusive):
%% Cell type:code id: tags:
``` bash
funpack -q -ow -v 1:3:10 out.tsv data_01.tsv
cat out.tsv
```
%%%% Output: stream
eid 1-0.0 4-0.0 7-0.0 10-0.0
1 31 11.0 56 12
2 56 42.0 3 67
3 45 84.0 96 59
4 7 48.0 18 27
5 8 68.0 11 10
6 6 59.0 14 80
7 24 46.0 39 63
8 80 30.0 98 23
9 84 79.0 95 62
10 23 41.0 97 23
%% Cell type:markdown id: tags:
### Selecting variables with a file
If your variables of interest are listed in a plain-text file, you can simply pass that file:
%% Cell type:code id: tags:
``` bash
echo -e "1\n6\n9" > vars.txt
funpack -q -ow -v vars.txt out.tsv data_01.tsv
cat out.tsv
```
%%%% Output: stream
eid 1-0.0 6-0.0 9-0.0
1 31 22.0 90
2 56 35.0 50
3 45 36.0 48
4 7 20.0 37
5 8 84.0 69
6 6 46.0 73
7 24 23.0 7
8 80 83.0 6
9 84 12.0 2
10 23 20.0 59
%% Cell type:markdown id: tags:
### Selecting variables from pre-defined categories
Some UK BioBank-specific categories are baked into `funpack`, but you can also define your own categories - you just need to create a `.tsv` file, and pass it to `funpack` via the `--category_file` (`-cf` for short) option:
%% Cell type:code id: tags:
``` bash
echo -e "ID\tCategory\tVariables" > custom_categories.tsv
echo -e "1\tCool variables\t1:5,7" >> custom_categories.tsv
echo -e "2\tUncool variables\t6,8:10" >> custom_categories.tsv
cat custom_categories.tsv
```
%%%% Output: stream
ID Category Variables
1 Cool variables 1:5,7
2 Uncool variables 6,8:10
%% Cell type:markdown id: tags:
Use the `--category` (`-c` for short) option to select categories to output. You can refer to categories by their ID:
%% Cell type:code id: tags:
``` bash
funpack -q -ow -cf custom_categories.tsv -c 1 out.tsv data_01.tsv
cat out.tsv
```
%%%% Output: stream
eid 1-0.0 2-0.0 3-0.0 4-0.0 5-0.0 7-0.0
1 31 65 10.0 11.0 84.0 56
2 56 52 52.0 42.0 89.0 3
3 45 84 20.0 84.0 93.0 96
4 7 46 37.0 48.0 80.0 18
5 8 86 51.0 68.0 80.0 11
6 6 29 85.0 59.0 7.0 14
7 24 49 41.0 46.0 92.0 39
8 80 92 97.0 30.0 92.0 98
9 84 59 89.0 79.0 16.0 95
10 23 96 67.0 41.0 8.0 97
%% Cell type:markdown id: tags:
Or by name:
%% Cell type:code id: tags:
``` bash
funpack -q -ow -cf custom_categories.tsv -c uncool out.tsv data_01.tsv
cat out.tsv
```
%%%% Output: stream
eid 6-0.0 8-0.0 9-0.0 10-0.0
1 22.0 65 90 12
2 35.0 65 50 67
3 36.0 62 48 59
4 20.0 72 37 27
5 84.0 28 69 10
6 46.0 60 73 80
7 23.0 68 7 63
8 83.0 36 6 23
9 12.0 73 2 62
10 20.0 57 59 23
%% Cell type:markdown id: tags:
### Selecting column names
If you are working with data that has non-UK BioBank style column names, you can use the `--column` (`-co` for short) option to select individual columns by their name, rather than the variable with which they are associated. The `--column` option accepts full column names, and also shell-style wildcard patterns:
%% Cell type:code id: tags:
``` bash
funpack -q -ow -co 4-0.0 -co "??-0.0" out.tsv data_01.tsv
cat out.tsv
```
%%%% Output: stream
eid 4-0.0 10-0.0
1 11.0 12
2 42.0 67
3 84.0 59
4 48.0 27
5 68.0 10
6 59.0 80
7 46.0 63
8 30.0 23
9 79.0 62
10 41.0 23
%% Cell type:markdown id: tags:
## Selecting subjects (rows)
`funpack` assumes that the first column in every input file is a subject ID. You can specify which subjects you want to load via the `--subject` (`-s` for short) option. You can specify subjects in the same way that you specified variables above, and also:
* By specifying a conditional expression on variable values - only subjects for which the expression evaluates to true will be imported
* By specifying subjects to exclude
### Selecting individual subjects
%% Cell type:code id: tags:
``` bash
funpack -q -ow -s 1 -s 3 -s 5 out.tsv data_01.tsv
cat out.tsv
```
%%%% Output: stream
eid 1-0.0 2-0.0 3-0.0 4-0.0 5-0.0 6-0.0 7-0.0 8-0.0 9-0.0 10-0.0
1 31 65 10.0 11.0 84.0 22.0 56 65 90 12
3 45 84 20.0 84.0 93.0 36.0 96 62 48 59
5 8 86 51.0 68.0 80.0 84.0 11 28 69 10
%% Cell type:markdown id: tags:
### Selecting subject ranges
%% Cell type:code id: tags:
``` bash
funpack -q -ow -s 2:2:10 out.tsv data_01.tsv
cat out.tsv
```
%%%% Output: stream
eid 1-0.0 2-0.0 3-0.0 4-0.0 5-0.0 6-0.0 7-0.0 8-0.0 9-0.0 10-0.0
2 56 52 52.0 42.0 89.0 35.0 3 65 50 67
4 7 46 37.0 48.0 80.0 20.0 18 72 37 27
6 6 29 85.0 59.0 7.0 46.0 14 60 73 80
8 80 92 97.0 30.0 92.0 83.0 98 36 6 23
10 23 96 67.0 41.0 8.0 20.0 97 57 59 23
%% Cell type:markdown id: tags:
### Selecting subjects from a file
%% Cell type:code id: tags:
``` bash
echo -e "5\n6\n7\n8\n9\n10" > subjects.txt
funpack -q -ow -s subjects.txt out.tsv data_01.tsv
cat out.tsv
```
%%%% Output: stream
eid 1-0.0 2-0.0 3-0.0 4-0.0 5-0.0 6-0.0 7-0.0 8-0.0 9-0.0 10-0.0
5 8 86 51.0 68.0 80.0 84.0 11 28 69 10
6 6 29 85.0 59.0 7.0 46.0 14 60 73 80
7 24 49 41.0 46.0 92.0 23.0 39 68 7 63
8 80 92 97.0 30.0 92.0 83.0 98 36 6 23
9 84 59 89.0 79.0 16.0 12.0 95 73 2 62
10 23 96 67.0 41.0 8.0 20.0 97 57 59 23
%% Cell type:markdown id: tags:
### Selecting subjects by variable value
The `--subject` option accepts *variable expressions* - you can write an expression performing numerical comparisons on variables (denoted with a leading `v`) and combine these expressions using boolean algebra. Only subjects for which the expression evaluates to true will be imported. For example, to only import subjects where variable 1 is greater than 10, and variable 2 is less than 70, you can type:
%% Cell type:code id: tags:
``` bash
funpack -q -ow -sp -s "v1 > 10 && v2 < 70" out.tsv data_01.tsv
cat out.tsv
```
%%%% Output: stream
eid 1-0.0 2-0.0 3-0.0 4-0.0 5-0.0 6-0.0 7-0.0 8-0.0 9-0.0 10-0.0
1 31 65 10.0 11.0 84.0 22.0 56 65 90 12
2 56 52 52.0 42.0 89.0 35.0 3 65 50 67
7 24 49 41.0 46.0 92.0 23.0 39 68 7 63
9 84 59 89.0 79.0 16.0 12.0 95 73 2 62
%% Cell type:markdown id: tags:
The following symbols can be used in variable expressions:
| Symbol | Meaning |
|---------------------------|--------------------------|
| `==` | equal to |
| `!=` | not equal to |
| `>` | greater than |
| `>=` | greater than or equal to |
| `<` | less than |
| `<=` | less than or equal to |
| `na` | N/A |
| `&&` | logical and |
| <code>&#x7c;&#x7c;</code> | logical or |
| `~` | logical not |
| `()` | to denote precedence |
### Excluding subjects
The `--exclude` (`-ex` for short) option allows you to exclude subjects - it accepts individual IDs, an ID range, or a file containing IDs. The `--exclude`/`-ex` option takes precedence over the `--subject`/`-s` option:
%% Cell type:code id: tags:
``` bash
funpack -q -ow -s 1:8 -ex 5:10 out.tsv data_01.tsv
cat out.tsv
```
%%%% Output: stream
eid 1-0.0 2-0.0 3-0.0 4-0.0 5-0.0 6-0.0 7-0.0 8-0.0 9-0.0 10-0.0
1 31 65 10.0 11.0 84.0 22.0 56 65 90 12
2 56 52 52.0 42.0 89.0 35.0 3 65 50 67
3 45 84 20.0 84.0 93.0 36.0 96 62 48 59
4 7 46 37.0 48.0 80.0 20.0 18 72 37 27
%% Cell type:markdown id: tags:
## Selecting visits
%% Cell type:markdown id: tags:
Many variables in the UK BioBank data contain observations at multiple points in time, or visits. `funpack` allows you to specify which visits you are interested in. Here is an example data set with variables that have data for multiple visits (remember that the second number in the column names denotes the visit):
%% Cell type:code id: tags:
``` bash
cat data_02.tsv
```
%%%% Output: stream
eid 1-0.0 2-0.0 2-1.0 2-2.0 3-0.0 3-1.0 4-0.0 5-0.0
1 86 76 82 75 34 99 50 5
2 20 25 40 44 30 57 54 44
3 85 2 48 42 23 77 84 27
4 23 30 18 97 44 55 97 20
5 83 45 76 51 18 64 8 33
%% Cell type:markdown id: tags:
We can use the `--visit` (`-vi` for short) option to get just the last visit for each variable:
%% Cell type:code id: tags:
``` bash
funpack -q -ow -vi last out.tsv data_02.tsv
cat out.tsv
```
%%%% Output: stream
eid 1-0.0 2-2.0 3-1.0 4-0.0 5-0.0
1 86 75 99.0 50.0 5.0
2 20 44 57.0 54.0 44.0
3 85 42 77.0 84.0 27.0
4 23 97 55.0 97.0 20.0
5 83 51 64.0 8.0 33.0
%% Cell type:markdown id: tags:
You can also specify which visit you want by its number:
%% Cell type:code id: tags:
``` bash
funpack -q -ow -vi 1 out.tsv data_02.tsv
cat out.tsv
```
%%%% Output: stream
eid 2-1.0 3-1.0
1 82 99.0
2 40 57.0
3 48 77.0
4 18 55.0
5 76 64.0
%% Cell type:markdown id: tags:
> Variables which are not associated with specific visits (e.g. [ICD10 diagnosis codes](https://biobank.ctsu.ox.ac.uk/crystal/field.cgi?id=41202)) will not be affected by the `-vi` option.
%% Cell type:markdown id: tags:
## Merging multiple input files
If your data is split across multiple files, you can specify how `funpack` should merge them together.
### Merging by subject
For example, let's say we have these two input files (shown side-by-side):
%% Cell type:code id: tags:
``` bash
echo " " | paste data_03.tsv - data_04.tsv
```
%%%% Output: stream
eid 1-0.0 2-0.0 3-0.0 eid 4-0.0 5-0.0 6-0.0
1 89 47 26 2 19 17 62
2 94 37 70 3 41 12 7
3 63 5 97 4 8 86 9
4 98 97 91 5 7 65 71
5 37 10 11 6 3 23 15
%% Cell type:markdown id: tags:
Note that each file contains different variables, and different, but overlapping, subjects. By default, when you pass these files to `funpack`, it will output the intersection of the two files (more formally known as an *inner join*), i.e. subjects which are present in both files:
%% Cell type:code id: tags:
``` bash
funpack -q -ow out.tsv data_03.tsv data_04.tsv
cat out.tsv
```
%%%% Output: stream
eid 1-0.0 2-0.0 3-0.0 4-0.0 5-0.0 6-0.0
2 94 37 70.0 19.0 17.0 62.0
3 63 5 97.0 41.0 12.0 7.0
4 98 97 91.0 8.0 86.0 9.0
5 37 10 11.0 7.0 65.0 71.0
%% Cell type:markdown id: tags:
If you want to keep all subjects, you can instruct `funpack` to output the union (a.k.a. *outer join*) via the `--merge_strategy` (`-ms` for short) option:
%% Cell type:code id: tags:
``` bash
funpack -q -ow -ms outer out.tsv data_03.tsv data_04.tsv
cat out.tsv
```
%%%% Output: stream
eid 1-0.0 2-0.0 3-0.0 4-0.0 5-0.0 6-0.0
1 89.0 47.0 26.0
2 94.0 37.0 70.0 19.0 17.0 62.0
3 63.0 5.0 97.0 41.0 12.0 7.0
4 98.0 97.0 91.0 8.0 86.0 9.0
5 37.0 10.0 11.0 7.0 65.0 71.0
6 3.0 23.0 15.0
%% Cell type:markdown id: tags:
### Merging by column
Your data may be organised in a different way. For example, these next two files contain different groups of subjects, but overlapping columns:
%% Cell type:code id: tags:
``` bash
echo " " | paste data_05.tsv - data_06.tsv
```
%%%% Output: stream
eid 1-0.0 2-0.0 3-0.0 4-0.0 5-0.0 eid 2-0.0 3-0.0 4-0.0 5-0.0 6-0.0
1 69 80 70 60 42 4 17 36 56 90 12
2 64 15 82 99 67 5 63 16 87 57 63
3 33 67 58 96 26 6 43 19 84 53 63
%% Cell type:markdown id: tags:
In this case, we need to tell `funpack` to merge along the row axis, rather than along the column axis. We can do this with the `--merge_axis` (`-ma` for short) option:
%% Cell type:code id: tags:
``` bash
funpack -q -ow -ma rows out.tsv data_05.tsv data_06.tsv
cat out.tsv
```
%%%% Output: stream
eid 2-0.0 3-0.0 4-0.0 5-0.0
1 80 70.0 60.0 42.0
2 15 82.0 99.0 67.0
3 67 58.0 96.0 26.0
4 17 36.0 56.0 90.0
5 63 16.0 87.0 57.0
6 43 19.0 84.0 53.0
%% Cell type:markdown id: tags:
Again, if we want to retain all columns, we can tell `funpack` to perform an outer join with the `-ms` option:
%% Cell type:code id: tags:
``` bash
funpack -q -ow -ma rows -ms outer out.tsv data_05.tsv data_06.tsv
cat out.tsv
```
%%%% Output: stream
eid 1-0.0 2-0.0 3-0.0 4-0.0 5-0.0 6-0.0
1 69.0 80 70.0 60.0 42.0
2 64.0 15 82.0 99.0 67.0
3 33.0 67 58.0 96.0 26.0
4 17 36.0 56.0 90.0 12.0
5 63 16.0 87.0 57.0 63.0
6 43 19.0 84.0 53.0 63.0
%% Cell type:markdown id: tags:
### Naive merging
Finally, your data may be organised such that you simply want to "paste", or concatenate them together, along either rows or columns. For example, your data files might look like this:
%% Cell type:code id: tags:
``` bash
echo " " | paste data_07.tsv - data_08.tsv
```
%%%% Output: stream
eid 1-0.0 2-0.0 3-0.0 eid 4-0.0 5-0.0 6-0.0
1 30 99 57 1 16 54 60
2 3 6 75 2 43 59 9
3 13 91 36 3 71 73 38
%% Cell type:markdown id: tags:
Here, we have columns for different variables on the same set of subjects, and we just need to concatenate them together horizontally. We do this by using `--merge_strategy naive` (`-ms naive` for short):
%% Cell type:code id: tags:
``` bash
funpack -q -ow -ms naive out.tsv data_07.tsv data_08.tsv
cat out.tsv
```
%%%% Output: stream
eid 1-0.0 2-0.0 3-0.0 4-0.0 5-0.0 6-0.0
1 30 99 57.0 16.0 54.0 60.0
2 3 6 75.0 43.0 59.0 9.0
3 13 91 36.0 71.0 73.0 38.0
%% Cell type:markdown id: tags:
For files which need to be concatenated vertically, such as these:
%% Cell type:code id: tags:
``` bash
echo " " | paste data_09.tsv - data_10.tsv
```
%%%% Output: stream
eid 1-0.0 2-0.0 3-0.0 eid 1-0.0 2-0.0 3-0.0
1 16 34 10 4 40 89 58
2 62 78 16 5 25 75 9
3 72 29 53 6 28 74 57
%% Cell type:markdown id: tags:
We need to tell `funpack` which axis to concatenate along, again using the `-ma` option:
%% Cell type:code id: tags:
``` bash
funpack -q -ow -ms naive -ma rows out.tsv data_09.tsv data_10.tsv
cat out.tsv
```
%%%% Output: stream
eid 1-0.0 2-0.0 3-0.0
1 16 34 10.0
2 62 78 16.0
3 72 29 53.0
4 40 89 58.0
5 25 75 9.0
6 28 74 57.0
%% Cell type:markdown id: tags:
# Cleaning examples
Once the data has been imported, a sequence of cleaning steps are applied to each column.
## NA insertion
For some variables it may make sense to discard or ignore certain values. For example, if an individual selects *"Do not know"* to a question such as *"How much milk did you drink yesterday?"*, that answer will be coded with a specific value (e.g. `-1`). It does not make any sense to include these values in most analyses, so `funpack` can be used to mark such values as *Not Available (NA)*.
A large number of NA insertion rules, specific to UK BioBank variables, are coded into `funpack`, and are applied when you use the `-cfg fmrib` option (see the section below on [built-in rules](#Built-in-rules)). You can also specify your own rules via the `--na_values` (`-nv` for short) option.
Let's say we have this data set:
%% Cell type:code id: tags:
``` bash
cat data_11.tsv
```
%%%% Output: stream
eid 1-0.0 2-0.0 3-0.0
1 4 1 6
2 2 6 0
3 7 0 -1
4 -1 6 1
5 2 8 4
6 0 2 7
7 -1 0 0
8 7 7 2
9 4 -1 -1
10 8 -1 2
%% Cell type:markdown id: tags:
For variable 1, we want to ignore values of -1, for variable 2 we want to ignore -1 and 0, and for variable 3 we want to ignore 1 and 2:
%% Cell type:code id: tags:
``` bash
funpack -q -ow -nv 1 " -1" -nv 2 " -1,0" -nv 3 "1,2" out.tsv data_11.tsv
cat out.tsv
```
%%%% Output: stream
eid 1-0.0 2-0.0 3-0.0
1 4.0 1.0 6.0
2 2.0 6.0 0.0
3 7.0 -1.0
4 6.0
5 2.0 8.0 4.0
6 0.0 2.0 7.0
7 0.0
8 7.0 7.0
9 4.0 -1.0
10 8.0
%% Cell type:markdown id: tags:
> The `--na_values` option expects two arguments:
> * The variable ID
> * A comma-separated list of values to replace with NA
%% Cell type:markdown id: tags:
## Variable-specific cleaning functions
A small number of cleaning/preprocessing functions are built into `funpack`, which can be applied to specific variables. For example, some variables in the UK BioBank contain ICD10 disease codes, which may be more useful if converted to a numeric format. Imagine that we have some data with ICD10 codes:
%% Cell type:code id: tags:
``` bash
cat data_12.tsv
```
%%%% Output: stream
eid 1-0.0
1 A481
2 A590
3 B391
4 D596
5 Z980
%% Cell type:markdown id: tags:
We can use the `--clean` (`-cl` for short) option with the built-in `codeToNumeric` cleaning function to convert the codes to a numeric representation:
%% Cell type:code id: tags:
``` bash
funpack -q -ow -cl 1 "codeToNumeric('icd10')" out.tsv data_12.tsv
cat out.tsv
```
%%%% Output: stream
eid 1-0.0
1 534
2 596
3 932
4 2159
5 19143
%% Cell type:markdown id: tags:
> The `--clean` option expects two arguments:
> * The variable ID
> * The cleaning function to apply. Some cleaning functions accept arguments - refer to the command-line help for a summary of available functions.
>
> You can define your own cleaning functions by passing them in as a `--plugin_file` (see the [section on custom plugins below](#Custom-cleaning,-processing-and-loading----funpack-plugins)).
### Example: flattening hierarchical data
Several variables in the UK Biobank (including the ICD10 disease categorisations) are organised in a hierarchical manner - each value is a child of a more general parent category. The `flattenHierarchical` cleaning function can be used to replace each value in a data set with the value that corresponds to a parent category. Let's apply this to our example ICD10 data set.
%% Cell type:code id: tags:
``` bash
funpack -q -ow -cl 1 "flattenHierarchical(name='icd10')" out.tsv data_12.tsv
cat out.tsv
```
%%%% Output: stream
eid 1-0.0
1 Chapter I
2 Chapter I
3 Chapter I
4 Chapter III
5 Chapter XXI
%% Cell type:markdown id: tags:
### Aside: ICD10 mapping file
`funpack` has a feature specific to these ICD10 disease categorisations - you can use the `--icd10_map_file` (`-imf` for short) option to tell `funpack` to save a file which contains a list of all ICD10 codes that were present in the input data, and the corresponding numerical codes that `funpack` generated:
%% Cell type:code id: tags:
``` bash
funpack -q -ow -cl 1 "codeToNumeric('icd10')" -imf icd10_codes.tsv out.tsv data_12.tsv
cat icd10_codes.tsv
```
%%%% Output: stream
code value description parent_descs
A481 534 A48.1 Legionnaires' disease [Chapter I Certain infectious and parasitic diseases] [A30-A49 Other bacterial diseases] [A48 Other bacterial diseases, not elsewhere classified]
A590 596 A59.0 Urogenital trichomoniasis [Chapter I Certain infectious and parasitic diseases] [A50-A64 Infections with a predominantly sexual mode of transmission] [A59 Trichomoniasis]
B391 932 B39.1 Chronic pulmonary histoplasmosis capsulati [Chapter I Certain infectious and parasitic diseases] [B35-B49 Mycoses] [B39 Histoplasmosis]
D596 2159 D59.6 Haemoglobinuria due to haemolysis from other external causes [Chapter III Diseases of the blood and blood-forming organs and certain disorders involving the immune mechanism] [D55-D59 Haemolytic anaemias] [D59 Acquired haemolytic anaemia]
Z980 19143 Z98.0 Intestinal bypass and anastomosis status [Chapter XXI Factors influencing health status and contact with health services] [Z80-Z99 Persons with potential health hazards related to family and personal history and certain conditions influencing health status] [Z98 Other postsurgical states]
%% Cell type:markdown id: tags:
## Categorical recoding
%% Cell type:markdown id: tags:
You may have some categorical data which is coded in an awkward manner, such as in this example, which encodes the amount of some item that an individual has consumed:
<img src="attachment:image.png" width="100"/>
You can use the `--recoding` (`-re` for short) option to recode data like this into something more useful. For example, given this data:
%% Cell type:code id: tags:
``` bash
cat data_13.tsv
```
%%%% Output: stream
eid 1-0.0
1 1
2 555
3 444
4 2
5 300
6 444
7 2
8 2
%% Cell type:markdown id: tags:
Let's recode it to be more monotonic:
%% Cell type:code id: tags:
``` bash
funpack -q -ow -re 1 "300,444,555" "3,0.25,0.5" out.tsv data_13.tsv
cat out.tsv
```
%%%% Output: stream
eid 1-0.0
1 1.0
2 0.5
3 0.25
4 2.0
5 3.0
6 0.25
7 2.0
8 2.0
%% Cell type:markdown id: tags:
The `--recoding` option expects three arguments:
* The variable ID
* A comma-separated list of the values to be replaced
* A comma-separated list of the values to replace them with
%% Cell type:markdown id: tags:
## Child value replacement
Imagine that we have these two questions:
* **1**: *Do you currently smoke cigarettes?*
* **2**: *How many cigarettes did you smoke yesterday?*
Now, question 2 was only asked if the answer to question 1 was *"Yes"*. So for all individuals who answered *"No"* to question 1, we will have a missing value for question 2. But for some analyses, it would make more sense to have a value of 0, rather than NA, for these subjects.
`funpack` can handle these sorts of dependencies by way of *child value replacement*. For question 2, we can define a conditional variable expression such that when both question 2 is NA and question 1 is *"No"*, we can insert a value of 0 into question 2.
This scenario is demonstrated in this example data set (where, for question 1 values of `1` and `0` represent *"Yes"* and *"No"* respectively):
%% Cell type:code id: tags:
``` bash
cat data_14.tsv
```
%%%% Output: stream
eid 1-0.0 2-0.0
1 1 7
2 1 4
3 1 1
4 0
5 0
6 0
7 1 25
8 0
%% Cell type:markdown id: tags:
We can fill in the values for variable 2 by using the `--child_values` (`-cv` for short) option:
%% Cell type:code id: tags:
``` bash
funpack -q -ow -cv 2 "v1 == 0" "0" out.tsv data_14.tsv
cat out.tsv
```
%%%% Output: stream
eid 1-0.0 2-0.0
1 1 7.0
2 1 4.0
3 1 1.0
4 0 0.0
5 0 0.0
6 0 0.0
7 1 25.0
8 0 0.0
%% Cell type:markdown id: tags:
> The `--child_values` option expects three arguments:
> * The variable ID
> * An expression evaluating some condition on the parent variable(s)
> * A value to replace NA with where the expression evaluates to true.
%% Cell type:markdown id: tags:
# Processing examples
After every column has been cleaned, the entire data set undergoes a series of processing steps. The processing stage may result in columns being removed or manipulated, or new columns being added.
The processing stage can be controlled with these options:
* `--prepend_process` (`-ppr` for short): Apply a processing function before the built-in processing
* `--append_process` (`-apr` for short): Apply a processing function after the built-in processing
A default set of processing steps are applied when you apply the `fmrib` configuration profile by using `-cfg fmrib` - see the section on [built-in rules](#Built-in-rules).
The `--prepend_process` and `--append_process` options require two arguments:
* The variable ID(s) to apply the function to, or `all` to denote all variables.
* The processing function to apply. The available processing functions are listed in the command line help, or you can write your own and pass it in as a plugin file ([see below](#Custom-cleaning,-processing-and-loading----funpack-plugins)).
## Sparsity check
The `removeIfSparse` process will remove columns that are deemed to have too many missing values. If we take this data set:
%% Cell type:code id: tags:
``` bash
cat data_15.tsv
```
%%%% Output: stream
eid 1-0.0 2-0.0
1 7 24
2 2 37
3 4 14
4 6
5 77
6 7 10
7
8 3 13
9 62
10 74
%% Cell type:markdown id: tags:
Imagine that our analysis requires at least 8 values per variable to work. We can use the `minpres` option to `funpack` to drop any columns which do not meet this threshold:
%% Cell type:code id: tags:
``` bash
funpack -q -ow -apr all "removeIfSparse(minpres=8)" out.tsv data_15.tsv
cat out.tsv
```
%%%% Output: stream
eid 2-0.0
1 24.0
2 37.0
3 14.0
4
5 77.0
6 10.0
7
8 13.0
9 62.0
10 74.0
%% Cell type:markdown id: tags:
You can also specify `minpres` as a proportion, rather than an absolute number. e.g.:
%% Cell type:code id: tags:
``` bash
funpack -q -ow -apr all "removeIfSparse(minpres=0.65, abspres=False)" out.tsv data_15.tsv
cat out.tsv
```
%%%% Output: stream
eid 2-0.0
1 24.0
2 37.0
3 14.0
4
5 77.0
6 10.0
7
8 13.0
9 62.0
10 74.0
%% Cell type:markdown id: tags:
## Redundancy check
You may wish to remove columns which contain redundant information. The `removeIfRedundant` process calculates the pairwise correlation between all columns, and removes columns with a correlation above a threshold that you provide. Imagine that we have this data set:
%% Cell type:code id: tags:
``` bash
cat data_16.tsv
```
%%%% Output: stream
eid 1-0.0 2-0.0 3-0.0
1 1 10 1
2 2 20 6
3 3 30 3
4 4 40 7
5 5 50 2
6 6 60 3
7 5 50 2
8 4 40 9
9 3 30 8
10 2 20 5
%% Cell type:markdown id: tags:
The data in column `2-0.0` is effectively equivalent to the data in column `1-0.0`, so is not of any use to us. We can tell `funpack` to remove it like so:
%% Cell type:code id: tags:
``` bash
funpack -q -ow -apr all "removeIfRedundant(0.9)" out.tsv data_16.tsv
cat out.tsv
```
%%%% Output: stream
eid 1-0.0 3-0.0
1 1 1.0
2 2 6.0
3 3 3.0
4 4 7.0
5 5 2.0
6 6 3.0
7 5 2.0
8 4 9.0
9 3 8.0
10 2 5.0
%% Cell type:markdown id: tags:
The `removeIfRedundant` process can also calculate the correlation of the patterns of missing values between variables. Consider this example:
%% Cell type:code id: tags:
``` bash
cat data_17.tsv
```
%%%% Output: stream
eid 1-0.0 2-0.0 3-0.0
1 1 10 100
2 2 20 200
3 300
4 4 40 400
5 500
6 4 40
7 3 300
8 2 20 200
9 1 10 100
10 0
%% Cell type:markdown id: tags:
All three columns are highly correlated, but the pattern of missing values in column `3-0.0` is different to that of the other columns.
If we use the `nathres` option, `funpack` will only remove columns where the correlation of both present and missing values meet the thresholds. Note that the column which contains more missing values will be the one that gets removed:
%% Cell type:code id: tags:
``` bash
funpack -q -ow -apr all "removeIfRedundant(0.9, nathres=0.6)" out.tsv data_17.tsv
cat out.tsv
```
%%%% Output: stream
eid 1-0.0 3-0.0
1 1.0 100.0
2 2.0 200.0
3 300.0
4 4.0 400.0
5 500.0
6 4.0
7 3.0 300.0
8 2.0 200.0
9 1.0 100.0
10 0.0
%% Cell type:markdown id: tags:
## Categorical binarisation
The `binariseCategorical` process takes a column containing categorical labels, and replaces it with
a set of new binary columns, one for each category. Imagine that we have this data:
%% Cell type:code id: tags:
``` bash
cat data_18.tsv
```
%%%% Output: stream
eid 1-0.0
1 1
2 2
3 3
4 2
5 2
6 3
7 1
8 4
9 1
10 3
%% Cell type:markdown id: tags:
We can use the `binariseCategorical` process to split column `1-0.0` into a separate column for each category:
%% Cell type:code id: tags:
``` bash
funpack -q -ow -apr 1 "binariseCategorical" out.tsv data_18.tsv
cat out.tsv
```
%%%% Output: stream
eid 1-0.1 1-0.2 1-0.3 1-0.4
1 1 0 0 0
2 0 1 0 0
3 0 0 1 0
4 0 1 0 0
5 0 1 0 0
6 0 0 1 0
7 1 0 0 0
8 0 0 0 1
9 1 0 0 0
10 0 0 1 0
%% Cell type:markdown id: tags:
There are a few options to `binariseCategorical`, including controlling whether the original column is removed, and also the naming of the newly created columns:
%% Cell type:code id: tags:
``` bash
funpack -q -ow -apr 1 "binariseCategorical(replace=False, nameFormat='{vid}:{value}')" out.tsv data_18.tsv
cat out.tsv
```
%%%% Output: stream
eid 1-0.0 1:1 1:2 1:3 1:4
1 1 1 0 0 0
2 2 0 1 0 0
3 3 0 0 1 0
4 2 0 1 0 0
5 2 0 1 0 0
6 3 0 0 1 0
7 1 1 0 0 0
8 4 0 0 0 1
9 1 1 0 0 0
10 3 0 0 1 0
%% Cell type:markdown id: tags:
# Custom cleaning, processing and loading - `funpack` plugins
If you want to apply some specific cleaning or processing function to a variable, you can code your functions up in python, and then tell `funpack` to apply them.
As an example, let's say we have some data like this:
%% Cell type:code id: tags:
``` bash
cat data_19.tsv
```
%%%% Output: stream
eid 1-0.0 2-0.0 3-0.0
1 28 65 18
2 12 71 63
3 17 60 95
4 36 80 38
5 91 55 36
6 4 97 71
7 20 3 88
8 58 64 65
9 87 27 26
10 36 17 22
%% Cell type:markdown id: tags:
## Custom cleaning functions
But for our analysis, we are only interested in the even values for columns 1 and 2. Let's write a cleaning function which replaces all odd values with NA:
%% Cell type:code id: tags:
``` bash
cat plugin_1.py | pygmentize
```
%%%% Output: stream
#!/usr/bin/env python
import numpy as np
from funpack import cleaner
# Cleaner functions are passed:
#
# - dtable: An object which provides access to the data set.
# - vid: The variable ID of the column(s) to be cleaned.
#
# Cleaner functions should modify the data in-place.
@cleaner()
def drop_odd_values(dtable, vid):
# Remember that a variable may be
# associated with multiple columns
cols = dtable.columns(vid)
# the columns() method returns a list of
# Column objects, each of which contains
# information about one column in the data.
for col in cols:
col = col.name
mask = dtable[:, col] % 2 != 0
dtable[mask, col] = np.nan
%% Cell type:markdown id: tags:
To use our custom cleaner function, we simply pass our plugin file to `funpack` using the `--plugin_file` (`-p` for short) option:
%% Cell type:code id: tags:
``` bash
funpack -q -ow -p plugin_1.py -cl 1 drop_odd_values -cl 2 drop_odd_values out.tsv data_19.tsv
cat out.tsv
```
%%%% Output: stream
eid 1-0.0 2-0.0 3-0.0
1 28.0 18.0
2 12.0 63.0
3 60.0 95.0
4 36.0 80.0 38.0
5 36.0
6 4.0 71.0
7 20.0 88.0
8 58.0 64.0 65.0
9 26.0
10 36.0 22.0
%% Cell type:markdown id: tags:
## Custom processing functions
Recall that **cleaning** functions are applied independently to each column, whereas **processing** functions may be applied to multiple columns simultaneously, and may add and/or remove columns. Let's say we want to derive a new column from columns `1-0.0` and `2-0.0` in our example data set. Our plugin file might look like this:
%% Cell type:code id: tags:
``` bash
cat plugin_2.py | pygmentize
```
%%%% Output: stream
#!/usr/bin/env python
import itertools as it
from funpack import processor
# Processor functions are passed:
#
# - dtable: An object which provides access to the data set.
# - vid: The variable ID of the column(s) to be cleaned.
#
# Processor functions can do any of the following:
#
# - modify existing columns in place,
# - return a list of columns that should be removed
# - return a list of columns that should be added
@processor()
def sum_squares(dtable, vids):
cols = it.chain(*[dtable.columns(v) for v in vids])
series = [dtable[:, c.name] for c in cols]
squares = [s * s for s in series]
sumsq = sum(squares)
sumsq.name = 'sum_square({})'.format(','.join([str(v) for v in vids]))
# The value returned by a processor function differs
# depending on what it wishes to do. In this case,
# we are returning a list of new pandas.Series to be
# added as columns, and a list of integer variable
# IDs, one for each new column. The variable IDs are
# optional, so we are just returning None instead.
return [sumsq], None
%% Cell type:markdown id: tags:
Again, to use our plugin, we pass it to `funpack` via the `--plugin`/`-p` option:
%% Cell type:code id: tags:
``` bash
funpack -q -ow -p plugin_2.py -apr "1,2" "sum_squares" out.tsv data_19.tsv
cat out.tsv
```
%%%% Output: stream
eid 1-0.0 2-0.0 3-0.0 sum_square(1,2)
1 28 65 18.0 5009
2 12 71 63.0 5185
3 17 60 95.0 3889
4 36 80 38.0 7696
5 91 55 36.0 11306
6 4 97 71.0 9425
7 20 3 88.0 409
8 58 64 65.0 7460
9 87 27 26.0 8298
10 36 17 22.0 1585
%% Cell type:markdown id: tags:
## Custom file loaders
You might want to load some auxiliary data which is in an awkward format that cannot be automatically parsed by `funpack`. For example, you may have a file which has acquisition date information separated into *year*, *month* and *day* columns, e.g.:
%% Cell type:code id: tags:
``` bash
cat data_20.tsv
```
%%%% Output: stream
eid year month day
1 2018 6 1
2 2018 6 17
3 2018 7 5
4 2018 3 11
5 2018 2 21
6 2018 8 8
7 2018 11 30
8 2018 12 5
9 2018 4 13
%% Cell type:markdown id: tags:
These three columns would be better loaded as a single column. So we can write a plugin to load this file for us. We need to write two functions:
* A "sniffer" function, which returns information about the columns contained in the file
* A "loader" function which loads the file, returning it as a `pandas.DataFrame`.
%% Cell type:code id: tags:
``` bash
cat plugin_3.py | pygmentize
```
%%%% Output: stream
#!/usr/bin/env python
import pandas as pd
from funpack import sniffer, loader, Column
# Sniffer and loader functions are defined in
# pairs. A pair is denoted by its functions
# being given the same label, passed to the
# @sniffer/@loader decorators
# ("my_datefile_loader" in this example). The
# function names are irrelevant.
@sniffer('my_datefile_loader')
def columns_datefile(infile):
# A sniffer function must return a
# sequence of Column objects which
# describe the file (after it has
# been loaded by the loader function).
#
# The values passed to a Column object
# are:
# - file name
# - column name
# - column index (starting from 0)
return [Column(infile, 'eid', 0),
Column(infile, 'acquisition_date', 1)]
@loader('my_datefile_loader')
def load_datefile(infile):
def create_date(row):
return pd.Timestamp(row['year'], row['month'], row['day'])
df = pd.read_csv(infile, index_col=0, delim_whitespace=True)
df['acquisition_date'] = df.apply(create_date, axis=1)
df.drop(['year', 'month', 'day'], axis=1, inplace=True)
return df
%% Cell type:markdown id: tags:
And to see it in action:
%% Cell type:code id: tags:
``` bash
funpack -q -ow -p plugin_3.py -l data_20.tsv my_datefile_loader out.tsv data_20.tsv
cat out.tsv
```
%%%% Output: stream
eid acquisition_date
1 2018-06-01T00:00:00
2 2018-06-17T00:00:00
3 2018-07-05T00:00:00
4 2018-03-11T00:00:00
5 2018-02-21T00:00:00
6 2018-08-08T00:00:00
7 2018-11-30T00:00:00
8 2018-12-05T00:00:00
9 2018-04-13T00:00:00
%% Cell type:markdown id: tags:
# Miscellaneous topics
## Non-numeric data
Many UK Biobank variables contain non-numeric data, such as alpha-numeric codes and unstructured text. If you want to handle this data separately from the numeric data, you can use the `--non_numeric_file` option (`-nn` for short) to save all non-numeric columns to a separate file. Here is an example of a file containing both numeric and non-numeric data:
%% Cell type:code id: tags:
``` bash
cat data_21.tsv | column -t -s $'\t'
```
%%%% Output: stream
eid 1-0.0 2-0.0 3-0.0 4-0.0
1 A481 54 red green blue 0.6
2 A590 24 yellow aqua 3.2
3 B391 17 brown black purple 1.4
4 D596 77 green 7.3
5 Z980 26 white red 5.1
%% Cell type:markdown id: tags:
We can save the numeric and non-numeric columns to separate files as follows:
> Note the use of the `-nb` (`--no_builtins`) option here - this tells `funpack` to ignore its built-in variable table, which contains information about the type of each variable, and would otherwise interfere with this example.
%% Cell type:code id: tags:
``` bash
funpack -nb -q -ow -nn non_numerics.tsv numerics.tsv data_21.tsv
cat numerics.tsv
cat non_numerics.tsv
```
%%%% Output: stream
eid 2-0.0 4-0.0
1 54 0.6
2 24 3.2
3 17 1.4
4 77 7.3
5 26 5.1
eid 1-0.0 3-0.0
1 A481 red green blue
2 A590 yellow aqua
3 B391 brown black purple
4 D596 green
5 Z980 white red
%% Cell type:markdown id: tags:
## Dry run
The `--dry_run` (`-d` for short) option allows you to see what `funpack` is going to do - it is useful to perform a dry run before running a large processing job, which could take a long time. For example, if we have a complicated configuration such as the following, we can use the `--dry_run` option to check that `funpack` is going to do what we expect:
%% Cell type:code id: tags:
``` bash
funpack \
-nb -q -ow -d \
-nv 1 "7,8,9" \
-re 2 "1,2,3" "100,200,300" \
-cv 3 "v4 != 20" "25" \
-cl 4 "makeNa('< 50')" \
-apr all "removeIfSparse(minpres=0.5)" \
out.tsv data_01.tsv
```
%%%% Output: stream
funpack 1.4.0 dry run
Input data
Loaded columns: 11
Ignored columns: 0
Unknown columns: 10
Unprocessed/uncategorised columns: 0
Loaded variables: 11
Cleaning
NA Insertion: True
1: [7. 8. 9.]
Cleaning functions: True
4: [makeNa[cleaner](< 50)]
Child value replacement: True
3: [v4 != 20] -> [25.]
Categorical recoding: True
2: [1. 2. 3.] -> [100. 200. 300.]
Processing: True
1: ('all', []) -> [removeIfSparse[processor](minpres=0.5)]
%% Cell type:markdown id: tags:
## Built-in rules
`funpack` has a large number of hand-crafted rules built in, which are specific to variables found in the UK BioBank data set. These rules are part of the ``fmrib`` configuration, which can be used by adding `-cfg fmrib` to the command-line options.
We can use the `--dry_run` (`-d` for short) option, along with some dummy data files which just contain the UK BioBank column names, to get a summary of these rules:
%% Cell type:code id: tags:
``` bash
funpack -q -ow -cfg fmrib -d out.tsv ukb_dataset_1_only_column_names.tsv ukb_dataset_2_only_column_names.tsv
```
%%%% Output: stream
funpack 1.4.0 dry run
Input data
Loaded columns: 13282
Ignored columns: 908
Unknown columns: 0
Unprocessed/uncategorised columns: 0
Loaded variables: 1986
Cleaning
NA Insertion: True
19: [3. 6. 7.]
21: [-1. 4.]
84: [-1. -3.]
87: [-1. -3.]
92: [-1. -3.]
400: [0.]
670: [-3.]
680: [-3.]
699: [-1. -3.]
709: [-1. -3.]
728: [-1. -3.]
738: [-1. -3.]
757: [-1. -3.]
767: [-1. -3.]
777: [-1. -3.]
796: [-1. -3.]
806: [-1. -3.]
816: [-1. -3.]
826: [-1. -3.]
845: [-1. -2. -3.]
864: [-1. -2. -3.]
874: [-1. -3.]
884: [-1. -3.]
894: [-1. -3.]
904: [-1. -3.]
914: [-1. -3.]
924: [-3.]
943: [-1. -3.]
971: [-1. -3.]
981: [-1. -3.]
991: [-1. -3.]
1001: [-1. -3.]
1011: [-1. -3.]
1021: [-1. -3.]
1031: [-1. -3.]
1050: [-1. -3.]
1060: [-1. -3.]
1070: [-1. -3.]
1080: [-1. -3.]
1090: [-1. -3.]
1100: [-1. -3. 5.]
1110: [-1. -3.]
1120: [-1. -3.]
1130: [-1. -3.]
1140: [-1. -3. 3.]
1150: [-1. -3.]
1160: [-1. -3.]
1170: [-1. -3.]
1180: [-1. -3.]
1190: [-3.]
1200: [-3.]
1210: [-1. -3.]
1220: [-1. -3.]
1239: [-3.]
1249: [-3.]
1259: [-3.]
1269: [-1. -3.]
1279: [-1. -3.]
1289: [-1. -3.]
1299: [-1. -3.]
1309: [-1. -3.]
1319: [-1. -3.]
1329: [-1. -3.]
1339: [-1. -3.]
1349: [-1. -3.]
1359: [-1. -3.]
1369: [-1. -3.]
1379: [-1. -3.]
1389: [-1. -3.]
1408: [-1. -3.]
1418: [-1. -3.]
1428: [-1. -3.]
1438: [-1. -3.]
1448: [-1. -3.]
1458: [-1. -3.]
1468: [-1. -3.]
1478: [-3.]
1488: [-1. -3.]
1498: [-1. -3.]
1508: [-1. -3.]
1518: [-3.]
1528: [-1. -3.]
1538: [-3.]
1548: [-1. -3.]
1558: [-3.]
1568: [-1. -3.]
1578: [-1. -3.]
1588: [-1. -3.]
1598: [-1. -3.]
1608: [-1. -3.]
1618: [-1. -3.]
1628: [-1. -3.]
1647: [-1. -3.]
1677: [-1. -3.]
1687: [-1. -3.]
1697: [-1. -3.]
1707: [-3.]
1717: [-1. -3.]
1727: [-1. -3.]
1737: [-1. -3.]
1747: [-1. -3.]
1757: [-1. -3.]
1767: [-1. -3.]
1777: [-1. -3.]
1787: [-1. -3.]
1797: [-1. -3.]
1807: [-1. -3.]
1835: [-1. -3.]
1845: [-1. -3.]
1873: [-1. -3.]
1883: [-1. -3.]
1920: [-1. -3.]
1930: [-1. -3.]
1940: [-1. -3.]
1950: [-1. -3.]
1960: [-1. -3.]
1970: [-1. -3.]
1980: [-1. -3.]
1990: [-1. -3.]
2000: [-1. -3.]
2010: [-1. -3.]
2020: [-1. -3.]
2030: [-1. -3.]
2040: [-1. -3.]
2050: [-1. -3.]
2060: [-1. -3.]
2070: [-1. -3.]
2080: [-1. -3.]
2090: [-1. -3.]
2100: [-1. -3.]
2110: [-1. -3.]
2139: [-1. -3.]
2149: [-1. -3.]
2159: [-3.]
2178: [-1. -3.]
2188: [-1. -3.]
2207: [-3.]
2217: [-1. -3.]
2227: [-3.]
2237: [-3.]
2247: [-1. -3.]
2257: [-1. -3.]
2267: [-1. -3.]
2277: [-1. -3.]
2296: [-3.]
2306: [-1. -3.]
2316: [-1. -3.]
2335: [-1. -3.]
2345: [-1. -3.]
2355: [-1. -3.]
2365: [-1. -3.]
2375: [-1. -3.]
2385: [-1. -3.]
2395: [-1. -3.]
2405: [-1. -3.]
2415: [-1. -3.]
2443: [-1. -3.]
2453: [-1. -3.]
2463: [-1. -3.]
2473: [-1. -3.]
2492: [-1. -3.]
2624: [-1. -3.]
2634: [-1. -3.]
2644: [-1. -3.]
2654: [-1. -3.]
2664: [-1. -3.]
2674: [-1. -3.]
2684: [-1. -3.]
2694: [-1. -3.]
2704: [-1. -3.]
2714: [-1. -3.]
2724: [-3.]
2734: [-3.]
2744: [-1. -3.]
2754: [-3. -4.]
2764: [-3. -4.]
2774: [-1. -3.]
2784: [-1. -3.]
2794: [-1. -3.]
2804: [-1. -3.]
2814: [-1. -3.]
2824: [-1. -3.]
2834: [-3. -5.]
2844: [-1. -3.]
2867: [-1. -3.]
2877: [-3.]
2887: [-1.]
2897: [-1. -3.]
2907: [-1. -3.]
2926: [-1. -3.]
2936: [-1. -3.]
2946: [-1. -3.]
2956: [-1. -3.]
2966: [-1. -3.]
2976: [-1. -3.]
2986: [-1. -3.]
3005: [-1. -3.]
3393: [-3.]
3404: [-1. -3.]
3414: [-1. -3.]
3426: [-1. -3.]
3436: [-1. -3.]
3446: [-3.]
3456: [-1. -3.]
3466: [-1. -3.]
3476: [-3.]
3486: [-3.]
3496: [-3.]
3506: [-3.]
3526: [-1. -3.]
3536: [-1. -3.]
3546: [-1. -3.]
3571: [-1. -3.]
3581: [-1. -3.]
3591: [-3. -5.]
3606: [-3.]
3616: [-1. -3.]
3627: [-1. -3.]
3637: [-1. -3.]
3647: [-1. -3.]
3659: [-1. -3.]
3669: [-1. -3.]
3680: [-1. -3.]
3700: [-1. -3.]
3710: [-1. -3.]
3720: [-1. -3.]
3731: [-3.]
3741: [-1. -3.]
3751: [-3.]
3761: [-1. -3.]
3773: [-1. -3.]
3786: [-1. -3.]
3799: [-1. -3.]
3809: [-1. -3.]
3829: [-1. -3.]
3839: [-1. -3.]
3849: [-1. -3.]
3859: [-1. -3.]
3872: [-3. -4.]
3882: [-1. -3.]
3894: [-1. -3.]
3912: [-1. -3.]
3942: [-1. -3.]
3972: [-1. -3.]
3982: [-1. -3.]
3992: [-1. -3.]
4012: [-1. -3.]
4022: [-1. -3.]
4041: [-1. -3.]
4056: [-1. -3.]
4067: [-1. -3.]
4092: [3. 6. 7.]
4095: [3. 6. 7.]
4407: [-1. -3.]
4418: [-1. -3.]
4429: [-1. -3.]
4440: [-1. -3.]
4451: [-1. -3.]
4462: [-1. -3.]
4501: [-1. -3.]
4526: [-1. -3.]
4537: [-1. -3.]
4548: [-1. -3.]
4559: [-1. -3.]
4570: [-1. -3.]
4581: [-1. -3.]
4598: [-1. -3.]
4609: [-1. -3.]
4620: [-1. -3.]
4631: [-1. -3.]
4642: [-1. -3.]
4653: [-1. -3.]
4674: [-1. -3.]
4689: [-1. -3.]
4700: [-1. -3.]
4717: [-1. -3.]
4728: [-1. -3.]
4792: [-3.]
4803: [-1. -3.]
4814: [-1. -3.]
4825: [-1. -3.]
4836: [-1. -3.]
4935: [-1. -3.]
4957: [-1. -3.]
4968: [-1. -3.]
4979: [-1. -3.]
4990: [-1. -3.]
5001: [-1. -3.]
5012: [-1. -3.]
5057: [-1. -3.]
5364: [-1. -3.]
5375: [-1. -3.]
5386: [-1. -3.]
5430: [-1. -3.]
5452: [-1. -3.]
5463: [-1. -3.]
5474: [-1. -3.]
5485: [-1. -3.]
5496: [-1. -3.]
5507: [-1. -3.]
5518: [-1. -3.]
5529: [-1. -3.]
5540: [-1. -3.]
5556: [-1. -3.]
5663: [-1. -3.]
5674: [-1. -3.]
5699: [-1. -3.]
5779: [-1. -3.]
5790: [-1. -3.]
5866: [-1. -3.]
5901: [-1. -3.]
5923: [-1. -3.]
5945: [-1. -3.]
5959: [-3.]
6019: [6. 7.]
6070: [6. 7.]
6072: [6. 7.]
6138: [-7. -3.]
6139: [-1. -3.]
6140: [-1. -3.]
6141: [-3.]
6142: [-3.]
6143: [-3.]
6144: [-3.]
6145: [-3.]
6146: [-1. -3.]
6147: [-1. -3.]
6148: [-1. -3.]
6149: [-3.]
6150: [-3.]
6151: [-1. -3.]
6152: [-3.]
6153: [-1. -3.]
6154: [-1. -3.]
6155: [-3.]
6157: [-1. -3.]
6158: [-1. -3.]
6159: [-3.]
6160: [-3.]
6162: [-3.]
6164: [-3.]
6177: [-1. -3.]
6179: [-1. -3.]
6183: [-1.]
6194: [-1. -3.]
10004: [-1. -3.]
10005: [-1. -3.]
10006: [-3.]
10007: [-3.]
10016: [-1. -3.]
10105: [-3.]
10114: [-3.]
10115: [-1. -3.]
10138: [0.]
10721: [-1. -3.]
10722: [-3.]
10723: [-3.]
10740: [-3.]
10749: [-1. -3.]
10767: [-1. -3.]
10776: [-1. -3.]
10793: [-1. -3.]
10818: [-1. -3.]
10827: [-1.]
10854: [-3.]
10855: [-3.]
10860: [-1. -3.]
10877: [-1. -3.]
10886: [-1. -3.]
10895: [-3.]
10912: [-3.]
10953: [-1. -3.]
10962: [-1. -3.]
10971: [-1. -3.]
12706: [ 1. 99.]
20006: [-1. -3.]
20007: [-1. -3.]
20008: [-1. -3.]
20009: [-1. -3.]
20010: [-1. -3.]
20011: [-1. -3.]
20012: [-1. -3.]
20013: [-1. -3.]
20014: [-1. -3.]
20107: [-11. -13. -21. -23.]
20110: [-11. -13. -21. -23.]
20111: [-11. -13. -21. -23.]
20112: [-11. -13. -21. -23.]
20113: [-11. -13. -21. -23.]
20114: [-11. -13. -21. -23.]
20116: [-3.]
20117: [-3.]
20119: [-3.]
20133: [-1.]
20149: [-1.]
20155: [-1.]
21000: [-1. -3.]
22052: [-1.]
22499: [-818.]
22501: [-1. -2. -3.]
22506: [-818.]
22508: [-818.]
22603: [-313.]
22606: [-121.]
22607: [-121.]
22608: [-121.]
22609: [-121.]
22610: [-121.]
22611: [-121.]
22612: [-121.]
22613: [-121.]
22614: [-121.]
22615: [-121.]
22660: [-121. -818.]
22664: [-313.]
100370: [111.]
100380: [111.]
100490: [111.]
100500: [111.]
100900: [111.]
100910: [111.]
Cleaning functions: True
3066: [parseSpirometryData[cleaner]()]
10697: [parseSpirometryData[cleaner]()]
40001: [codeToNumeric[cleaner](icd10)]
40002: [codeToNumeric[cleaner](icd10)]
40006: [codeToNumeric[cleaner](icd10)]
40013: [codeToNumeric[cleaner](icd9)]
41201: [codeToNumeric[cleaner](icd10)]
41202: [codeToNumeric[cleaner](icd10)]