Commit 1b279984 authored by Paul McCarthy's avatar Paul McCarthy 🚵
Browse files

Merge branch 'rf/sparse_parallel' into 'master'

Rf/sparse parallel

Closes #15

See merge request !64
parents d7c61d77 51daca01
......@@ -6,6 +6,15 @@ FUNPACK changelog
----------------------------
Changed
^^^^^^^
* The :func:`.removeIfSparse` processing function can now parallelise the
check across columns, rather than relying on the :mod:`.processing` module
to parallelise calls across variables (!64).
Fixed
^^^^^
......
......@@ -6,7 +6,7 @@
#
__version__ = '2.3.1.dev0'
__version__ = '2.3.1'
"""The ``funpack`` versioning scheme roughly follows Semantic Versioning
conventions.
"""
......
......@@ -20,17 +20,17 @@ Variable Process
#
# Categorical columns will be dropped if one category comprises
# 99% of the data.
all_independent_except,6150,6155,20001,20002,20003,20004,20199,40001,40002,40006,40011,40012,40013,41200,41201,41202,41203,41204,41205,41210,41256,41258,41270,41271,41272,41273 removeIfSparse(minpres=51, maxcat=0.99, minstd=1e-6, abscat=False)
all_except,6150,6155,20001,20002,20003,20004,20199,40001,40002,40006,40011,40012,40013,41200,41201,41202,41203,41204,41205,41210,41256,41258,41270,41271,41272,41273 removeIfSparse(minpres=51, maxcat=0.99, minstd=1e-6, abscat=False)
# Binarised vars are subjected to a slightly adjusted sparsity
# check - we drop columns which don't have at least 10 diagnoses
# (or which have less than 10 non-diagnoses).
independent,6150,6155,20001,20002,20003,20004,20199,40001,40002,40006,40011,40012,40013,41200,41201,41204,41205,41210,41256,41258,41272,41273 removeIfSparse(mincat=10)
6150,6155,20001,20002,20003,20004,20199,40001,40002,40006,40011,40012,40013,41200,41201,41204,41205,41210,41256,41258,41272,41273 removeIfSparse(mincat=10)
# At this point, the main ICD vars will contain either
# a date, or nan (fillval=0, used above, is only applied at
# export), so a minpres test will suffice.
independent,41202,41203,41270,41271 removeIfSparse(minpres=10)
41202,41203,41270,41271 removeIfSparse(minpres=10)
# Drop columns which are correlated with other columns (the one
# with more missing values is dropped).
......
......@@ -61,6 +61,7 @@ The following processing functions are defined:
"""
import functools as ft
import itertools as it
import logging
import collections
......@@ -69,25 +70,29 @@ import numpy as np
import pandas as pd
import pandas.api.types as pdtypes
from typing import List, Optional, Any
from . import processing_functions_core as core
from . import util
from . import custom
from . import datatable
log = logging.getLogger(__name__)
@custom.processor()
def removeIfSparse(dtable,
vids,
minpres=None,
minstd=None,
mincat=None,
maxcat=None,
abspres=True,
abscat=True,
naval=None,
ignoreType=False):
def removeIfSparse(
dtable : datatable.DataTable,
vids : List[int],
minpres : Optional[float] = None,
minstd : Optional[float] = None,
mincat : Optional[float] = None,
maxcat : Optional[float] = None,
abspres : bool = True,
abscat : bool = True,
naval : Optional[Any] = None,
ignoreType : bool = False
) -> List[datatable.Column]:
"""removeIfSparse([minpres], [minstd], [mincat], [maxcat], [abspres], [abscat], [naval])
Removes columns deemed to be sparse.
......@@ -100,31 +105,42 @@ def removeIfSparse(dtable,
See the :func:`.isSparse` function for details on the other arguments.
""" # noqa
remove = []
cols = []
series = []
vtypes = []
for vid in vids:
if ignoreType: vtype = None
else: vtype = dtable.vartable.loc[vid, 'Type']
for col in dtable.columns(vid):
vcols = dtable.columns(vid)
cols .extend(vcols)
series.extend([dtable[:, c.name] for c in vcols])
vtypes.extend([vtype] * len(vcols))
log.debug('Checking column %s for sparsity', col.name)
log.debug('Checking %u columns for sparsity %s ...', len(series), vids[:5])
isSparse, test, val = core.isSparse(dtable[:, col.name],
vtype,
minpres=minpres,
minstd=minstd,
mincat=mincat,
maxcat=maxcat,
abspres=abspres,
abscat=abscat,
naval=naval)
func = ft.partial(core.isSparse,
minpres=minpres,
minstd=minstd,
mincat=mincat,
maxcat=maxcat,
abspres=abspres,
abscat=abscat,
naval=naval)
with dtable.pool() as pool:
results = pool.starmap(func, zip(series, vtypes))
remove = []
if isSparse:
log.debug('Dropping sparse column %s (%s: %f)',
col.name, test, val)
remove.append(col)
for col, (isSparse, reason, value) in zip(cols, results):
if isSparse:
log.debug('Dropping sparse column %s (%s: %f)',
col.name, reason, value)
remove.append(col)
if len(remove) > 0:
log.debug('Dropping %u sparse columns: %s ...',
......
......@@ -51,7 +51,7 @@ def isSparse(
) -> Tuple[bool, Union[str, None], Any]:
"""Returns ``True`` if the given data looks sparse, ``False`` otherwise.
Used by :func:`removeIfSparse`.
Used by :func:`.removeIfSparse`.
The check is based on the following criteria:
......
%% Cell type:markdown id: tags:
![win logo](win.png)
# `funpack` (https://git.fmrib.ox.ac.uk/fsl/funpack)
> Paul McCarthy <paul.mccarthy@ndcn.ox.ac.uk>
> ([WIN@FMRIB](https://www.win.ox.ac.uk/))
`funpack` is a command-line program which you can use to extract data from UK
BioBank (and other tabular) data.
You can give `funpack` one or more input files (e.g. `.csv`, `.tsv`), and it
will merge them together, perform some preprocessing, and produce a single
output file.
A large number of rules are built into `funpack` which are specific to the UK
BioBank data set. But you can control and customise everything that `funpack`
does to your data, including which rows and columns to extract, and which
cleaning/processing steps to perform on each column.
`funpack` comes installed with recent versions of
[FSL](https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/). You can also install `funpack`
via `conda`:
> ```
> conda install -c conda-forge fmrib-unpack
> ```
Or using `pip`:
> ```
> pip install fmrib-unpack
> ```
Get command-line help by typing:
> ```
> funpack -h
> ```
**Important** The examples in this notebook assume that you have installed `funpack`
2.3.1.dev0 or newer.
2.3.1 or newer.
%% Cell type:code id: tags:
``` bash
funpack -V
```
%% Cell type:markdown id: tags:
### Contents
1. [Overview](#Overview)
1. [Import](#1.-Import)
2. [Cleaning](#2.-Cleaning)
3. [Processing](#3.-Processing)
4. [Export](#4.-Export)
2. [Examples](#Examples)
3. [Import examples](#Import-examples)
1. [Selecting variables (columns)](#Selecting-variables-(columns))
1. [Selecting individual variables](#Selecting-individual-variables)
2. [Selecting variable ranges](#Selecting-variable-ranges)
3. [Selecting variables with a file](#Selecting-variables-with-a-file)
4. [Selecting variables from pre-defined categories](#Selecting-variables-from-pre-defined-categories)
2. [Selecting subjects (rows)](#Selecting-subjects-(rows))
1. [Selecting individual subjects](#Selecting-individual-subjects)
2. [Selecting subject ranges](#Selecting-subject-ranges)
3. [Selecting subjects from a file](#Selecting-subjects-from-a-file)
4. [Selecting subjects by variable value](#Selecting-subjects-by-variable-value)
5. [Excluding subjects](#Excluding-subjects)
3. [Selecting visits](#Selecting-visits)
1. [Evaluating expressions across visits](#Evaluating-expressions-across-visits)
4. [Merging multiple input files](#Merging-multiple-input-files)
1. [Merging by subject](#Merging-by-subject)
2. [Merging by column](#Merging-by-column)
3. [Naive merging](#Naive-merging)
4. [Cleaning examples](#Cleaning-examples)
1. [NA insertion](#NA-insertion)
2. [Variable-specific cleaning functions](#Variable-specific-cleaning-functions)
3. [Categorical recoding](#Categorical-recoding)
4. [Child value replacement](#Child-value-replacement)
5. [Processing examples](#Processing-examples)
1. [Sparsity check](#Sparsity-check)
2. [Redundancy check](#Redundancy-check)
3. [Categorical binarisation](#Categorical-binarisation)
6. [Custom cleaning, processing and loading - funpack plugins](#Custom-cleaning,-processing-and-loading---funpack-plugins)
1. [Custom cleaning functions](#Custom-cleaning-functions)
2. [Custom processing functions](#Custom-processing-functions)
3. [Custom file loaders](#Custom-file-loaders)
7. [Miscellaneous topics](#Miscellaneous-topics)
1. [Non-numeric data](#Non-numeric-data)
2. [Dry run](#Dry-run)
3. [Built-in rules](#Built-in-rules)
4. [Using a configuration file](#Using-a-configuration-file)
5. [Working with unknown/uncategorised variables](#Working-with-unknown/uncategorised-variables)
# Overview
`funpack` performs the following steps:
## 1. Import
All data files are loaded in, unwanted columns and subjects are dropped, and
the data files are merged into a single table (a.k.a. data frame). Multiple
files can be merged according to an index column (e.g. subject ID). Or, if the
input files contain the same columns/subjects, they can be naively
concatenated along rows or columns.
## 2. Cleaning
The following cleaning steps are applied to each column:
1. **NA value replacement:** Specific values for some columns are replaced
with NA, for example, variables where a value of `-1` indicates *Do not
know*.
2. **Variable-specific cleaning functions:** Certain columns are
re-formatted; for example, the [ICD10](https://en.wikipedia.org/wiki/ICD-10)
disease codes can be converted to integer representations.
3. **Categorical recoding:** Certain categorical columns are re-coded.
4. **Child value replacement:** NA values within some columns which are
dependent upon other columns may have values inserted based on the values
of their parent columns.
## 3. Processing
During the processing stage, columns may be removed, merged, or expanded into
additional columns. For example, a categorical column may be expanded into a set
of binary columns, one for each category.
A column may also be removed on the basis of being too sparse, or being
redundant with respect to another column.
## 4. Export
The processed data can be saved as a `.csv`, `.tsv`, or `.hdf5` file.
# Examples
Throughout these examples, we are going to use a few command line
options, which you will probably **not** normally want to use:
- `-ow` (short for `--overwrite`): This tells `funpack` not to complain if
the output file already exists.
- `-q` (short for `--quiet`): This tells `funpack` to be quiet. Without the
`-q` option, `funpack` can be quite verbose, which can be annoying, but is
very useful when things go wrong. A good strategy is to tell `funpack` to
produce verbose output using the `--noisy` (`-n` for short) option, and to
send all of its output to a log file with the `--log_file` (or `-lf`)
option. For example:
> ```
> funpack -n -n -n -lf log.txt out.tsv in.tsv
> ```
Here's the first example input data set, with UK BioBank-style column names:
%% Cell type:code id: tags:
``` bash
cat data_01.tsv
```
%% Cell type:markdown id: tags:
The numbers in each column name typically represent:
1. The variable ID
2. The visit, for variables which were collected at multiple points in time.
3. The "instance", for multi-valued variables.
Note that one **variable** is typically associated with several **columns**,
although we're keeping things simple for this first example - there is only
one visit for each variable, and there are no multi-valued variables.
> _Most but not all_ variables in the UK BioBank contain data collected at
> different visits, the times that the participants visited a UK BioBank
> assessment centre. However there are some variables (e.g. [ICD10 diagnosis
> codes](https://biobank.ctsu.ox.ac.uk/crystal/field.cgi?id=41202)) for which
> this is not the case.
# Import examples
## Selecting variables (columns)
You can specify which variables you want to load in the following ways, using
the `--variable` (`-v` for short), `--category` (`-c` for short) and
`--column` (`-co` for short) command line options:
* By variable ID
* By variable ranges
* By a text file which contains the IDs you want to keep.
* By pre-defined variable categories
* By column name
### Selecting individual variables
Simply provide the IDs of the variables you want to extract:
%% Cell type:code id: tags:
``` bash
funpack -q -ow -v 1 -v 5 out.tsv data_01.tsv
cat out.tsv
```
%% Cell type:markdown id: tags:
### Selecting variable ranges
The `--variable`/`-v` option accepts MATLAB-style ranges of the form
`start:step:stop` (where the `stop` is inclusive):
%% Cell type:code id: tags:
``` bash
funpack -q -ow -v 1:3:10 out.tsv data_01.tsv
cat out.tsv
```
%% Cell type:markdown id: tags:
### Selecting variables with a file
If your variables of interest are listed in a plain-text file, you can simply
pass that file:
%% Cell type:code id: tags:
``` bash
echo -e "1\n6\n9" > vars.txt
funpack -q -ow -v vars.txt out.tsv data_01.tsv
cat out.tsv
```
%% Cell type:markdown id: tags:
### Selecting variables from pre-defined categories
Some UK BioBank-specific categories are [built into
`funpack`](#Built-in-rules), but you can also define your own categories - you
just need to create a `.tsv` file, and pass it to `funpack` via the
`--category_file` option (`-cf` for short):
%% Cell type:code id: tags:
``` bash
echo -e "ID\tCategory\tVariables" > custom_categories.tsv
echo -e "1\tCool variables\t1:5,7" >> custom_categories.tsv
echo -e "2\tUncool variables\t6,8:10" >> custom_categories.tsv
cat custom_categories.tsv
```
%% Cell type:markdown id: tags:
Use the `--category` option (`-c` for short) to select categories to output.
You can refer to categories by their ID:
%% Cell type:code id: tags:
``` bash
funpack -q -ow -cf custom_categories.tsv -c 1 out.tsv data_01.tsv
cat out.tsv
```
%% Cell type:markdown id: tags:
Or by name:
%% Cell type:code id: tags:
``` bash
funpack -q -ow -cf custom_categories.tsv -c uncool out.tsv data_01.tsv
cat out.tsv
```
%% Cell type:markdown id: tags:
### Selecting column names
If you are working with data that has non-UK BioBank style column names, you
can use the `--column` option (`-co` for short) to select individual columns
by their name, rather than the variable with which they are associated. The
`--column` option accepts full column names, and also shell-style wildcard
patterns:
%% Cell type:code id: tags:
``` bash
funpack -q -ow -co 4-0.0 -co "??-0.0" out.tsv data_01.tsv
cat out.tsv
```
%% Cell type:markdown id: tags:
## Selecting subjects (rows)
`funpack` assumes that the first column in every input file is a subject
ID. You can specify which subjects you want to load via the `--subject` (`-s`
for short) option. You can specify subjects in the same way that you specified
variables above, and also:
* By specifying a conditional expression on variable values - only subjects
for which the expression evaluates to true will be imported
* By specifying subjects to exclude
### Selecting individual subjects
%% Cell type:code id: tags:
``` bash
funpack -q -ow -s 1 -s 3 -s 5 out.tsv data_01.tsv
cat out.tsv
```
%% Cell type:markdown id: tags:
### Selecting subject ranges
%% Cell type:code id: tags:
``` bash
funpack -q -ow -s 2:2:10 out.tsv data_01.tsv
cat out.tsv
```
%% Cell type:markdown id: tags:
### Selecting subjects from a file
%% Cell type:code id: tags:
``` bash
echo -e "5\n6\n7\n8\n9\n10" > subjects.txt
funpack -q -ow -s subjects.txt out.tsv data_01.tsv
cat out.tsv
```
%% Cell type:markdown id: tags:
### Selecting subjects by variable value
The `--subject` option accepts *variable expressions* - you can write an
expression performing numerical comparisons on variables (denoted with a
leading `v`) and combine these expressions using boolean algebra. Only
subjects for which the expression evaluates to true will be imported. For
example, to only import subjects where variable 1 is greater than 10, and
variable 2 is less than 70, you can type:
%% Cell type:code id: tags:
``` bash
funpack -q -ow -sp -s "v1 > 10 && v2 < 70" out.tsv data_01.tsv
cat out.tsv
```
%% Cell type:markdown id: tags:
The following symbols can be used in variable expressions:
| Symbol | Meaning |
|---------------------------|---------------------------------|
| `==` | equal to |
| `!=` | not equal to |
| `>` | greater than |
| `>=` | greater than or equal to |
| `<` | less than |
| `<=` | less than or equal to |
| `na` | N/A |
| `&&` | logical and |
| <code>&#x7c;&#x7c;</code> | logical or |
| `~` | logical not |
| `contains` | Contains sub-string |
| `all` | all columns must meet condition |
| `any` | any column must meet condition |
| `()` | to denote precedence |
Non-numeric (i.e. string) variables can be used in these expressions in
conjunction with the `==`, `!=`, and `contains` operators. An example of such
an expression is given in the section on [non-numeric
data](#Non-numeric-data), below.
The `all` and `any` symbols allow you to control how an expression is
evaluated across multiple columns which are associated with one variable
(e.g. separate columns for each visit). We will give an example of this in the
section on [selecting visits](#Selecting-visits), below.
### Excluding subjects
The `--exclude` (`-ex` for short) option allows you to exclude subjects - it
accepts individual IDs, an ID range, or a file containing IDs. The
`--exclude`/`-ex` option takes precedence over the `--subject`/`-s` option:
%% Cell type:code id: tags:
``` bash
funpack -q -ow -s 1:8 -ex 5:10 out.tsv data_01.tsv
cat out.tsv
```
%% Cell type:markdown id: tags:
## Selecting visits
Many variables in the UK BioBank data contain observations at multiple points in
time, or visits. `funpack` allows you to specify which visits you are interested