Commit 975ea232 authored by William Clarke

Merge branch 'broken_svs_file' into 'master'

Broken svs file

See merge request !3
parents d7c9fa73 93139cb2
mapVBVD/_version.py export-subst
*.dat filter=lfs diff=lfs merge=lfs -text
@@ -5,3 +5,6 @@ build
dist
*.egg-info
__pycache__
.DS_Store
.idea/*
@@ -9,11 +9,12 @@ Python port of the Matlab mapVBVD tool for reading Siemens raw data 'twix' (.dat
I have attempted to replicate the syntax of the original Matlab code, but there are a few differences due to differing variable types.
This package contains a demo Jupyter notebook 'Demo.ipynb' which can be run on the two sets of demo data found in tests/test_data. Both datasets are unsuppressed water SVS MRS, one from a 7T VB scanner and the other from a VE Prisma.
This package contains a demo Jupyter notebook 'Demo.ipynb' which can be run on the demo data found in tests/test_data. There is unsuppressed water SVS MRS data from both a 7T VB scanner and a VE Prisma. There is also imaging data (3D GRE and EPI) from the [ISMRMRD test dataset](https://doi.org/10.5281/zenodo.33166).
Run using the following:
```
twixObj = mapVBVD(filename)
import mapvbvd
twixObj = mapvbvd.mapVBVD(filename)
```
For multi-raid files (VD+), twixObj is a list; for single-measurement files it is an AttrDict containing data with keys relating to the MDH flags and a header object. The MDH flags present can be retrieved as a list using `twixObj.MDH_flags()`, whilst the header is found at `twixObj.hdr`.
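For example, a minimal sketch of inspecting loaded data (`filename` and `multi_raid_filename` are placeholders for your own .dat files):
```
import mapvbvd

twixObj = mapvbvd.mapVBVD(filename)    # single-measurement (VB) file
print(twixObj.MDH_flags())             # list of MDH flags present in the file
print(twixObj.hdr)                     # header object, lists its top-level structures

# Multi-raid (VD+) files return a list; index the measurement you want,
# e.g. the second measurement, as in the tests added in this merge request.
multiObj = mapvbvd.mapVBVD(multi_raid_filename)
print(multiObj[1].image.dataSize)
```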
@@ -49,7 +50,18 @@ key_value = twixObj.search_header_for_val('MeasYaps',('sTXSPEC', 'asNucleusInfo'
`search_header_for_keys` takes the keyword argument regex (default True) to either search via case insensitive regular expressions or via exact matches only. Specify top_lvl to restrict to just some parameter sets.
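For example, a rough sketch (the search terms are illustrative, and the call is assumed to share the argument layout of `twix_hdr.search_for_keys` shown later in this diff):
```
# case-insensitive regex search, restricted to the MeasYaps parameter set
matches = twixObj.search_header_for_keys(('sTXSPEC', 'asNucleusInfo'), top_lvl='MeasYaps')

# exact-string matching instead of regex, across all parameter sets
matches = twixObj.search_header_for_keys(('alTE',), regex=False)
```
Each call prints the matching keys and returns a dict mapping each searched parameter set to its matching keys.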
## Other info
Many of the auxiliary parts of mapVBVD (e.g. line reflection, OS removal) are not yet implemented. Please feel free to contribute these parts! As this is a port, the code is intended to resemble the original Matlab code; it is not "good" Python code!
Thanks to Mo Shahdloo, the latest version now implements OS removal, ramp-sample regridding, averaging and line reflection.
Set the appropriate flags to enable these features:
```
twixObj.image.flagRemoveOS = True
twixObj.image.flagRampSampRegrid = True
twixObj.refscanPC.flagIgnoreSeg = True
twixObj.refscanPC.flagDoAverage = True
```
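With the flags set, data can then be read by slicing the relevant data object (a minimal sketch based on the tests added in this merge request):
```
twixObj.image.squeeze = True          # drop singleton dimensions on read
data = twixObj.image[:, :, :, 0]      # read a subset with the flags applied
print(twixObj.image.dataSize)         # dimensions after the flags are applied
```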
Some of the auxiliary parts of mapVBVD remain unimplemented. Please feel free to contribute these parts! As this is a port, the code is intended to resemble the original Matlab code; it is not "good" Python code.
## Credit where credit is due
This code is a port of Philipp Ehses' original Matlab code. I am incredibly grateful to him for releasing this code to the MR community. There are a number of other names in the original code comments and log: Felix Breuer, Wolf Blecher, Stephen Yutzy, Zhitao Li, Michael Völker, Jonas Bause and Chris Mirke.
from mapVBVD.mapVBVD import mapVBVD
from mapvbvd.mapVBVD import mapVBVD
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
@@ -43,7 +43,7 @@ def get_config():
cfg.style = "pep440"
cfg.tag_prefix = ""
cfg.parentdir_prefix = ""
cfg.versionfile_source = "mapVBVD/_version.py"
cfg.versionfile_source = "mapvbvd/_version.py"
cfg.verbose = False
return cfg
import re
import numpy as np
from scipy.integrate import cumtrapz
from itertools import chain
from attrdict import AttrDict
class twix_hdr(AttrDict):
def __init__(self,*args):
def __init__(self, *args):
super().__init__(*args)
def __str__(self):
keystr = '\n'.join([k for k in self.keys()])
des_str = ('***twix_hdr***\n'
f'Top level data structures: \n{keystr}\n')
return des_str
def __repr__(self):
return str(self)
@staticmethod
def search_using_tuple(s_terms,key,regex=True):
def search_using_tuple(s_terms, key, regex=True):
if regex:
def regex_tuple(pattern,key_tuple):
def regex_tuple(pattern, key_tuple):
inner_out = []
re_comp = re.compile(pattern,re.IGNORECASE)
re_comp = re.compile(pattern, re.IGNORECASE)
for k in key_tuple:
m = re_comp.search(k)
if m:
@@ -24,12 +35,12 @@ class twix_hdr(AttrDict):
out = []
for st in s_terms:
out.append(regex_tuple(st,key))
return all(out)
out.append(regex_tuple(st, key))
return all(out)
else:
return all([st in key for st in s_terms])
def search_for_keys(self,search_terms,top_lvl=None,print_flag=True,regex=True):
def search_for_keys(self, search_terms, top_lvl=None, print_flag=True, regex=True):
"""Search header keys for terms.
Args:
@@ -37,120 +48,158 @@ class twix_hdr(AttrDict):
recursive (optional): Search using regex or for exact strings.
top_lvl (optional) : Specify list of parameter sets to search (e.g. YAPS)
print_flag(optional): If False no output will be printed.
"""
"""
if top_lvl is None:
top_lvl = self.keys()
elif isinstance(top_lvl,str):
top_lvl = [top_lvl,]
elif isinstance(top_lvl, str):
top_lvl = [top_lvl, ]
out = {}
for key in top_lvl:
matching_keys = []
list_of_keys = self[key].keys()
for sub_key in list_of_keys:
if twix_hdr.search_using_tuple(search_terms,sub_key,regex=regex):
if twix_hdr.search_using_tuple(search_terms, sub_key, regex=regex):
matching_keys.append(sub_key)
if print_flag:
print(f'{key}:')
for mk in matching_keys:
print(f'\t{mk}: {self[key][mk]}')
out.update({key:matching_keys})
out.update({key: matching_keys})
return out
def parse_ascconv(buffer):
#print(buffer)
vararray = re.finditer(r'(?P<name>\S*)\s*=\s*(?P<value>\S*)\n',buffer)
#print(vararray)
mrprot ={}
for v in vararray:
# print(buffer)
vararray = re.finditer(r'(?P<name>\S*)\s*=\s*(?P<value>\S*)\n', buffer)
# print(vararray)
mrprot = AttrDict()
for v in vararray:
try:
value = float(v.group('value'))
except ValueError:
value = v.group('value')
#now split array name and index (if present)
vvarray = re.finditer(r'(?P<name>\w+)(\[(?P<ix>[0-9]+)\])?',v.group('name'))
# now split array name and index (if present)
vvarray = re.finditer(r'(?P<name>\w+)(\[(?P<ix>[0-9]+)\])?', v.group('name'))
currKey = []
for vv in vvarray:
#print(vv.group('name'))
# print(vv.group('name'))
currKey.append(vv.group('name'))
if vv.group('ix') is not None:
#print(vv.group('ix'))
# print(vv.group('ix'))
currKey.append(vv.group('ix'))
mrprot.update({tuple(currKey):value})
mrprot.update({tuple(currKey): value})
return mrprot
def parse_xprot(buffer):
xprot = {}
tokens = re.finditer(r'<Param(?:Bool|Long|String)\."(\w+)">\s*{([^}]*)',buffer)
tokensDouble = re.finditer(r'<ParamDouble\."(\w+)">\s*{\s*(<Precision>\s*[0-9]*)?\s*([^}]*)',buffer)
alltokens = chain(tokens,tokensDouble)
tokens = re.finditer(r'<Param(?:Bool|Long|String)\."(\w+)">\s*{([^}]*)', buffer)
tokensDouble = re.finditer(r'<ParamDouble\."(\w+)">\s*{\s*(<Precision>\s*[0-9]*)?\s*([^}]*)', buffer)
alltokens = chain(tokens, tokensDouble)
for t in alltokens:
#print(t.group(1))
#print(t.group(2))
# print(t.group(1))
# print(t.group(2))
name = t.group(1)
value = re.sub(r'("*)|( *<\w*> *[^\n]*)','',t.groups()[-1])
value = re.sub(r'[\t\n\r\f\v]*','',value.strip()) #value = re.sub(r'\s*',' ',value) for some bonkers reason this inserts whitespace between all the letters! Just look for other whitespace that \s usually does.
value = re.sub(r'("*)|( *<\w*> *[^\n]*)', '', t.groups()[-1])
value = re.sub(r'[\t\n\r\f\v]*', '',
value.strip()) # value = re.sub(r'\s*',' ',value) for some bonkers reason this inserts whitespace between all the letters! Just look for other whitespace that \s usually does.
try:
value = float(value)
except ValueError:
pass
xprot.update({name : value})
xprot.update({name: value})
return xprot
def parse_buffer(buffer):
reASCCONV = re.compile(r'### ASCCONV BEGIN[^\n]*\n(.*)\s### ASCCONV END ###',re.DOTALL)
#print(f'buffer = {buffer[0:10]}')
#import pdb; pdb.set_trace()
reASCCONV = re.compile(r'### ASCCONV BEGIN[^\n]*\n(.*)\s### ASCCONV END ###', re.DOTALL)
# print(f'buffer = {buffer[0:10]}')
# import pdb; pdb.set_trace()
ascconv = reASCCONV.search(buffer)
#print(f'ascconv = {ascconv}')
# print(f'ascconv = {ascconv}')
if ascconv is not None:
prot = parse_ascconv(ascconv.group(0))
prot = parse_ascconv(ascconv.group(0))
else:
prot = {}
prot = AttrDict()
xprot = reASCCONV.split(buffer)
#print(f'xprot = {xprot[0][0:10]}')
# print(f'xprot = {xprot[0][0:10]}')
if xprot is not None:
xprot = ''.join([found for found in xprot])
prot2 = parse_xprot(xprot)
prot.update(prot2)
return prot
def read_twix_hdr(fid):
#function to read raw data header information from siemens MRI scanners
#(currently VB and VD software versions are supported and tested).
def read_twix_hdr(fid, prot):
# function to read raw data header information from siemens MRI scanners
# (currently VB and VD software versions are supported and tested).
nbuffers = np.fromfile(fid, dtype=np.uint32, count=1)
prot = {}
for _ in range(nbuffers[0]):
tmpBuff = np.fromfile(fid, dtype=np.uint8, count=10)
bufname = ''.join([chr(item) for item in tmpBuff])
bufname = re.match(r'^\w*', bufname).group(0)
fid.seek(len(bufname)-9,1)
fid.seek(len(bufname) - 9, 1)
buflen = np.fromfile(fid, dtype=np.uint32, count=1)
tmpBuff = np.fromfile(fid, dtype=np.uint8, count=buflen[0])
buffer = ''.join([chr(item) for item in tmpBuff])
buffer = re.sub(r'\n\s*\n', '', buffer)
prot.update({bufname:parse_buffer(buffer)})
return twix_hdr(prot)
\ No newline at end of file
prot.update({bufname: parse_buffer(buffer)})
rstraj = None
# read gridding info
if 'alRegridMode' in prot.Meas:
regrid_mode = int(prot.Meas.alRegridMode.split(' ')[0])
if regrid_mode > 1:
ncol = int(prot.Meas.alRegridDestSamples.split(' ')[0])
dwelltime = float(prot.Meas.aflRegridADCDuration.split(' ')[0]) / ncol
gr_adc = np.zeros(ncol, dtype=np.single)
start = float(prot.Meas.alRegridDelaySamplesTime.split(' ')[0])
time_adc = start + dwelltime * (np.array(range(ncol)) + 0.5)
rampup_time = float(prot.Meas.alRegridRampupTime.split(' ')[0])
flattop_time = float(prot.Meas.alRegridFlattopTime.split(' ')[0])
rampdown_time = float(prot.Meas.alRegridRampdownTime.split(' ')[0])
ixUp = np.where(time_adc < rampup_time)[0]
ixFlat = np.setdiff1d(np.where(time_adc <= rampup_time + flattop_time)[0],
np.where(time_adc < rampup_time)[0])
ixDn = np.setdiff1d(np.setdiff1d(list(range(ncol)), ixFlat), ixUp)
gr_adc[ixFlat] = 1
if regrid_mode == 2:
# trapezoidal gradient
gr_adc[ixUp] = time_adc[ixUp] / float(prot.Meas.alRegridRampupTime.split(' ')[0])
gr_adc[ixDn] = 1 - (time_adc[ixDn] - rampup_time - flattop_time) / rampdown_time
elif regrid_mode == 4:
gr_adc[ixUp] = np.sin(np.pi / 2 * time_adc[ixUp] / rampup_time)
gr_adc[ixDn] = np.sin(np.pi / 2 * (1 + (time_adc[ixDn] - rampup_time - flattop_time) / rampdown_time))
else:
raise Exception('regridding mode unknown')
# make sure that gr_adc is always positive (rstraj needs to be strictly monotonic)
gr_adc = np.maximum(gr_adc, 1e-4)
rstraj = (np.append(0, cumtrapz(gr_adc)) - ncol / 2) / np.sum(gr_adc)
rstraj -= np.mean(rstraj[int(ncol / 2) - 1:int(ncol / 2) + 1])
# scale rstraj by kmax (only works if all slices have same FoV!!!)
# TODO: these are examples of the keys not arranged correctly
kmax = prot.MeasYaps[('sKSpace', 'lBaseResolution')] / prot.MeasYaps[
('sSliceArray', 'asSlice', '0', 'dReadoutFOV')]
rstraj *= kmax
return prot, rstraj
[versioneer]
VCS = git
style = pep440
versionfile_source = mapVBVD/_version.py
versionfile_build = mapVBVD/_version.py
versionfile_source = mapvbvd/_version.py
versionfile_build = mapvbvd/_version.py
tag_prefix =
parentdir_prefix =
\ No newline at end of file
"""
Created by shahdloo
22/09/2020
"""
\ No newline at end of file
"""
Created by shahdloo
22/09/2020
"""
import os.path as op
import numpy as np
from mapvbvd import mapVBVD
test_data_vb_broken = op.join(op.dirname(__file__), 'test_data', 'meas_MID111_sLaser_broken_FID4873.dat')
test_data_gre = op.join(op.dirname(__file__), 'test_data', 'meas_MID00255_FID12798_GRE_surf.dat')
test_data_epi = op.join(op.dirname(__file__), 'test_data', 'meas_MID00265_FID12808_FMRI.dat')
def test_flagRemoveOS():
twixObj = mapVBVD(test_data_gre, quiet=False)
twixObj[1].image.flagRemoveOS = False
assert np.allclose(twixObj[1].image.dataSize, [256, 16, 128, 1, 5, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
twixObj[1].image.flagRemoveOS = True
assert np.allclose(twixObj[1].image.dataSize, [128, 16, 128, 1, 5, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
# broken file
twixObj = mapVBVD(test_data_vb_broken, quiet=False)
twixObj.image.flagRemoveOS = False
assert np.allclose(twixObj.image.dataSize, [4096, 32, 1, 1, 1, 1, 1, 1, 1, 97, 1, 1, 1, 1, 1, 1])
twixObj.image.flagRemoveOS = True
assert np.allclose(twixObj.image.dataSize, [2048, 32, 1, 1, 1, 1, 1, 1, 1, 97, 1, 1, 1, 1, 1, 1])
def test_flagIgnoreSeg_flagDoAverage():
twixObj = mapVBVD(test_data_epi, quiet=False)
twixObj[1].refscanPC.flagIgnoreSeg = False
twixObj[1].refscanPC.flagDoAverage = False
assert np.allclose(twixObj[1].refscanPC.dataSize, [110, 16, 1, 1, 5, 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1])
twixObj[1].refscanPC.flagDoAverage = True
assert np.allclose(twixObj[1].refscanPC.dataSize, [110, 16, 1, 1, 5, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1])
twixObj[1].refscanPC.flagIgnoreSeg = True
twixObj[1].refscanPC.flagDoAverage = False
assert np.allclose(twixObj[1].refscanPC.dataSize, [110, 16, 1, 1, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
twixObj[1].refscanPC.flagDoAverage = True
assert np.allclose(twixObj[1].refscanPC.dataSize, [110, 16, 1, 1, 5, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
def test_flagSkipToFirstLine():
twixObj = mapVBVD(test_data_epi, quiet=False)
twixObj[1].refscan.flagSkipToFirstLine = False
assert np.allclose(twixObj[1].refscan.dataSize, [110, 16, 82, 1, 5, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1])
twixObj[1].refscan.flagSkipToFirstLine = True
assert np.allclose(twixObj[1].refscan.dataSize, [110, 16, 54, 1, 5, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1])
"""
Created by shahdloo
23/09/2020
"""
import os.path as op
import numpy as np
from mapvbvd import mapVBVD
import h5py
test_data_gre = op.join(op.dirname(__file__), 'test_data', 'meas_MID00255_FID12798_GRE_surf.dat')
test_data_gre_mat = op.join(op.dirname(__file__), 'test_data', 'meas_MID00255_FID12798_GRE_surf.mat')
test_data_epi = op.join(op.dirname(__file__), 'test_data', 'meas_MID00265_FID12808_FMRI.dat')
test_data_epi_mat = op.join(op.dirname(__file__), 'test_data', 'meas_MID00265_FID12808_FMRI.mat')
def test_gre():
twixObj = mapVBVD(test_data_gre, quiet=False)
twixObj[1].image.squeeze = True
twixObj[1].image.flagRemoveOS = False
img_py = twixObj[1].image[:, :, :, 0]
twixObj[1].image.flagRemoveOS = True
img_py_os = twixObj[1].image[:, :, :, 0]
with h5py.File(test_data_gre_mat, 'r') as f:
base = f['img'][0, 0, :, :, :]
img_mat = (base['real'] + 1j * base['imag']).transpose()
base = f['img_os'][0, 0, :, :, :]
img_mat_os = (base['real'] + 1j * base['imag']).transpose()
assert np.allclose(img_py, img_mat)
assert np.allclose(img_py_os, img_mat_os)
def test_epi():
twixObj = mapVBVD(test_data_epi, quiet=False)
twixObj[1].image.squeeze = True
twixObj[1].image.flagRampSampRegrid = False
twixObj[1].image.flagRemoveOS = False
img_py = twixObj[1].image[:, :, :, 0, 0, 0]
twixObj[1].image.flagRemoveOS = True
img_py_os = twixObj[1].image[:, :, :, 0, 0, 0]
twixObj[1].image.flagRampSampRegrid = True
img_py_os_rg = twixObj[1].image[:, :, :, 0, 0, 0]
with h5py.File(test_data_epi_mat, 'r') as f:
base = f['img'][0, 0, 0, 0, 0, 0, 0, 0, :, :, :]
img_mat = (base['real'] + 1j * base['imag']).transpose()
base = f['img_os'][0, 0, 0, 0, 0, 0, 0, 0, :, :, :]
img_mat_os = (base['real'] + 1j * base['imag']).transpose()
base = f['img_os_rg'][0, 0, 0, 0, 0, 0, 0, 0, :, :, :]
img_mat_os_rg = (base['real'] + 1j * base['imag']).transpose()
assert np.allclose(img_py, img_mat)
assert np.allclose(img_py_os, img_mat_os)
assert np.allclose(img_py_os_rg, img_mat_os_rg, atol=1e-6, rtol=1e-6)