Commit 8370ebaf authored by Sean Fitzgibbon's avatar Sean Fitzgibbon
Browse files

Merge branch 'new_chart_reg' into 'master'

Improved chart-to-slide registration

See merge request !2
parents 3f1720fd b896788f
......@@ -132,18 +132,24 @@ You can register a Neurolucida chart file to a 2D-slide using the `CHART` subcom
```shell
>> slider_app.py CHART -h
usage: slider_app.py CHART [-h] [--out <dir>] <chart> <slide> <slide-resolution>
usage: slider_app.py CHART [-h] [--outdir <dir>] [--boundary_key <key>] [--justify <left-right>]
[--config <config.yaml>]
<chart> <slide> <slide-resolution>
Register charting to 2D slide
positional arguments:
<chart> Neurolucida chart file
<slide> Fixed slide
<slide-resolution> Slide image resolution (mm)
<chart> Neurolucida chart file
<slide> Fixed slide
<slide-resolution> Slide image resolution (mm)
optional arguments:
-h, --help show this help message and exit
--out <dir> Output directory
-h, --help show this help message and exit
--outdir <dir> Output directory
--boundary_key <key> Name of boundary contour in chart
--justify <left-right> Justify chart bounding-box to the left/right of the image bounding box.
Useful when only one hemisphere is charted.
--config <config.yaml> configuration file
```
For example:
......
......@@ -14,28 +14,57 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
from operator import index
import os
import os.path as op
import json
from xml.dom import INDEX_SIZE_ERR
import yaml
from scipy.ndimage.interpolation import affine_transform
from slider.external import neurolucida
from slider import util
import glymur
from skimage.measure import regionprops, label
# from skimage.measure import regionprops, label
from skimage.color import rgb2gray, rgb2hsv
from skimage import filters, transform, exposure, morphology
from skimage import filters, transform, exposure, morphology, color, segmentation, measure
from matplotlib import patches
import numpy as np
import matplotlib.pyplot as plt
DO_PLOTS = False
OUTDIR = None
from scipy import ndimage as ndi
import warnings
def register_chart_to_slide(chart, slide, slide_res, out, boundary_key=None, config=None):
# DO_PLOTS = False
# OUTDIR = None
def estimate_field(img, corner_size=20, centre_half=100):
    """Estimate whether a slide image is light-field or dark-field.

    Compares the median intensity of the four image corners (assumed to be
    background) with the mean intensity of a central patch (assumed to
    contain tissue).

    Parameters
    ----------
    img : 2D ndarray
        Grayscale slide image.
    corner_size : int, optional
        Side length in pixels of each corner patch sampled as background.
    centre_half : int, optional
        Half-width in pixels of the central patch sampled as foreground.

    Returns
    -------
    str
        'light' if the corners are brighter than the centre, else 'dark'.
    """
    c = corner_size

    # median of the four corner patches -> background intensity estimate
    corners = [img[:c, :c], img[:c, -c:], img[-c:, :c], img[-c:, -c:]]
    corners = np.median(np.concatenate(corners, axis=0))

    # mean of a central patch -> foreground (tissue) intensity estimate
    h, w = img.shape
    h //= 2
    w //= 2
    # clamp the lower bounds at 0: without this, a half-dimension smaller
    # than centre_half produces a negative start index that wraps around
    # and samples an off-centre strip near the image edge instead
    centre = img[max(h - centre_half, 0):h + centre_half,
                 max(w - centre_half, 0):w + centre_half]
    centre = np.mean(centre)

    # brighter background than centre => light-field slide
    return 'light' if corners > centre else 'dark'
def register_chart_to_slide(chart, slide, slide_res, outdir, boundary_key=None, config=None, do_plots=None, justify=None):
if config is None:
config = util.get_resource("chart.yaml")
......@@ -48,269 +77,304 @@ def register_chart_to_slide(chart, slide, slide_res, out, boundary_key=None, con
if boundary_key is None:
boundary_key = config["chart"]["boundary_key"]
print(boundary_key)
# set global variables required for summary plots
plots = config["general"]["plots"]
DO_PLOTS = plots
OUTDIR = out
if do_plots is None:
do_plots = config["general"]["plots"]
# create output dir
if not op.exists(out):
os.makedirs(out)
if not op.exists(outdir):
os.makedirs(outdir)
# load chart
contour, cells = neurolucida.read(chart)
chart = util.Chart.from_neurolucida_ascii(chart)
outline = chart.get_contours(name=boundary_key, closed=True)
if len(outline)<1: raise ValueError(f'Boundary key {boundary_key} not found in chart')
if len(outline)>1: warnings.warn(f'Repeated entries of boundary key in chart: {boundary_key}')
edge_crds = [x.points[:, :2]*[1, -1] for x in outline]
edge_crds_cat = np.concatenate(edge_crds)
# load slide, convert to grayscale, and invert if light-field
slide_res = slide_res * (2 ** rlevel)
# load slide
jp2 = glymur.Jp2k(slide)
img = jp2.read(rlevel=rlevel)
img_orig = jp2.read(rlevel=rlevel)
# initial scaling based on bounding boxes
boundary_idx = np.where([f==boundary_key for f,_ in contour])[0]
if boundary_idx.size > 1:
raise RuntimeError(f'Repeated entries of boundary key in chart: {boundary_key}')
print(boundary_idx)
img = color.rgb2gray(img_orig)
edge_crds = contour[boundary_idx[0]][1][:, :2] * [1, -1]
init = _init_scale(img, slide_res, edge_crds, verbose=True)
print(init)
# print(
# f"Rotation: {init.rotation}, Translation: {init.translation}, Scale: {init.scale}, Shear: {init.shear}"
# )
field = estimate_field(img)
# calculate normal line to boundary points
xfm_edge_coords = _apply_xfm(init, edge_crds)
init_nrmls = normal(xfm_edge_coords)
if field == 'light':
img = 1 - img
img = enhance(img)
# refine edge_coords (to image)
refined_edge_coords = _refine_edge_coord(
img, slide_res, xfm_edge_coords, init_nrmls
# initial alignment based on bounding boxes
init_xfm, img_props, coord_props = init_scale(img, slide_res, edge_crds_cat, justify=justify)
# print(init_xfm)
tr_x, tr_y = init_xfm.translation
print(
f"Initial XFM - Rotation: {init_xfm.rotation:0.5f}, Translation: [{tr_x:0.5f} {tr_y:0.5f}], Scale: {init_xfm.scale:0.5f}"
)
# estimate optimised affine transform
opt = transform.SimilarityTransform()
opt.estimate(edge_crds, refined_edge_coords)
print(opt)
# print(
# f"Rotation:\t{opt.rotation}\nTranslation:\t{opt.translation}\nScale:\t\t{list(opt.scale)}\nShear:\t\t{opt.shear}"
# )
np.savetxt(f"{outdir}/chart-to-image-init.xfm", init_xfm.params)
# save opt transform
np.savetxt(f"{out}/chart-to-image.xfm", opt.params)
# refine alignment (to mask edges)
# apply opt-xfm to contours and cells and save
opt_xfm = refine_edge_coord(
img, slide_res, edge_crds_cat, init_xfm
)
# print(opt_xfm)
tr_x, tr_y = opt_xfm.translation
print(
f"Optimised XFM - Rotation: {opt_xfm.rotation:0.5f}, Translation: [{tr_x:0.5f} {tr_y:0.5f}], Scale: {opt_xfm.scale:0.5f}"
)
np.savetxt(f"{outdir}/chart-to-image.xfm", opt_xfm.params)
# apply optimised xfm to contours and cells and save
contour_xfm = [
(k, _apply_xfm(opt, v[:, :2] * [1, -1]).tolist()) for k, v in contour
(contour.name, apply_xfm(opt_xfm, contour.points[:, :2] * [1, -1]).tolist(), contour.closed) for contour in chart.contours
]
if DO_PLOTS:
fig, ax = plt.subplots(figsize=(10, 10))
with open(f"{outdir}/contour.json", "w") as fp:
json.dump(contour_xfm, fp, indent=4)
extent = np.array([0, img.shape[1], img.shape[0], 0]) * slide_res
ax.imshow(img, extent=extent)
if chart.n_cells > 0:
cells = np.concatenate([cell.point[:2][np.newaxis, :] for cell in chart.cells]) * [1, -1]
cells_xfm = apply_xfm(init_xfm, cells )
with open(f"{outdir}/cells.json", "w") as fp:
json.dump(cells_xfm.tolist(), fp, indent=4)
for name, coords in contour_xfm:
# do plots
coords = np.array(coords)
if do_plots:
cog = np.mean(coords, axis=0)
ax.text(cog[0], cog[1], name)
ax.plot(coords[:, 0], coords[:, 1], "k-")
fig, ax = plt.subplots(2, 2, figsize=(40, 40))
ax = np.ravel(ax)
ax.set_xlabel("mm")
ax.set_ylabel("mm")
ax.set_title("Aligned Chart")
# chart bounding box
fig.savefig(f"{OUTDIR}/aligned_chart.png", bbox_inches="tight", dpi=300)
plot_contour(ax[0], edge_crds_cat, title="Boundary contour + bounding box", linestyle='none', marker='.', color='b')
plot_box(ax[0], coord_props['bbox'])
ax[0].axis("equal")
# fig, ax = plt.subplots(1, 5, figsize=(25, 5))
# ax[0].imshow(plt.imread(f'{OUTDIR}/chart_bounding_box.png'))
# ax[1].imshow(plt.imread(f'{OUTDIR}/image_bounding_box.png'))
# ax[2].imshow(plt.imread(f'{OUTDIR}/normals.png'))
# ax[3].imshow(plt.imread(f'{OUTDIR}/refined_coords.png'))
# ax[4].imshow(plt.imread(f'{OUTDIR}/aligned_chart.png'))
# image bounding box
# for a in ax:
# a.axis('off')
plot_slide(ax[1], img_props['mask'], slide_res, title="Slide mask + bounding box")
plot_box(ax[1], img_props['bbox'])
# fig.savefig(f'{OUTDIR}/alignment.png', bbox_inches='tight', dpi=150)
with open(f"{out}/contour.json", "w") as fp:
json.dump(contour_xfm, fp, indent=4)
# boundary plots
if cells.size > 0:
cells_xfm = _apply_xfm(opt, cells[:, :2] * [1, -1])
with open(f"{out}/cells.json", "w") as fp:
json.dump(cells_xfm.tolist(), fp, indent=4)
plot_slide(ax[2], img, slide_res, title='Boundary alignment')
for contour in edge_crds:
plot_contour(ax[2], apply_xfm(init_xfm, contour), color=(1, 0, 0), marker='.')
plot_contour(ax[2], apply_xfm(opt_xfm, contour), color=(0, 1, 0), marker='.')
def _refine_edge_coord(img, img_res, edge_coords, normals):
"""
Refine edge_coord by sampling binarised image along normal (to edge) and looking for big step change.
"""
ax[2].legend(["boundary_init", "boundary_optimised"])
# calculate normal line (to edge_coords)
edge_x, edge_y = edge_coords.T
nrml_x, nrml_y = normals.T
# TODO: move these line extents to the config file
line_smpl = np.linspace(-0.03, 0.15, 20)
# aligned chart
line_x = edge_x[:, np.newaxis] + nrml_x[:, np.newaxis] * line_smpl
line_y = edge_y[:, np.newaxis] + nrml_y[:, np.newaxis] * line_smpl
plot_slide(ax[3], img, slide_res, title="Aligned Chart")
cmap = plt.get_cmap("tab10")
# find threshold for background
image = rgb2gray(img)
threshold_value = filters.threshold_otsu(image)
for idx, (name, coords, closed) in enumerate(contour_xfm):
if not closed: continue
plot_contour(ax[3], coords, name, color=cmap(idx))
print(f"threshold={threshold_value}")
fig.savefig(f'{outdir}/alignment.png', bbox_inches='tight', dpi=300)
# binarise foreground
# NOTE: this assumes a bright background!
labeled_foreground = (image < threshold_value).astype(int)
def plot_box(ax, bbox, edgecolor='r', facecolor='none', linewidth=1):
    """Draw a bounding box on ``ax``.

    ``bbox`` is in skimage (min_row, min_col, max_row, max_col) order;
    it is converted here to the (x, y) + width/height form that
    matplotlib's Rectangle patch expects.
    """
    min_row, min_col, max_row, max_col = bbox[0], bbox[1], bbox[2], bbox[3]
    box_patch = patches.Rectangle(
        (min_col, min_row),
        max_col - min_col,
        max_row - min_row,
        linewidth=linewidth,
        edgecolor=edgecolor,
        facecolor=facecolor,
    )
    ax.add_patch(box_patch)
# calculate connected components
cc = label(labeled_foreground, background=0)
# get region properties for each cc and select the largest cc (in terms of area)
p = regionprops(cc, image)
ridx = np.argmax([p0.area for p0 in p])
p = p[ridx]
def plot_slide(ax, img, slide_res, title=None):
cc0 = cc == p.label
extent = np.array([0, img.shape[1], img.shape[0], 0]) * slide_res
ax.imshow(img, extent=extent, cmap='gray')
if DO_PLOTS:
fig, ax = plt.subplots(figsize=(20, 30))
ax.set_xlabel("mm")
ax.set_ylabel("mm")
extent = np.array([0, img.shape[1], img.shape[0], 0]) * img_res
ax.imshow(cc0, extent=extent)
if title is not None:
ax.set_title(title)
for line_x0, line_y0 in zip(line_x, line_y):
ax.plot(line_x0, line_y0, "b.")
ax.plot(edge_x, edge_y, "r.")
def plot_contour(ax, coords, name=None, color='r', title=None, linewidth=1, **kwargs):
ax.set_xlabel("mm")
ax.set_ylabel("mm")
ax.set_title("Largest connected component + normals")
coords = np.array(coords)
ax.plot(coords[:, 0], coords[:, 1], color=color, linewidth=linewidth, **kwargs)
fig.savefig(f"{OUTDIR}/normals.png", bbox_inches="tight", dpi=300)
if name is not None:
cog = np.mean(coords, axis=0)
ax.text(cog[0], cog[1], name, color=color)
# sample image along normal line
if title is not None:
ax.set_title(title)
line_x = np.round(line_x / img_res).astype(int)
line_y = np.round(line_y / img_res).astype(int)
ax.invert_yaxis()
line_int = cc0[line_y, line_x]
min_idx = np.argmax(np.diff(line_int, axis=-1), axis=-1)
def enhance(img0, kernel_size=None, lower_percentile=2, upper_percentile=98, sigma=5):
'''Enhance image exposure and contrast'''
if kernel_size is None:
h, w = img0.shape
kernel_size=(h//8, w//8)
img0 = exposure.equalize_adapthist(img0, kernel_size=kernel_size)
refined_edge_coords = np.stack(
[
line_x[np.arange(min_idx.size), min_idx] * img_res,
line_y[np.arange(min_idx.size), min_idx] * img_res,
],
axis=-1,
)
p_lower, p_upper = np.percentile(img0, (lower_percentile, upper_percentile))
img0 = exposure.rescale_intensity(img0, in_range=(p_lower, p_upper))
# TODO: plot edge_coords + refined_edge_coords
if DO_PLOTS:
fig, ax = plt.subplots(figsize=(20, 30))
img0 = filters.gaussian(img0, sigma)
return img0
extent = np.array([0, img.shape[1], img.shape[0], 0]) * img_res
ax.imshow(img, extent=extent)
ax.plot(edge_x, edge_y, "r.-")
ax.plot(refined_edge_coords[:, 0], refined_edge_coords[:, 1], "g.-")
def segment_foreground(img, marker_threshold=(0.02, 0.2), min_component_size=10000, selem=morphology.disk(30)):
'''Segment image foreground from background'''
ax.set_xlabel("mm")
ax.set_ylabel("mm")
# ax.set_title('edge_co')
# calculate elevation map
elevation_map = filters.sobel(img)
ax.legend(["edge_coords", "refined_edge_coords"])
# extract background and foreground features
markers = np.zeros_like(img)
markers[img < marker_threshold[0]] = 1
markers[img > marker_threshold[1]] = 2
fig.savefig(f"{OUTDIR}/refined_coords.png", bbox_inches="tight", dpi=150)
# segment using watershed algorithm
labels = segmentation.watershed(elevation_map, markers)
labels = ndi.binary_fill_holes(labels-1)+1
return refined_edge_coords
# find connected components and exclude components < min_component_size
cc = measure.label(labels, background=1)
p = measure.regionprops(cc, img)
ridx = np.where([p0.area>min_component_size for p0 in p])[0]
brainmask = np.sum(np.stack([(cc == p[r].label) for r in ridx], axis=2), axis=2)
def _img_props(img, img_resolution, verbose=False, closing=0):
# erode and dilate to clean up edges
brainmask = morphology.binary_erosion(brainmask, selem=selem)
brainmask = morphology.binary_dilation(brainmask, selem=selem)
# # convert to grayscale
# image = rgb2gray(img)
return brainmask
# # find threshold for background
# threshold_value = filters.threshold_otsu(image)
image = rgb2hsv(img)[..., -1]
image = exposure.equalize_hist(image)
threshold_value = filters.threshold_li(image)
def refine_edge_coord(img, img_res, edge_coords, xfm_init):
"""
Refine edge_coord by sampling image along normal (to edge) and looking for big step change.
"""
if verbose:
print(f"threshold={threshold_value}")
# calculate normal line to boundary points
edge_coords_init = apply_xfm(xfm_init, edge_coords)
normals = normal(edge_coords_init)
# binarise foreground
# NOTE: this assumes a bright background!
labeled_foreground = (image < threshold_value).astype(int)
# calculate normal line (to edge_coords)
edge_init_x, edge_init_y = edge_coords_init.T
nrml_x, nrml_y = normals.T
if closing > 0:
labeled_foreground = morphology.binary_closing(labeled_foreground, selem=morphology.disk(closing))
# TODO: move these line extents to the config file
# line_smpl = np.linspace(-0.03, 0.15, 20)
line_smpl = np.linspace(-0.2, 0.2, 20)
# calculate connected components
cc = label(labeled_foreground, background=0)
line_x = edge_init_x[:, np.newaxis] + nrml_x[:, np.newaxis] * line_smpl
line_y = edge_init_y[:, np.newaxis] + nrml_y[:, np.newaxis] * line_smpl
# get region properties for each cc and select the largest cc (in terms of area)
p = regionprops(cc, image)
brainmask = segment_foreground(img)
ridx = np.argmax([p0.area for p0 in p])
p = p[ridx]
# if do_plots:
# fig, ax = plt.subplots(figsize=(20, 30))
bbox = np.array(p.bbox) * img_resolution
# extent = np.array([0, img.shape[1], img.shape[0], 0]) * img_res
# ax.imshow(brainmask, extent=extent, cmap='gray')
centroid = (
(bbox[0] + bbox[2]) / 2,
(bbox[1] + bbox[3]) / 2,
# for line_x0, line_y0 in zip(line_x, line_y):
# ax.plot(line_x0, line_y0, "b.")
# ax.plot(edge_init_x, edge_init_y, "r.")
# ax.set_xlabel("mm")
# ax.set_ylabel("mm")
# ax.set_title("Brainmask + normals")
# plt.show()
# # fig.savefig(f"{OUTDIR}/normals.png", bbox_inches="tight", dpi=300)
# sample image along normal line
line_x = np.round(line_x / img_res).astype(int)
line_y = np.round(line_y / img_res).astype(int)
y, x = brainmask.shape
line_int = brainmask[np.clip(line_y, 0, y-1), np.clip(line_x, 0, x-1)]
# exclude constant rows
constant_idx = np.all(line_int == line_int[:, 0][:, np.newaxis], axis=1)
min_idx = np.argmax(np.abs(np.diff(line_int, axis=-1)), axis=-1)
refined_edge_coords = np.stack(
[
line_x[np.arange(min_idx.size), min_idx] * img_res,
line_y[np.arange(min_idx.size), min_idx] * img_res,
],
axis=-1,
)
if DO_PLOTS:
opt_xfm = transform.SimilarityTransform()
opt_xfm.estimate(edge_coords[~constant_idx, :], refined_edge_coords[~constant_idx, :])
fig, ax = plt.subplots()
extent = np.array([0, img.shape[1], img.shape[0], 0]) * img_resolution
return opt_xfm
im = ax.imshow(cc == p.label, extent=extent)
fig.colorbar(im)
rect = patches.Rectangle(
(bbox[1], bbox[0]),
bbox[3] - bbox[1],
bbox[2] - bbox[0],
linewidth=1,
edgecolor="r",
facecolor="none",
)
ax.add_patch(rect)
def image_props(img, img_resolution):
ax.plot(centroid[1], centroid[0], "ro")
brainmask = segment_foreground(img)
ax.set_xlabel("mm")
ax.set_ylabel("mm")
ax.set_title("Image bounding box")
# get region properties for each cc and select the largest cc (in terms of area)
brainmask = measure.label(brainmask, background=0)
p = measure.regionprops(brainmask, img)
fig.savefig(f"{OUTDIR}/image_bounding_box.png", bbox_inches="tight", dpi=150)
ridx = np.argmax([p0.area for p0 in p])
p = p[ridx]
bbox = np.array(p.bbox) * img_resolution
centroid = (
(bbox[0] + bbox[2]) / 2,
(bbox[1] + bbox[3]) / 2,
)
return {
"bbox": bbox,
"bbox_centroid": centroid,
"shape": (bbox[2] - bbox[0], bbox[3] - bbox[1]),
"aspect_ratio": (bbox[3] - bbox[1]) / (bbox[2] - bbox[0]),
"mask": brainmask,
}
def _pnt_props(pnts, verbose=False):
def point_props(pnts):
x = pnts[:, 0]
y = pnts[:, 1]
......@@ -322,49 +386,59 @@ def _pnt_props(pnts, verbose=False):
(bbox[1] + bbox[3]) / 2,
)
if verbose:
print(f"bbox={bbox}")
print(f"centroid={centroid}")
return {
"bbox": bbox,
"bbox_centroid": centroid,
"shape": (bbox[2] - bbox[0], bbox[3] - bbox[1]),
"aspect_ratio": (bbox[3] - bbox[1]) / (bbox[2] - bbox[0]),