Commit b2314e38 authored by Sean Fitzgibbon

Performance improvements with left/right justify

parent 42b2a829
@@ -14,57 +14,40 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
from operator import index
import json
import os
import os.path as op
import json
from xml.dom import INDEX_SIZE_ERR
import yaml
from scipy.ndimage.interpolation import affine_transform
from slider import util
import warnings
import glymur
# from skimage.measure import regionprops, label
from skimage.color import rgb2gray, rgb2hsv
from skimage import filters, transform, exposure, morphology, color, segmentation, measure
from matplotlib import patches
import numpy as np
import matplotlib.pyplot as plt
import numpy as np
import yaml
from matplotlib import patches
from scipy import ndimage as ndi
from skimage import (
color,
exposure,
filters,
measure,
morphology,
segmentation,
transform,
)
from typing import Optional
import warnings
# DO_PLOTS = False
# OUTDIR = None
def estimate_field(img):
'''Estimate field of slide as being light or dark'''
# calculate mean intensity of the four corners of the image
corners = [img[:20, :20], img[:20, -20:], img[-20:, :20], img[-20:, -20:]]
corners = np.median(np.concatenate(corners, axis=0))
# calculate mean intensity of the centre of the image
h, w = img.shape
h = h//2
w = w//2
centre = img[h-100:h+100, w-100:w+100]
centre = np.mean(centre)
# estimate field (light or dark)
if corners > centre:
return 'light'
else:
return 'dark'
from slider import util
def register_chart_to_slide(chart, slide, slide_res, outdir, boundary_key=None, config=None, do_plots=None, justify=None):
def register_chart_to_slide(
chart: str,
slide: str,
slide_res: float,
outdir: str,
boundary_key: Optional[str] = None,
config: Optional[str] = None,
do_plots: Optional[bool] = None,
justify: Optional[str] = None,
):
if config is None:
config = util.get_resource("chart.yaml")
@@ -89,10 +72,12 @@ def register_chart_to_slide(chart, slide, slide_res, outdir, boundary_key=None,
chart = util.Chart.from_neurolucida_ascii(chart)
outline = chart.get_contours(name=boundary_key, closed=True)
if len(outline)<1: raise ValueError(f'Boundary key {boundary_key} not found in chart')
if len(outline)>1: warnings.warn(f'Repeated entries of boundary key in chart: {boundary_key}')
if len(outline) < 1:
raise ValueError(f"Boundary key {boundary_key} not found in chart")
if len(outline) > 1:
warnings.warn(f"Repeated entries of boundary key in chart: {boundary_key}")
edge_crds = [x.points[:, :2]*[1, -1] for x in outline]
edge_crds = [x.points[:, :2] * [1, -1] for x in outline]
edge_crds_cat = np.concatenate(edge_crds)
# load slide, convert to grayscale, and invert if light-field
@@ -106,14 +91,16 @@ def register_chart_to_slide(chart, slide, slide_res, outdir, boundary_key=None,
field = estimate_field(img)
if field == 'light':
if field == "light":
img = 1 - img
img = enhance(img)
# initial alignment based on boundng boxes
# initial alignment based on bounding boxes
init_xfm, img_props, coord_props = init_scale(img, slide_res, edge_crds_cat, justify=justify)
init_xfm, img_props, coord_props = initialise_xfm(
img, slide_res, edge_crds_cat, justify=justify
)
# print(init_xfm)
tr_x, tr_y = init_xfm.translation
@@ -123,11 +110,9 @@ def register_chart_to_slide(chart, slide, slide_res, outdir, boundary_key=None,
np.savetxt(f"{outdir}/chart-to-image-init.xfm", init_xfm.params)
# refine alignmnet (to mask edges)
# refine alignment (to mask edges)
opt_xfm = refine_edge_coord(
img, slide_res, edge_crds_cat, init_xfm
)
opt_xfm = optimise_xfm(img, slide_res, edge_crds_cat, init_xfm)
# print(opt_xfm)
tr_x, tr_y = opt_xfm.translation
@@ -140,17 +125,28 @@ def register_chart_to_slide(chart, slide, slide_res, outdir, boundary_key=None,
# apply optimised xfm to contours and cells and save
contour_xfm = [
(contour.name, apply_xfm(opt_xfm, contour.points[:, :2] * [1, -1]).tolist(), contour.closed) for contour in chart.contours
(
contour.name,
apply_xfm(opt_xfm, contour.points[:, :2] * [1, -1]).tolist(),
contour.closed,
)
for contour in chart.contours
]
with open(f"{outdir}/contour.json", "w") as fp:
json.dump(contour_xfm, fp, indent=4)
contours = [
{"name": name, "xy": xy, "closed": closed} for name, xy, closed in contour_xfm
]
with open(f"{outdir}/contours.json", "w") as fp:
json.dump(contours, fp)
if chart.n_cells > 0:
cells = np.concatenate([cell.point[:2][np.newaxis, :] for cell in chart.cells]) * [1, -1]
cells_xfm = apply_xfm(init_xfm, cells )
cells = np.concatenate(
[cell.point[:2][np.newaxis, :] for cell in chart.cells]
) * [1, -1]
cells_xfm = apply_xfm(init_xfm, cells)
with open(f"{outdir}/cells.json", "w") as fp:
json.dump(cells_xfm.tolist(), fp, indent=4)
json.dump(cells_xfm.tolist(), fp)
# do plots
@@ -161,40 +157,52 @@ def register_chart_to_slide(chart, slide, slide_res, outdir, boundary_key=None,
# chart bounding box
plot_contour(ax[0], edge_crds_cat, title="Boundary contour + bounding box", linestyle='none', marker='.', color='b')
plot_box(ax[0], coord_props['bbox'])
ax[0].axis("equal")
plot_contour(
ax[0],
edge_crds_cat,
title="Boundary contour + bounding box",
linestyle="none",
marker=".",
color="b",
)
plot_box(ax[0], coord_props["bbox"])
ax[0].axis("equal")
# image bounding box
plot_slide(ax[1], img_props['mask'], slide_res, title="Slide mask + bounding box")
plot_box(ax[1], img_props['bbox'])
plot_slide(
ax[1], img_props["mask"], slide_res, title="Slide mask + bounding box"
)
plot_box(ax[1], img_props["bbox"])
# boundary plots
plot_slide(ax[2], img, slide_res, title='Boundary alignment')
plot_slide(ax[2], img, slide_res, title="Boundary alignment")
for contour in edge_crds:
plot_contour(ax[2], apply_xfm(init_xfm, contour), color=(1, 0, 0), marker='.')
plot_contour(ax[2], apply_xfm(opt_xfm, contour), color=(0, 1, 0), marker='.')
plot_contour(
ax[2], apply_xfm(init_xfm, contour), color=(1, 0, 0), marker="."
)
plot_contour(
ax[2], apply_xfm(opt_xfm, contour), color=(0, 1, 0), marker="."
)
ax[2].legend(["boundary_init", "boundary_optimised"])
# aligned chart
plot_slide(ax[3], img, slide_res, title="Aligned Chart")
cmap = plt.get_cmap("tab10")
for idx, (name, coords, closed) in enumerate(contour_xfm):
if not closed: continue
if not closed:
continue
plot_contour(ax[3], coords, name, color=cmap(idx))
fig.savefig(f'{outdir}/alignment.png', bbox_inches='tight', dpi=300)
fig.savefig(f"{outdir}/alignment.png", bbox_inches="tight", dpi=300)
def plot_box(ax, bbox, edgecolor='r', facecolor='none', linewidth=1):
def plot_box(ax, bbox, edgecolor="r", facecolor="none", linewidth=1):
rect = patches.Rectangle(
(bbox[1], bbox[0]),
bbox[3] - bbox[1],
@@ -209,7 +217,7 @@ def plot_box(ax, bbox, edgecolor='r', facecolor='none', linewidth=1):
def plot_slide(ax, img, slide_res, title=None):
extent = np.array([0, img.shape[1], img.shape[0], 0]) * slide_res
ax.imshow(img, extent=extent, cmap='gray')
ax.imshow(img, extent=extent, cmap="gray")
ax.set_xlabel("mm")
ax.set_ylabel("mm")
@@ -217,9 +225,8 @@ def plot_slide(ax, img, slide_res, title=None):
if title is not None:
ax.set_title(title)
def plot_contour(ax, coords, name=None, color='r', title=None, linewidth=1, **kwargs):
def plot_contour(ax, coords, name=None, color="r", title=None, linewidth=1, **kwargs):
coords = np.array(coords)
ax.plot(coords[:, 0], coords[:, 1], color=color, linewidth=linewidth, **kwargs)
@@ -231,28 +238,57 @@ def plot_contour(ax, coords, name=None, color='r', title=None, linewidth=1, **kw
if title is not None:
ax.set_title(title)
ax.invert_yaxis()
if not ax.yaxis_inverted():
ax.invert_yaxis()
def estimate_field(img):
"""Estimate field of slide as being light or dark"""
# calculate median intensity of the four corners of the image
corners = [img[:20, :20], img[:20, -20:], img[-20:, :20], img[-20:, -20:]]
corners = np.median(np.concatenate(corners, axis=0))
# calculate mean intensity of the centre of the image
h, w = img.shape
h = h // 2
w = w // 2
centre = img[h - 100 : h + 100, w - 100 : w + 100]
centre = np.mean(centre)
# estimate field (light or dark)
if corners > centre:
return "light"
else:
return "dark"
def enhance(img0, kernel_size=None, lower_percentile=2, upper_percentile=98, sigma=5):
'''Enhance image exposure and contrast'''
"""Enhance image exposure and contrast"""
if kernel_size is None:
h, w = img0.shape
kernel_size=(h//8, w//8)
kernel_size = (h // 8, w // 8)
img0 = exposure.equalize_adapthist(img0, kernel_size=kernel_size)
p_lower, p_upper = np.percentile(img0, (lower_percentile, upper_percentile))
img0 = exposure.rescale_intensity(img0, in_range=(p_lower, p_upper))
img0 = filters.gaussian(img0, sigma)
return img0
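# Illustrative sketch (the random input is an assumption, not from the original
# module): enhance() applies CLAHE with a (h//8, w//8) kernel, clips to the
# 2nd-98th percentile range, then smooths with a sigma=5 Gaussian.
def _example_enhance():
    rng = np.random.default_rng(0)
    noisy = rng.random((256, 256))    # grayscale image in [0, 1)
    smooth = enhance(noisy)
    assert smooth.shape == noisy.shape
    return smooth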
def segment_foreground(img, marker_threshold=(0.02, 0.2), min_component_size=10000, selem=morphology.disk(30)):
'''Segment image foreground from background'''
def segment_foreground(
img,
marker_threshold=(0.02, 0.2),
min_component_size=10000,
selem=morphology.disk(30),
):
"""Segment image foreground from background"""
# calculate elevation map
elevation_map = filters.sobel(img)
@@ -264,13 +300,13 @@ def segment_foreground(img, marker_threshold=(0.02, 0.2), min_component_size=100
# segment using watershed algorithm
labels = segmentation.watershed(elevation_map, markers)
labels = ndi.binary_fill_holes(labels-1)+1
labels = ndi.binary_fill_holes(labels - 1) + 1
# find connected components and exclude components < min_component_size
cc = measure.label(labels, background=1)
p = measure.regionprops(cc, img)
ridx = np.where([p0.area>min_component_size for p0 in p])[0]
ridx = np.where([p0.area > min_component_size for p0 in p])[0]
brainmask = np.sum(np.stack([(cc == p[r].label) for r in ridx], axis=2), axis=2)
# erode and dilate to clean up edges
@@ -280,13 +316,14 @@ def segment_foreground(img, marker_threshold=(0.02, 0.2), min_component_size=100
return brainmask
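# Illustrative sketch (synthetic input, not from the original module):
# segment_foreground() builds a Sobel elevation map, seeds background and
# foreground markers from the intensity thresholds, runs a watershed, fills
# holes and keeps connected components larger than min_component_size.
def _example_segment_foreground():
    canvas = np.zeros((512, 512))
    canvas[100:400, 100:400] = 1.0                    # single bright square
    mask = segment_foreground(filters.gaussian(canvas, 3))
    return mask                                       # ~1 on the square, 0 elsewhere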
def refine_edge_coord(img, img_res, edge_coords, xfm_init):
def optimise_xfm(image, image_resolution, contour, xfm_init):
"""
Refine edge_coord by sampling image along normal (to edge) and looking for big step change.
Refine contour by sampling image along normal (to each edge point) and looking for big step change.
Calculate a new transform (xfm) using refined contour points.
"""
# calculate normal line to boundary points
edge_coords_init = apply_xfm(xfm_init, edge_coords)
edge_coords_init = apply_xfm(xfm_init, contour)
normals = normal(edge_coords_init)
# calculate normal line (to edge_coords)
@@ -300,7 +337,7 @@ def refine_edge_coord(img, img_res, edge_coords, xfm_init):
line_x = edge_init_x[:, np.newaxis] + nrml_x[:, np.newaxis] * line_smpl
line_y = edge_init_y[:, np.newaxis] + nrml_y[:, np.newaxis] * line_smpl
brainmask = segment_foreground(img)
brainmask = segment_foreground(image)
# if do_plots:
# fig, ax = plt.subplots(figsize=(20, 30))
@@ -322,11 +359,11 @@ def refine_edge_coord(img, img_res, edge_coords, xfm_init):
# sample image along normal line
line_x = np.round(line_x / img_res).astype(int)
line_y = np.round(line_y / img_res).astype(int)
line_x = np.round(line_x / image_resolution).astype(int)
line_y = np.round(line_y / image_resolution).astype(int)
y, x = brainmask.shape
line_int = brainmask[np.clip(line_y, 0, y-1), np.clip(line_x, 0, x-1)]
line_int = brainmask[np.clip(line_y, 0, y - 1), np.clip(line_x, 0, x - 1)]
# exclude constant rows
constant_idx = np.all(line_int == line_int[:, 0][:, np.newaxis], axis=1)
@@ -335,25 +372,25 @@ def refine_edge_coord(img, img_res, edge_coords, xfm_init):
refined_edge_coords = np.stack(
[
line_x[np.arange(min_idx.size), min_idx] * img_res,
line_y[np.arange(min_idx.size), min_idx] * img_res,
line_x[np.arange(min_idx.size), min_idx] * image_resolution,
line_y[np.arange(min_idx.size), min_idx] * image_resolution,
],
axis=-1,
)
opt_xfm = transform.SimilarityTransform()
opt_xfm.estimate(edge_coords[~constant_idx, :], refined_edge_coords[~constant_idx, :])
opt_xfm.estimate(
contour[~constant_idx, :], refined_edge_coords[~constant_idx, :]
)
return opt_xfm
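# Illustrative sketch (made-up points, not from the original module): the last
# step of optimise_xfm() is a least-squares similarity fit between the original
# contour points and the edge locations recovered from the brainmask.
def _example_similarity_fit():
    src = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
    dst = src * 2.0 + [0.5, -0.3]             # scale 2, translate (0.5, -0.3)
    xfm = transform.SimilarityTransform()
    xfm.estimate(src, dst)
    assert np.allclose(xfm(src), dst)         # applying xfm reproduces dst
    return xfm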
def image_props(img, img_resolution):
brainmask = segment_foreground(img)
# get region properties for each cc and select the largest cc (in terms of area)
brainmask = measure.label(brainmask, background=0)
p = measure.regionprops(brainmask, img)
brainmask = measure.label(img, background=0)
p = measure.regionprops(brainmask)
ridx = np.argmax([p0.area for p0 in p])
p = p[ridx]
@@ -394,61 +431,83 @@ def point_props(pnts):
}
def justify_bbox(bbox, aspect_ratio, justify):
"""Force bbox to aspect_ratio and justify left or right."""
print(f"justify chart bbox to {justify} in image bbox")
w, h = bbox[3] - bbox[1], bbox[2] - bbox[0]
def init_scale(img, img_resolution, crd, tol=0.05, justify=None):
scale_factor = aspect_ratio / (w / h)
img_p = image_props(img, img_resolution)
crd_p = point_props(crd)
new_w = w * scale_factor
# print(img_p)
# print(crd_p)
if justify == "right":
bbox[1] += w - new_w
elif justify == "left":
bbox[3] -= w - new_w
else:
raise ValueError(
f'Unknown value for justify ("{justify}"). Can only be "left" or "right".'
)
return bbox
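# Illustrative worked example (made-up numbers, not from the original module):
# bbox is [min_row, min_col, max_row, max_col] and aspect_ratio is the chart
# bbox width/height that the image bbox is forced to match.
def _example_justify_bbox():
    bbox = [0.0, 0.0, 10.0, 20.0]                        # height 10, width 20
    right = justify_bbox(bbox.copy(), aspect_ratio=1.5, justify="right")
    left = justify_bbox(bbox.copy(), aspect_ratio=1.5, justify="left")
    assert right == [0.0, 5.0, 10.0, 20.0]               # right edge kept, width 15
    assert left == [0.0, 0.0, 10.0, 15.0]                # left edge kept, width 15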
def initialise_xfm(image, image_resolution, contour, tol=0.05, justify=None):
'''Calculate image and contour bounding boxes, then estimate the transform (xfm) based on bounding box corners.'''
brainmask = segment_foreground(image)
image_p = image_props(brainmask, image_resolution)
contour_p = point_props(contour)
if justify is not None:
print(f'justify chart bbox to {justify} in image bbox')
# force image bbox aspect ratio to be the same as contour bbox, and justify left or right.
bbox = img_p['bbox']
h, w = img_p['shape']
bbox = justify_bbox(image_p["bbox"], contour_p["aspect_ratio"], justify)
scale_factor = crd_p['aspect_ratio'] / img_p['aspect_ratio']
# exclude pixels from brainmask that are outside the bounding box
new_w = w * scale_factor
bbox = np.round(np.array(bbox) / image_resolution).astype(int)
if justify=='right':
bbox[1] += (w - new_w)
elif justify=='left':
bbox[3] -= (w - new_w)
else:
raise ValueError(f'Unknown value for justify ("{justify}"). Can only be "left" or "right".')
cropped_brainmask = brainmask.copy()
cropped_brainmask[:, : bbox[1]] = 0
cropped_brainmask[:, bbox[3] :] = 0
image_p = image_props(cropped_brainmask, image_resolution)
# recalculate justified bounding box on cropped brainmask
# force image bbox aspect ratio to be the same as contour bbox, and justify left or right.
bbox = justify_bbox(image_p["bbox"], contour_p["aspect_ratio"], justify)
centroid = (
(bbox[0] + bbox[2]) / 2,
(bbox[1] + bbox[3]) / 2,
)
img_p = {
image_p = {
"bbox": bbox,
"bbox_centroid": centroid,
"shape": (bbox[2] - bbox[0], bbox[3] - bbox[1]),
"aspect_ratio": (bbox[3] - bbox[1]) / (bbox[2] - bbox[0]),
"mask": img_p['mask'],
"mask": brainmask,
}
# print(img_p)
# print(crd_p)
if np.abs(img_p['aspect_ratio'] - crd_p['aspect_ratio']) > tol:
raise RuntimeError('Slide and chart aspect ratios appear to be different.')
if np.abs(image_p["aspect_ratio"] - contour_p["aspect_ratio"]) > tol:
raise RuntimeError("Slide and chart aspect ratios appear to be different. Consider correcting aspect-ratio with left/right justification.")
idx = np.array([[1, 2], [1, 0], [3, 2], [3, 0]])
src = np.array(crd_p["bbox"])[idx]
dest = np.array(img_p["bbox"])[idx]
src = np.array(contour_p["bbox"])[idx]
dest = np.array(image_p["bbox"])[idx]
a = transform.SimilarityTransform()
a.estimate(src, dest)
xfm = transform.SimilarityTransform()
xfm.estimate(src, dest)
return a, img_p, crd_p
return xfm, image_p, contour_p
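# Illustrative sketch (made-up bounding boxes, not from the original module):
# initialise_xfm() pairs the four (x, y) = (col, row) corners of the chart and
# slide bounding boxes and fits a similarity transform through them; the bboxes
# below share the same aspect ratio, so the fit is exact.
def _example_bbox_corner_fit():
    idx = np.array([[1, 2], [1, 0], [3, 2], [3, 0]])     # bbox indices -> corners
    chart_bbox = np.array([-5.0, -8.0, 5.0, 8.0])        # 10 x 16 (h x w)
    slide_bbox = np.array([0.0, 0.0, 20.0, 32.0])        # 20 x 32 (h x w)
    xfm = transform.SimilarityTransform()
    xfm.estimate(chart_bbox[idx], slide_bbox[idx])
    assert np.isclose(xfm.scale, 2.0)                    # chart scaled up 2x
    return xfm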
def apply_xfm(xfm, pnts):
......