Commit 395170ba authored by Sean Fitzgibbon's avatar Sean Fitzgibbon
Browse files

Initial commit of new chart_reg

parent 3f1720fd
......@@ -24,18 +24,91 @@ from slider.external import neurolucida
from slider import util
import glymur
from skimage.measure import regionprops, label
# from skimage.measure import regionprops, label
from skimage.color import rgb2gray, rgb2hsv
from skimage import filters, transform, exposure, morphology
from skimage import filters, transform, exposure, morphology, color, segmentation, measure
from matplotlib import patches
import numpy as np
import matplotlib.pyplot as plt
DO_PLOTS = False
OUTDIR = None
from scipy import ndimage as ndi
import warnings
def register_chart_to_slide(chart, slide, slide_res, out, boundary_key=None, config=None):
# DO_PLOTS = False
# OUTDIR = None
def estimate_field(img, corner_size=20, centre_size=100):
    '''Estimate whether the slide field (background) is light or dark.

    Compares the median intensity of the four image corners (assumed to be
    background) against the mean intensity of a central patch (assumed to
    contain tissue).

    Parameters
    ----------
    img : 2D ndarray
        Grayscale slide image.
    corner_size : int, optional
        Side length (pixels) of each square corner patch sampled as
        background (default 20, matching the original hard-coded value).
    centre_size : int, optional
        Half-width (pixels) of the central patch sampled as foreground
        (default 100, matching the original hard-coded value).

    Returns
    -------
    str
        'light' if the corners are brighter than the centre, else 'dark'.
    '''
    # median intensity of the four corners of the image
    # (median is robust to the odd bright/dark speck in a corner)
    c = corner_size
    corners = [img[:c, :c], img[:c, -c:], img[-c:, :c], img[-c:, -c:]]
    corner_intensity = np.median(np.concatenate(corners, axis=0))

    # mean intensity of the centre of the image; clamp the patch to the
    # image bounds so small inputs do not yield an empty slice (NaN mean)
    h, w = img.shape
    hc, wc = h // 2, w // 2
    r = centre_size
    centre = img[max(hc - r, 0):hc + r, max(wc - r, 0):wc + r]
    centre_intensity = np.mean(centre)

    # estimate field (light or dark)
    return 'light' if corner_intensity > centre_intensity else 'dark'
def enhance(img0, kernel_size=None, lower_percentile=2, upper_percentile=98, sigma=5):
    '''Enhance image exposure and contrast.

    Applies adaptive histogram equalisation, rescales intensity between the
    given percentiles, then smooths with a Gaussian filter.

    Parameters
    ----------
    img0 : 2D ndarray
        Grayscale input image.
    kernel_size : tuple of int, optional
        CLAHE kernel size; defaults to one eighth of the image height/width.
    lower_percentile, upper_percentile : scalar, optional
        Percentiles defining the intensity range to stretch to full scale.
    sigma : scalar, optional
        Standard deviation of the final Gaussian smoothing.

    Returns
    -------
    2D ndarray
        Enhanced image.
    '''
    if kernel_size is None:
        height, width = img0.shape
        kernel_size = (height // 8, width // 8)

    # local (adaptive) histogram equalisation
    out = exposure.equalize_adapthist(img0, kernel_size=kernel_size)

    # stretch contrast between the requested percentiles
    lo, hi = np.percentile(out, (lower_percentile, upper_percentile))
    out = exposure.rescale_intensity(out, in_range=(lo, hi))

    # final smoothing pass
    return filters.gaussian(out, sigma)
def segment_foreground(img, marker_threshold=(0.02, 0.2), min_component_size=10000, selem=None):
    '''Segment image foreground (tissue) from background.

    Seeds a watershed on the Sobel elevation map with background/foreground
    markers, fills holes, drops small connected components, and cleans the
    mask edges with a morphological open (erosion then dilation).

    Parameters
    ----------
    img : 2D ndarray
        Grayscale image (expected dark background; compare thresholds below).
    marker_threshold : (float, float), optional
        Intensities below the first value seed background; above the second
        seed foreground.
    min_component_size : int, optional
        Connected components with area <= this (pixels) are discarded.
    selem : ndarray, optional
        Structuring element for the erosion/dilation cleanup; defaults to
        ``morphology.disk(10)``. (Default is created lazily here rather than
        in the signature, to avoid a mutable default evaluated at import.)

    Returns
    -------
    2D ndarray
        Binary foreground mask.

    Raises
    ------
    RuntimeError
        If no connected component exceeds ``min_component_size``.
    '''
    if selem is None:
        selem = morphology.disk(10)

    # calculate elevation map
    elevation_map = filters.sobel(img)

    # extract background (1) and foreground (2) marker seeds
    markers = np.zeros_like(img)
    markers[img < marker_threshold[0]] = 1
    markers[img > marker_threshold[1]] = 2

    # segment using watershed algorithm; fill holes in the foreground label
    labels = segmentation.watershed(elevation_map, markers)
    labels = ndi.binary_fill_holes(labels - 1) + 1

    # find connected components and exclude components < min_component_size
    cc = measure.label(labels, background=1)
    p = measure.regionprops(cc, img)
    ridx = np.where([p0.area > min_component_size for p0 in p])[0]
    if ridx.size == 0:
        # fail loudly with a useful message rather than np.stack on an empty list
        raise RuntimeError(
            f'No connected component larger than {min_component_size} pixels found'
        )
    brainmask = np.sum(np.stack([(cc == p[r].label) for r in ridx], axis=2), axis=2)

    # erode and dilate to clean up edges
    brainmask = morphology.binary_erosion(brainmask, selem=selem)
    brainmask = morphology.binary_dilation(brainmask, selem=selem)

    return brainmask
def register_chart_to_slide(chart, slide, slide_res, outdir, boundary_key=None, config=None, do_plots=None):
if config is None:
config = util.get_resource("chart.yaml")
......@@ -48,85 +121,96 @@ def register_chart_to_slide(chart, slide, slide_res, out, boundary_key=None, con
if boundary_key is None:
boundary_key = config["chart"]["boundary_key"]
print(boundary_key)
# set global variables required for summary plots
plots = config["general"]["plots"]
DO_PLOTS = plots
OUTDIR = out
if do_plots is None:
do_plots = config["general"]["plots"]
# create output dir
if not op.exists(out):
os.makedirs(out)
if not op.exists(outdir):
os.makedirs(outdir)
# load chart
contour, cells = neurolucida.read(chart)
boundary_idx = np.where([f.lower()==boundary_key.lower() for f,_ in contour])[0]
# if boundary_idx.size > 1:
# raise RuntimeError(f'Repeated entries of boundary key in chart: {boundary_key}')
# edge_crds = contour[boundary_idx[0]][1][:, :2] * [1, -1]
edge_crds = [contour[bidx][1][:, :2] * [1, -1] for bidx in boundary_idx]
edge_crds = np.concatenate([e for e in edge_crds[:2]], axis=0)
# load slide, convert to grayscale, and invert if light-field
slide_res = slide_res * (2 ** rlevel)
# load slide
jp2 = glymur.Jp2k(slide)
img = jp2.read(rlevel=rlevel)
img_orig = jp2.read(rlevel=rlevel)
# initial scaling based on bounding boxes
boundary_idx = np.where([f==boundary_key for f,_ in contour])[0]
if boundary_idx.size > 1:
raise RuntimeError(f'Repeated entries of boundary key in chart: {boundary_key}')
print(boundary_idx)
img = color.rgb2gray(img_orig)
edge_crds = contour[boundary_idx[0]][1][:, :2] * [1, -1]
init = _init_scale(img, slide_res, edge_crds, verbose=True)
print(init)
# print(
# f"Rotation: {init.rotation}, Translation: {init.translation}, Scale: {init.scale}, Shear: {init.shear}"
# )
field = estimate_field(img)
# calculate normal line to boundary points
xfm_edge_coords = _apply_xfm(init, edge_crds)
init_nrmls = normal(xfm_edge_coords)
if field == 'light':
img = 1 - img
# refine edge_coords (to image)
refined_edge_coords = _refine_edge_coord(
img, slide_res, xfm_edge_coords, init_nrmls
img = enhance(img)
# initial scaling based on bounding boxes
init = init_scale(img, slide_res, edge_crds)
print(init)
print(
f"Rotation: {init.rotation}, Translation: {init.translation}, Scale: {init.scale}"
)
# estimate optimised affine transform
opt = transform.SimilarityTransform()
opt.estimate(edge_crds, refined_edge_coords)
print(opt)
# print(
# f"Rotation:\t{opt.rotation}\nTranslation:\t{opt.translation}\nScale:\t\t{list(opt.scale)}\nShear:\t\t{opt.shear}"
# # calculate normal line to boundary points
# xfm_edge_coords = _apply_xfm(init, edge_crds)
# init_nrmls = normal(xfm_edge_coords)
# # refine edge_coords (to image)
# refined_edge_coords = _refine_edge_coord(
# img, slide_res, xfm_edge_coords, init_nrmls
# )
# save opt transform
np.savetxt(f"{out}/chart-to-image.xfm", opt.params)
# # estimate optimised affine transform
# opt = transform.SimilarityTransform()
# opt.estimate(edge_crds, refined_edge_coords)
# print(opt)
# # print(
# # f"Rotation:\t{opt.rotation}\nTranslation:\t{opt.translation}\nScale:\t\t{list(opt.scale)}\nShear:\t\t{opt.shear}"
# # )
# # save opt transform
# np.savetxt(f"{out}/chart-to-image.xfm", opt.params)
# apply opt-xfm to contours and cells and save
contour_xfm = [
(k, _apply_xfm(opt, v[:, :2] * [1, -1]).tolist()) for k, v in contour
(k, apply_xfm(init, v[:, :2] * [1, -1]).tolist()) for k, v in contour
]
if DO_PLOTS:
if do_plots:
fig, ax = plt.subplots(figsize=(10, 10))
extent = np.array([0, img.shape[1], img.shape[0], 0]) * slide_res
ax.imshow(img, extent=extent)
ax.imshow(img, extent=extent, cmap='gray')
for name, coords in contour_xfm:
coords = np.array(coords)
cog = np.mean(coords, axis=0)
ax.text(cog[0], cog[1], name)
ax.plot(coords[:, 0], coords[:, 1], "k-")
ax.text(cog[0], cog[1], name, color='r')
ax.plot(coords[:, 0], coords[:, 1], "r-")
ax.set_xlabel("mm")
ax.set_ylabel("mm")
ax.set_title("Aligned Chart")
fig.savefig(f"{OUTDIR}/aligned_chart.png", bbox_inches="tight", dpi=300)
fig.savefig(f"{outdir}/aligned_chart.png", bbox_inches="tight", dpi=300)
# fig, ax = plt.subplots(1, 5, figsize=(25, 5))
# ax[0].imshow(plt.imread(f'{OUTDIR}/chart_bounding_box.png'))
......@@ -140,18 +224,18 @@ def register_chart_to_slide(chart, slide, slide_res, out, boundary_key=None, con
# fig.savefig(f'{OUTDIR}/alignment.png', bbox_inches='tight', dpi=150)
with open(f"{out}/contour.json", "w") as fp:
with open(f"{outdir}/contour.json", "w") as fp:
json.dump(contour_xfm, fp, indent=4)
if cells.size > 0:
cells_xfm = _apply_xfm(opt, cells[:, :2] * [1, -1])
with open(f"{out}/cells.json", "w") as fp:
cells_xfm = apply_xfm(init, cells[:, :2] * [1, -1])
with open(f"{outdir}/cells.json", "w") as fp:
json.dump(cells_xfm.tolist(), fp, indent=4)
def _refine_edge_coord(img, img_res, edge_coords, normals):
def refine_edge_coord(img, img_res, edge_coords, normals, do_plots=True):
"""
Refine edge_coord by sampling binarised image along normal (to edge) and looking for big step change.
Refine edge_coord by sampling image along normal (to edge) and looking for big step change.
"""
# calculate normal line (to edge_coords)
......@@ -159,36 +243,17 @@ def _refine_edge_coord(img, img_res, edge_coords, normals):
nrml_x, nrml_y = normals.T
# TODO: move these line extents to the config file
line_smpl = np.linspace(-0.03, 0.15, 20)
# line_smpl = np.linspace(-0.03, 0.15, 20)
line_smpl = np.linspace(-0.1, 0.15, 20)
line_x = edge_x[:, np.newaxis] + nrml_x[:, np.newaxis] * line_smpl
line_y = edge_y[:, np.newaxis] + nrml_y[:, np.newaxis] * line_smpl
# find threshold for background
image = rgb2gray(img)
threshold_value = filters.threshold_otsu(image)
print(f"threshold={threshold_value}")
# binarise foreground
# NOTE: this assumes a bright background!
labeled_foreground = (image < threshold_value).astype(int)
# calculate connected components
cc = label(labeled_foreground, background=0)
# get region properties for each cc and select the largest cc (in terms of area)
p = regionprops(cc, image)
ridx = np.argmax([p0.area for p0 in p])
p = p[ridx]
cc0 = cc == p.label
if DO_PLOTS:
if do_plots:
fig, ax = plt.subplots(figsize=(20, 30))
extent = np.array([0, img.shape[1], img.shape[0], 0]) * img_res
ax.imshow(cc0, extent=extent)
ax.imshow(img, extent=extent, cmap='gray')
for line_x0, line_y0 in zip(line_x, line_y):
ax.plot(line_x0, line_y0, "b.")
......@@ -199,14 +264,16 @@ def _refine_edge_coord(img, img_res, edge_coords, normals):
ax.set_ylabel("mm")
ax.set_title("Largest connected component + normals")
fig.savefig(f"{OUTDIR}/normals.png", bbox_inches="tight", dpi=300)
plt.show()
# fig.savefig(f"{OUTDIR}/normals.png", bbox_inches="tight", dpi=300)
# sample image along normal line
line_x = np.round(line_x / img_res).astype(int)
line_y = np.round(line_y / img_res).astype(int)
line_int = cc0[line_y, line_x]
# line_int = brainmask[line_y, line_x]
line_int = img[line_y, line_x]
min_idx = np.argmax(np.diff(line_int, axis=-1), axis=-1)
......@@ -219,11 +286,11 @@ def _refine_edge_coord(img, img_res, edge_coords, normals):
)
# TODO: plot edge_coords + refined_edge_coords
if DO_PLOTS:
if do_plots:
fig, ax = plt.subplots(figsize=(20, 30))
extent = np.array([0, img.shape[1], img.shape[0], 0]) * img_res
ax.imshow(img, extent=extent)
ax.imshow(img, extent=extent, cmap='gray')
ax.plot(edge_x, edge_y, "r.-")
ax.plot(refined_edge_coords[:, 0], refined_edge_coords[:, 1], "g.-")
......@@ -234,38 +301,19 @@ def _refine_edge_coord(img, img_res, edge_coords, normals):
ax.legend(["edge_coords", "refined_edge_coords"])
fig.savefig(f"{OUTDIR}/refined_coords.png", bbox_inches="tight", dpi=150)
plt.show()
# fig.savefig(f"{OUTDIR}/refined_coords.png", bbox_inches="tight", dpi=150)
return refined_edge_coords
def _img_props(img, img_resolution, verbose=False, closing=0):
# # convert to grayscale
# image = rgb2gray(img)
# # find threshold for background
# threshold_value = filters.threshold_otsu(image)
image = rgb2hsv(img)[..., -1]
image = exposure.equalize_hist(image)
threshold_value = filters.threshold_li(image)
def image_props(img, img_resolution, do_plots=False, plot_name=None):
if verbose:
print(f"threshold={threshold_value}")
# binarise foreground
# NOTE: this assumes a bright background!
labeled_foreground = (image < threshold_value).astype(int)
if closing > 0:
labeled_foreground = morphology.binary_closing(labeled_foreground, selem=morphology.disk(closing))
# calculate connected components
cc = label(labeled_foreground, background=0)
brainmask = segment_foreground(img)
# get region properties for each cc and select the largest cc (in terms of area)
p = regionprops(cc, image)
brainmask = measure.label(brainmask, background=0)
p = measure.regionprops(brainmask, img)
ridx = np.argmax([p0.area for p0 in p])
p = p[ridx]
......@@ -277,12 +325,12 @@ def _img_props(img, img_resolution, verbose=False, closing=0):
(bbox[1] + bbox[3]) / 2,
)
if DO_PLOTS:
if do_plots:
fig, ax = plt.subplots()
extent = np.array([0, img.shape[1], img.shape[0], 0]) * img_resolution
im = ax.imshow(cc == p.label, extent=extent)
im = ax.imshow(brainmask, extent=extent)
fig.colorbar(im)
rect = patches.Rectangle(
......@@ -301,16 +349,22 @@ def _img_props(img, img_resolution, verbose=False, closing=0):
ax.set_ylabel("mm")
ax.set_title("Image bounding box")
fig.savefig(f"{OUTDIR}/image_bounding_box.png", bbox_inches="tight", dpi=150)
if plot_name is None:
plt.show()
else:
# image_bounding_box.png
fig.savefig(plot_name, bbox_inches="tight", dpi=150)
return {
"bbox": bbox,
"bbox_centroid": centroid,
"shape": (bbox[2] - bbox[0], bbox[3] - bbox[1]),
"aspect_ratio": (bbox[3] - bbox[1]) / (bbox[2] - bbox[0]),
"mask": brainmask,
}
def _pnt_props(pnts, verbose=False):
def point_props(pnts, do_plots=False, plot_name=None):
x = pnts[:, 0]
y = pnts[:, 1]
......@@ -322,16 +376,12 @@ def _pnt_props(pnts, verbose=False):
(bbox[1] + bbox[3]) / 2,
)
if verbose:
print(f"bbox={bbox}")
print(f"centroid={centroid}")
if DO_PLOTS:
if do_plots:
fig, ax = plt.subplots()
ax.plot(x, y, "b.")
ax.axis("equal")
ax.axis("equal")
ax.invert_yaxis()
rect = patches.Rectangle(
......@@ -343,28 +393,61 @@ def _pnt_props(pnts, verbose=False):
facecolor="none",
)
ax.add_patch(rect)
plt.plot(centroid[1], centroid[0], "ro")
ax.set_title("Chart bounding box")
fig.savefig(f"{OUTDIR}/chart_bounding_box.png", bbox_inches="tight", dpi=150)
if plot_name is None:
plt.show()
else:
# chart_bounding_box.png
fig.savefig(plot_name, bbox_inches="tight", dpi=150)
return {
"bbox": bbox,
"bbox_centroid": centroid,
"shape": (bbox[2] - bbox[0], bbox[3] - bbox[1]),
"aspect_ratio": (bbox[3] - bbox[1]) / (bbox[2] - bbox[0]),
}
def _init_scale(img, img_resolution, crd, closing=0, verbose=False):
img_p = _img_props(img, img_resolution, closing=closing)
crd_p = _pnt_props(crd)
def init_scale(img, img_resolution, crd):
img_p = image_props(img, img_resolution, do_plots=True)
crd_p = point_props(crd, do_plots=True)
print(img_p)
print(crd_p)
# justify='right'
# if justify=='right':
# bbox = img_p['bbox']
# h, w = img_p['shape']
# scale_factor = crd_p['aspect_ratio'] / img_p['aspect_ratio']
# new_w = w * scale_factor
# bbox[1] += (w - new_w)
# centroid = (
# (bbox[0] + bbox[2]) / 2,
# (bbox[1] + bbox[3]) / 2,
# )
# img_p = {
# "bbox": bbox,
# "bbox_centroid": centroid,
# "shape": (bbox[2] - bbox[0], bbox[3] - bbox[1]),
# "aspect_ratio": (bbox[3] - bbox[1]) / (bbox[2] - bbox[0]),
# }
# print(img_p)
# print(crd_p)
if verbose:
print(f"image props = {img_p}")
print(f"chart props = {crd_p}")
if np.abs(img_p['aspect_ratio'] - crd_p['aspect_ratio']) > 0.005:
raise RuntimeError('Slide and chart aspect ratios appear to be different.')
idx = np.array([[1, 2], [1, 0], [3, 2], [3, 0]])
......@@ -377,7 +460,7 @@ def _init_scale(img, img_resolution, crd, closing=0, verbose=False):
return a
def _apply_xfm(xfm, pnts):
def apply_xfm(xfm, pnts):
    '''Apply an affine transform to a set of 2D points.

    Parameters
    ----------
    xfm : object
        Transform with a 3x3 ``params`` homogeneous matrix (e.g. a
        ``skimage.transform`` transform).
    pnts : (N, 2) ndarray
        Points to transform.

    Returns
    -------
    (N, 2) ndarray
        Transformed points.
    '''
    n_points = pnts.shape[0]
    # lift to homogeneous coordinates: append a column of ones
    homogeneous = np.concatenate((pnts, np.ones((n_points, 1))), axis=-1)
    # (A @ P.T).T == P @ A.T — row-vector form of the same product
    transformed = homogeneous @ xfm.params.T
    # drop the homogeneous coordinate
    return transformed[:, :2]
......
......@@ -5,6 +5,6 @@ chart:
# key for the contour to use for aligning with image
boundary_key: outline
slide:
# set the resolution at which registration will be performed
# set the resolution at which alignment will be performed
# resolution = native-image-resolution * (2^resolution_level)
resolution_level: 4
\ No newline at end of file
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment