diff --git a/slider/chart_reg.py b/slider/chart_reg.py
index 0d82446a4c3a41343076b7de60b5d58b566fd211..d3c6720c11614b91f9ababe3d12072cf9724c6dd 100644
--- a/slider/chart_reg.py
+++ b/slider/chart_reg.py
@@ -47,6 +47,7 @@ def register_chart_to_slide(
     do_plots: Optional[bool] = None,
     justify: Optional[str] = None,
     field: Optional[str] = None,
+    do_convex_hull: bool = False,
 ):
     '''
     It takes a chart and a slide, and aligns the chart to the slide.
@@ -61,6 +62,8 @@ def register_chart_to_slide(
         justify (Optional[str]): str
     '''

+    chart_name, slide_name = chart, slide
+
     if config is None:
         config = util.get_resource("chart.yaml")

@@ -88,8 +91,9 @@ def register_chart_to_slide(
     edge_crds = [x.points[:, :2] * [1, -1] for x in outline]
     edge_crds_cat = np.concatenate(edge_crds)

-    # hull = ConvexHull(edge_crds_cat)
-    # edge_crds_cat = np.concatenate([edge_crds_cat[simplex,:] for simplex in hull.simplices], axis=0)
+    if do_convex_hull:
+        hull = ConvexHull(edge_crds_cat)
+        edge_crds_cat = np.concatenate([edge_crds_cat[simplex, :] for simplex in hull.simplices], axis=0)

     # load slide, convert to grayscale, and invert if light-field

@@ -283,6 +287,8 @@ def register_chart_to_slide(
                 continue
            plot_contour(ax[7], coords, name, color=cmap(idx))

+        plt.suptitle(f'{chart_name}\n{slide_name}')
+
        fig.savefig(f"{outdir}/alignment.png", bbox_inches="tight", dpi=300)
        # plt.close(fig)

@@ -352,8 +358,6 @@ def estimate_field(img, mask=None):
     centre = img[h - 100 : h + 100, w - 100 : w + 100]
     centre_m = np.median(centre)

-    # print(edges_m, centre_m)
-
     # estimate field (light or dark)
     if edges_m > centre_m:
         return "light"
@@ -381,7 +385,6 @@ def enhance(img0, kernel_size=None, lower_percentile=2, upper_percentile=98, sig
 def segment_foreground(
     img,
     marker_threshold=(0.02, 0.2),
-    min_component_size=10000,
     selem=morphology.disk(30),
 ):
     """Segment image foreground from background"""
@@ -398,12 +401,16 @@ def segment_foreground(
     labels = segmentation.watershed(elevation_map, markers)
     labels = ndi.binary_fill_holes(labels - 1) + 1

-    # find connected components and exclude components < min_component_size
+    # find connected components and exclude components < 25% of max component size
     cc = measure.label(labels, background=1)
     p = measure.regionprops(cc, img)

-    ridx = np.where([p0.area > min_component_size for p0 in p])[0]
-    brainmask = np.sum(np.stack([(cc == p[r].label) for r in ridx], axis=2), axis=2)
+    # ridx = np.where([p0.area > min_component_size for p0 in p])[0]
+    # brainmask = np.sum(np.stack([(cc == p[r].label) for r in ridx], axis=2), axis=2)
+    a = np.array([p0.area for p0 in p])
+    a = a / np.max(a)
+    ridx = np.where(a > 0.25)[0]
+    brainmask = np.isin(cc, [p[i].label for i in ridx])

     # erode and dilate to clean up edges
     brainmask = morphology.binary_erosion(brainmask, selem=selem)
@@ -457,6 +464,8 @@ def optimise_xfm_worker(image, image_resolution, contour, xfm_init, line_extent=
     edge_coords_init = apply_xfm(xfm_init, contour)
     normals = normal(edge_coords_init)

+    normals[np.isnan(normals)] = 0
+
     # calculate normal line (to edge_coords)
     edge_init_x, edge_init_y = edge_coords_init.T
     nrml_x, nrml_y = normals.T
@@ -522,16 +531,25 @@ def optimise_xfm_worker(image, image_resolution, contour, xfm_init, line_extent=
     return opt_xfm, mdist, iter_props


-def image_props(img, img_resolution):
-
-    # get region properties for each cc and select the largest cc (in terms of area)
-    brainmask = measure.label(img, background=0)
-    p = measure.regionprops(brainmask)
+def image_props(mask, mask_resolution):
+    '''
+    Given a mask, return a dictionary of properties about the mask
+
+    Args:
+        mask: The binary mask.
+        mask_resolution: The resolution of the mask.
+
+    Returns:
+        A dictionary with the following keys:
+        - bbox: The bounding box of the object in the original image.
+        - bbox_centroid: The centroid of the bounding box.
+        - shape: The shape of the bounding box.
+        - aspect_ratio: The width / height ratio of the bounding box.
+    '''

-    ridx = np.argmax([p0.area for p0 in p])
-    p = p[ridx]
+    p = measure.regionprops(mask.astype(int))

-    bbox = np.array(p.bbox) * img_resolution
+    bbox = np.array(p[0].bbox) * mask_resolution

     centroid = (
         (bbox[0] + bbox[2]) / 2,
@@ -543,7 +561,7 @@ def image_props(img, img_resolution):
         "bbox_centroid": centroid,
         "shape": (bbox[2] - bbox[0], bbox[3] - bbox[1]),
         "aspect_ratio": (bbox[3] - bbox[1]) / (bbox[2] - bbox[0]),
-        "mask": brainmask,
+        "mask": mask,
     }


@@ -634,6 +652,9 @@ def initialise_xfm(image, image_resolution, contour, tol=0.05, justify=None):

     directions = ("left", "right")

+    hull = ConvexHull(contour)
+    contour_hull = np.concatenate([contour[simplex, :] for simplex in hull.simplices], axis=0)
+
     for dir_idx, dir in enumerate(directions):
         bbox = justify_bbox(image_p["bbox"], contour_p["aspect_ratio"], dir)
         dest = np.array(bbox)[bbox_idx]
@@ -645,7 +666,7 @@ def initialise_xfm(image, image_resolution, contour, tol=0.05, justify=None):

         # fit is determined by the number of contour normals that cross the image mask boundary only once
         single_crossing_idx = optimise_xfm_worker(
-            brainmask, image_resolution, contour, xfm, line_extent=1
+            brainmask, image_resolution, contour_hull, xfm, line_extent=1
         )[2]["single_crossing_idx"]

         goodness_fit[dir_idx] = np.sum(single_crossing_idx)
@@ -662,20 +683,20 @@ def initialise_xfm(image, image_resolution, contour, tol=0.05, justify=None):

     bbox = justify_bbox(image_p["bbox"], contour_p["aspect_ratio"], justify)

-    # # exclude voxels from brainmask that are outside the bounding box
+    # exclude voxels from brainmask that are outside the bounding box

-    # bbox = np.round(np.array(bbox) / image_resolution).astype(int)
+    bbox = np.round(np.array(bbox) / image_resolution).astype(int)

-    # cropped_brainmask = brainmask.copy()
-    # cropped_brainmask[:, : bbox[1]] = 0
-    # cropped_brainmask[:, bbox[3] :] = 0
+    cropped_brainmask = brainmask.copy()
+    cropped_brainmask[:, : bbox[1]] = 0
+    cropped_brainmask[:, bbox[3] :] = 0

-    # image_p = image_props(cropped_brainmask, image_resolution)
+    image_p = image_props(cropped_brainmask, image_resolution)

-    # # recalculate justified bounding box on cropped brainmask
-    # # force image bbox aspect ratio to be the same as contour bbox, and justify left or right.
+    # recalculate justified bounding box on cropped brainmask
+    # force image bbox aspect ratio to be the same as contour bbox, and justify left or right.

-    # bbox = justify_bbox(image_p["bbox"], contour_p["aspect_ratio"], justify)
+    bbox = justify_bbox(image_p["bbox"], contour_p["aspect_ratio"], justify)

     centroid = (
         (bbox[0] + bbox[2]) / 2,
@@ -707,14 +728,36 @@ def initialise_xfm(image, image_resolution, contour, tol=0.05, justify=None):


 def apply_xfm(xfm, pnts):
+    '''
+    Given a transformation matrix and a set of points, apply the transformation to the points and return
+    the result
+
+    Args:
+        xfm: the transformation matrix
+        pnts: the points to transform
+
+    Returns:
+        The transformed points.
+    '''
     return (
         xfm.params @ np.concatenate((pnts, np.ones((pnts.shape[0], 1))), axis=-1).T
     ).T[:, :2]


 def normal(pnts):
+    '''
+    Given an ordered set of 2D contour points, compute the unit normal vector at each point
+
+    Args:
+        pnts: the (N, 2) array of contour points, ordered along the contour
+
+    Returns:
+        The (N, 2) array of unit normal vectors, one per contour point.
+    '''
+
     d_pnts = np.roll(pnts, 1, axis=0) - np.roll(pnts, -1, axis=0)
     normal = np.stack([d_pnts[:, 1], -d_pnts[:, 0]], axis=-1)
+    normal = normal / np.linalg.norm(normal, axis=-1, keepdims=True)
     return normal
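
Note on the new unit-length normals: once `normal()` divides by the vector norm, any contour point whose two neighbours coincide gives a zero-length difference vector and therefore a NaN normal, which is why `optimise_xfm_worker` now zeroes NaNs before building the normal lines. Below is a minimal standalone sketch of that interaction (not part of the repository); the inlined `normal` mirrors the updated function above, and the degenerate `contour` is made up purely for illustration.

```python
import numpy as np


def normal(pnts):
    # Perpendicular of the central difference along the contour, scaled to
    # unit length, matching the updated normal() in chart_reg.py.
    d_pnts = np.roll(pnts, 1, axis=0) - np.roll(pnts, -1, axis=0)
    n = np.stack([d_pnts[:, 1], -d_pnts[:, 0]], axis=-1)
    return n / np.linalg.norm(n, axis=-1, keepdims=True)


# Degenerate contour that doubles back through (0, 0): the two neighbours of
# points 1 and 3 coincide, so their difference vectors are zero and the
# normalisation produces NaN rows.
contour = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 0.0], [0.0, 1.0]])

with np.errstate(invalid="ignore", divide="ignore"):
    normals = normal(contour)

# Same guard as the one added to optimise_xfm_worker: zero out NaN normals so
# the downstream normal-line construction stays finite.
normals[np.isnan(normals)] = 0
print(normals)
```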