Commit f284f826 authored by Paul McCarthy's avatar Paul McCarthy 🚵
Browse files

ENH: Proper depth picking for volume overlays. Works well when

blendByIntensity is disabled, but need to play with approaches for when it is
enabled (to cut through haze).
parent bf007395
......@@ -261,9 +261,19 @@ class Volume3DOpts(object):
#
# The projection matrix puts depth into
# [-1, 1], but we want it in [0, 1]
zscale = affine.scaleOffsetXform([1, 1, 0.5], [0, 0, 0.5])
zscale = affine.scaleOffsetXform([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
xform = affine.concat(zscale, proj, xform)
# The Scene3DViewProfile needs to know the image
# texture to screen transformation so it can
# transform screen locations into image
# coordinates. So we construct an appropriate
# transform and cache it in the overlay list
# whenever it is recalculated.
scr2disp = affine.concat(t2dmat, affine.invert(xform))
self.overlayList.setData(
self.overlay, 'screen2DisplayXform_{}'.format(id(self)), scr2disp)
return rayStep, xform
......
......@@ -231,6 +231,7 @@ def draw3D(self, xform=None, bbox=None):
:arg bbox: An optional bounding box.
"""
ovl = self.overlay
opts = self.opts
canvas = self.canvas
copts = canvas.opts
......@@ -240,9 +241,9 @@ def draw3D(self, xform=None, bbox=None):
rayStep , texform = opts.calculateRayCastSettings(xform, proj)
rayStep = affine.transformNormal(
rayStep, self.imageTexture.texCoordXform(self.overlay.shape))
rayStep, self.imageTexture.texCoordXform(ovl.shape))
texform = affine.concat(
texform, self.imageTexture.invTexCoordXform(self.overlay.shape))
texform, self.imageTexture.invTexCoordXform(ovl.shape))
# If lighting is enabled, we specify the light
# position in image texture coordinates, to make
......
......@@ -11,8 +11,9 @@
import logging
import wx
import numpy as np
import wx
import numpy as np
import OpenGL.GL as gl
import fsleyes_props as props
import fsl.transform.affine as affine
......@@ -153,8 +154,8 @@ class Scene3DViewProfile(profiles.Profile):
self.__rotateMousePos = mousePos
canvas.opts.rotation = affine.concat(rot,
self.__lastRot,
self.__baseXform)
self.__lastRot,
self.__baseXform)
def _rotateModeLeftMouseUp(self, ev, canvas, mousePos, canvasPos):
......@@ -228,30 +229,51 @@ class Scene3DViewProfile(profiles.Profile):
Updates the :attr:`DisplayContext.location` property.
"""
from fsl.data.mesh import Mesh
from fsl.data.mesh import Mesh
from fsl.data.image import Image
displayCtx = self.displayCtx
ovl = displayCtx.getSelectedOverlay()
displayCtx = self.displayCtx
overlayList = self.overlayList
ovl = displayCtx.getSelectedOverlay()
if ovl is None:
return
opts = self.displayCtx.getOpts(ovl)
# The canvasPos is located on the near clipping
# plane (see Scene3DCanvas.canvasToWorld).
# We also need the corresponding point on the
# far clipping plane.
farPos = canvas.canvasToWorld(mousePos[0], mousePos[1], near=False)
# For non-mesh overlays, we select a point which
# is in between the near/far clipping planes.
if not isinstance(ovl, Mesh):
# For image overlays, we transform screen
# coordinates into display coordinates, via
# a texture to screen coord affine, which
# is cached by the glvolume draw functions.
if isinstance(ovl, Image):
screen2Display = overlayList.getData(
ovl, 'screen2DisplayXform_{}'.format(id(opts)), None)
posDir = farPos - canvasPos
dist = affine.veclength(posDir)
posDir = affine.normalise(posDir)
midPos = canvasPos + 0.5 * dist * posDir
if screen2Display is None:
return
self.displayCtx.location.xyz = midPos
# Transform the mouse coords into normalised device
# coordinates (NDCs, in the range [0, 1] - see
# Volume3DOpts.calculateRayCastSettings), and query
# the depth for the current fragment (this is saved
# by the glvolume 3d fragment shader).
x, y = mousePos
w, h = canvas.GetSize()
z = gl.glReadPixels(
x, y, 1, 1, gl.GL_DEPTH_COMPONENT, gl.GL_FLOAT)
x = x / w
y = y / h
# Transform NDCs into display coordinates
xyz = affine.transform((x, y, z), screen2Display)
self.displayCtx.location.xyz = xyz
else:
opts = self.displayCtx.getOpts(ovl)
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment