Commit 8a07fd7a authored by Jiewon

fslcourse data works fine

parent 042200fa
@@ -4,7 +4,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"# ANALYSIS OF DIFFUSION FODs - COMPARISON OF PX AND DIPY\n",
"# ANALYSIS OF DIFFUSION FODs - COMPARISON OF BPX AND DIPY\n",
"\n",
"- we compare bpx and dipy on the fsl_course data (fdt2)\n",
"- also try out dipy on the MGH high res data\n"
@@ -12,7 +12,7 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
@@ -23,29 +23,15 @@
"\n",
"\n",
"\n",
"\n",
"# from dipy.core.gradients import gradient_table\n",
"\n",
"# \n",
"# from dipy.io.image import load_nifti_data\n",
"\n",
"# from dipy.reconst.dti import fractional_anisotropy\n",
"# from dipy.reconst.csdeconv import recursive_response\n",
"\n",
"# \n",
"\n",
"# from dipy.reconst.csdeconv import auto_response_ssst\n",
"\n",
"# from dipy.sims.voxel import single_tensor_odf\n",
"\n",
"# from dipy.reconst.csdeconv import ConstrainedSphericalDeconvModel\n",
"\n",
"# from dipy.viz import window, actor\n"
"\n"
]
},
{
"cell_type": "code",
"execution_count": 18,
"execution_count": 2,
"metadata": {
"tags": []
},
@@ -54,9 +40,9 @@
"name": "stdout",
"output_type": "stream",
"text": [
"(116, 116, 76, 115)\n",
"(115,)\n",
"(115, 3)\n"
"(292, 1, 192, 2808)\n",
"(2808,)\n",
"(2808, 3)\n"
]
}
],
@@ -67,10 +53,10 @@
"from fsl.data.image import Image\n",
"\n",
"# FSL course data\n",
"datadir = '/Users/saad/Git/diffusion/fsl_course_data/'\n",
"datadir = '/Users/jiewonkang/LAYNII/diffusion/mgh/'#'/Users/jiewonkang/LAYNII/diffusion/fslcourse/' #'/Users/saad/Git/diffusion/fsl_course_data/'\n",
"\n",
"# Get data\n",
"data = Image(os.path.join(datadir,'data'))\n",
"data = Image(os.path.join(datadir,'slice_xy'))\n",
"print(data.shape)\n",
"\n",
"# Brain mask\n",
@@ -98,18 +84,18 @@
},
{
"cell_type": "code",
"execution_count": 23,
"execution_count": 3,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[[0.00145238]\n",
" [0.00030023]\n",
" [0.00030023]]\n",
"(array([0.00145238, 0.00030023, 0.00030023]), 1428.1447)\n",
"0.206712291263422\n"
"[[0.00389526]\n",
" [0.0010381 ]\n",
" [0.0010381 ]]\n",
"(array([0.00389526, 0.0010381 , 0.0010381 ]), 1163.474)\n",
"0.2665042647810753\n"
]
}
],
@@ -140,30 +126,22 @@
},
{
"cell_type": "code",
"execution_count": 26,
"execution_count": 4,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/miniconda3/envs/py38/lib/python3.8/site-packages/dipy-1.2.0-py3.8-macosx-10.9-x86_64.egg/dipy/reconst/csdeconv.py:696: UserWarning: maximum number of iterations exceeded - failed to converge\n",
"/Users/jiewonkang/opt/anaconda3/lib/python3.8/site-packages/dipy/reconst/csdeconv.py:700: UserWarning: maximum number of iterations exceeded - failed to converge\n",
" warnings.warn(msg)\n"
]
},
{
"ename": "KeyboardInterrupt",
"evalue": "",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m/var/folders/b7/p54_r2qn6kl4nxy4f7_28hrc0000gs/T/ipykernel_46994/2571807881.py\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m 3\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mdipy\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mreconst\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcsdeconv\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mConstrainedSphericalDeconvModel\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[0mcsd_model\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mConstrainedSphericalDeconvModel\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mgtab\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mresponse\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 5\u001b[0;31m \u001b[0mcsd_fit\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcsd_model\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfit\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 6\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcsd_fit\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 7\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/opt/miniconda3/envs/py38/lib/python3.8/site-packages/dipy-1.2.0-py3.8-macosx-10.9-x86_64.egg/dipy/reconst/multi_voxel.py\u001b[0m in \u001b[0;36mnew_fit\u001b[0;34m(self, data, mask)\u001b[0m\n\u001b[1;32m 31\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mijk\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mndindex\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m-\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 32\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mmask\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mijk\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 33\u001b[0;31m \u001b[0mfit_array\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mijk\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msingle_voxel_fit\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdata\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mijk\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 34\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mMultiVoxelFit\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfit_array\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmask\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 35\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mnew_fit\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/opt/miniconda3/envs/py38/lib/python3.8/site-packages/dipy-1.2.0-py3.8-macosx-10.9-x86_64.egg/dipy/reconst/csdeconv.py\u001b[0m in \u001b[0;36mfit\u001b[0;34m(self, data)\u001b[0m\n\u001b[1;32m 287\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mfit\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdata\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 288\u001b[0m \u001b[0mdwi_data\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mdata\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_where_dwi\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 289\u001b[0;31m shm_coeff, _ = csdeconv(dwi_data, self._X, self.B_reg, self.tau,\n\u001b[0m\u001b[1;32m 290\u001b[0m convergence=self.convergence, P=self._P)\n\u001b[1;32m 291\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mSphHarmFit\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mshm_coeff\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/opt/miniconda3/envs/py38/lib/python3.8/site-packages/dipy-1.2.0-py3.8-macosx-10.9-x86_64.egg/dipy/reconst/csdeconv.py\u001b[0m in \u001b[0;36mcsdeconv\u001b[0;34m(dwsignal, X, B_reg, tau, convergence, P)\u001b[0m\n\u001b[1;32m 682\u001b[0m \u001b[0;31m# We use the Cholesky decomposition to solve for the SH coefficients.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 683\u001b[0m \u001b[0mQ\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mP\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdot\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mH\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mT\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mH\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 684\u001b[0;31m \u001b[0mfodf_sh\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_solve_cholesky\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mQ\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mz\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 685\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 686\u001b[0m \u001b[0;31m# Sample the FOD using the regularization sphere and compute k.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/opt/miniconda3/envs/py38/lib/python3.8/site-packages/dipy-1.2.0-py3.8-macosx-10.9-x86_64.egg/dipy/reconst/csdeconv.py\u001b[0m in \u001b[0;36m_solve_cholesky\u001b[0;34m(Q, z)\u001b[0m\n\u001b[1;32m 518\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 519\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_solve_cholesky\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mQ\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mz\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 520\u001b[0;31m \u001b[0mL\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0minfo\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mpotrf\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mQ\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlower\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mFalse\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moverwrite_a\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mFalse\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mclean\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mFalse\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 521\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0minfo\u001b[0m \u001b[0;34m>\u001b[0m \u001b[0;36m0\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 522\u001b[0m \u001b[0mmsg\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m\"%d-th leading minor not positive definite\"\u001b[0m \u001b[0;34m%\u001b[0m \u001b[0minfo\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mKeyboardInterrupt\u001b[0m: "
"name": "stdout",
"output_type": "stream",
"text": [
"(292, 1, 192)\n"
]
}
],
@@ -171,7 +149,7 @@
"# Fit ODF\n",
"# Use response function to reconstruct ODF \n",
"from dipy.reconst.csdeconv import ConstrainedSphericalDeconvModel\n",
"csd_model = ConstrainedSphericalDeconvModel(gtab,response)\n",
"csd_model = ConstrainedSphericalDeconvModel(gtab,response,convergence=500)#200\n",
"csd_fit = csd_model.fit(data)\n",
"print(csd_fit.shape)\n",
"\n",
@@ -190,30 +168,22 @@
},
{
"cell_type": "code",
"execution_count": 16,
"execution_count": 5,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Saving illustration as slice_xy_2_csd_peaks.png\n"
]
}
],
"outputs": [],
"source": [
"# Define output directory\n",
"output_dir = '/Users/saad/Desktop/odf_results/'\n",
"output_dir = '/Users/jiewonkang/LAYNII/diffusion/odf_results/'#'/Users/saad/Desktop/odf_results/'\n",
"\n",
"# Define basename string for output files\n",
"filespec = \"fsl_course_data\" \n",
"filespec = \"mgh_data_slice\"#\"fsl_course_data\" \n",
"\n",
"# Extract peak directions (maxima) of ODFs\n",
"from dipy.direction import peaks_from_model\n",
"from dipy.data import default_sphere\n",
"\n",
"csd_peaks = peaks_from_model(model=csd_model,\n",
" data=data,\n",
" data=data.data,\n",
" sphere=default_sphere,\n",
" relative_peak_threshold=.5,\n",
" min_separation_angle=25,\n",
@@ -234,15 +204,14 @@
},
{
"cell_type": "code",
"execution_count": 17,
"execution_count": 19,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"(190, 1, 170, 3)\n",
"(190, 1, 170, 1)\n"
"(292, 1, 192, 1)\n"
]
}
],
@@ -255,9 +224,7 @@
"print(crossing_fibre_no.shape)\n",
"\n",
"# Save as nifti\n",
"func = nib.load(filename) # diffusion data\n",
"ni_img = nib.Nifti1Image(crossing_fibre_no,func.affine)\n",
"nib.save(ni_img, outputdir+filespec+'_cross_fib_count.nii.gz')"
"Image(crossing_fibre_no, xform=np.eye(4)).save(output_dir+filespec+'_cross_fib_count.nii.gz')\n"
]
},
{
@@ -269,30 +236,14 @@
},
{
"cell_type": "code",
"execution_count": 18,
"execution_count": 20,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"(190, 1, 170)\n",
"[[[0. 0.00406759 0.06974851 ... 0.12878251 0.11782357 0.12736678]]\n",
"\n",
" [[0. 0.0171325 0.06137189 ... 0.15239174 0.10964878 0.15413291]]\n",
"\n",
" [[0. 0.01435646 0.08884249 ... 0.15292671 0.13944273 0.15151466]]\n",
"\n",
" ...\n",
"\n",
" [[0. 0. 0.05125385 ... 0.20745192 0.29377308 0.19453879]]\n",
"\n",
" [[0. 0. 0.04139549 ... 0.27782337 0.23287452 0.22019032]]\n",
"\n",
" [[0. 0. 0.03380958 ... 0.2319051 0.20727103 0.20715439]]]\n",
"next\n",
"(190, 1, 170, 3)\n",
"(190, 1, 170)\n"
"(292, 1, 192)\n"
]
}
],
@@ -302,8 +253,7 @@
"print(maxpeak.shape)\n",
"\n",
"# Save as nifti\n",
"ni_img_2 = nib.Nifti1Image(maxpeak, func.affine)\n",
"nib.save(ni_img_2, outputdir+filespec+'_cross_fib_max.nii.gz')\n"
"Image(maxpeak, xform=np.eye(4)).save(output_dir+filespec+'_cross_fib_max.nii.gz')\n"
]
},
{
@@ -315,28 +265,15 @@
},
{
"cell_type": "code",
"execution_count": 19,
"execution_count": 21,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"(190, 1, 170, 3, 3)\n",
"(190, 1, 170, 3)\n",
"[[ 0. -0.6915564 0.56038515 ... -0.44015013 0.20771889\n",
" 0.26458817]\n",
" [ 0. -0.50392443 -0.84258134 ... 0.51692592 0.65072818\n",
" -0.53681715]\n",
" [ 0. -0.51957521 0.04987662 ... 0.09420113 -0.99918788\n",
" -0.86214037]\n",
" ...\n",
" [ 0. 0. 0.87974526 ... 0.16864329 -0.60805016\n",
" -0.60336986]\n",
" [ 0. 0. -0.50392443 ... -0.71068281 0.72916496\n",
" -0.70624532]\n",
" [ 0. 0. -0.75554233 ... -0.33071052 -0.96108153\n",
" 0.83096984]]\n"
"(292, 1, 192, 3, 3)\n",
"(292, 1, 192, 3)\n"
]
}
],
@@ -355,27 +292,23 @@
"# Direction for Peak 2\n",
"peak2=peak_dir_xyz[:,:,:,1,:]\n",
"# Magnitude of Peak 2\n",
"peak2_val=peakvals[:,:,:,1]\n",
"peak2_vals=peakvals[:,:,:,1]\n",
"\n",
"# Direction for Peak 3\n",
"peak3=peak_dir_xyz[:,:,:,2,:]\n",
"# Magnitude of Peak 3\n",
"peak3_val=peakvals[:,:,:,2]\n",
"peak3_vals=peakvals[:,:,:,2]\n",
"\n",
"# Save each peak information into separate nifti\n",
"ni_img1 = nib.Nifti1Image(np.multiply(np.stack(np.tile(peak1_vals,(3,1,1,1)),axis=3),peak1),func.affine)\n",
"nib.save(ni_img1, outputdir+filespec+'_crossfib_peak1.nii.gz')\n",
"\n",
"ni_img2 = nib.Nifti1Image(np.multiply(np.stack(np.tile(peak2_vals,(3,1,1,1)),axis=3),peak2),func.affine)\n",
"nib.save(ni_img2, outputdir+filespec+'_crossfib_peak2.nii.gz')\n",
"\n",
"ni_img3 = nib.Nifti1Image(np.multiply(np.stack(np.tile(peak3_vals,(3,1,1,1)),axis=3),peak3),func.affine)\n",
"nib.save(ni_img3, outputdir+filespec+'_crossfib_peak3.nii.gz')"
"Image(np.multiply(np.stack(np.tile(peak1_vals,(3,1,1,1)),axis=3),peak1), xform=np.eye(4)).save(output_dir+filespec+'_crossfib_peak1.nii.gz')\n",
"Image(np.multiply(np.stack(np.tile(peak2_vals,(3,1,1,1)),axis=3),peak2), xform=np.eye(4)).save(output_dir+filespec+'_crossfib_peak2.nii.gz')\n",
"Image(np.multiply(np.stack(np.tile(peak3_vals,(3,1,1,1)),axis=3),peak3), xform=np.eye(4)).save(output_dir+filespec+'_crossfib_peak3.nii.gz')\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 9,
"metadata": {},
"outputs": [],
"source": [
......
%% Cell type:markdown id: tags:
# ANALYSIS OF DIFFUSION FODs - COMPARISON OF PX AND DIPY
# ANALYSIS OF DIFFUSION FODs - COMPARISON OF BPX AND DIPY
- we compare bpx and dipy on the fsl_course data (fdt2)
- also try out dipy on the MGH high res data
%% Cell type:code id: tags:
``` python
import os
import numpy as np
import matplotlib.pyplot as plt
# from dipy.core.gradients import gradient_table
#
# from dipy.io.image import load_nifti_data
# from dipy.reconst.dti import fractional_anisotropy
# from dipy.reconst.csdeconv import recursive_response
#
# from dipy.reconst.csdeconv import auto_response_ssst
# from dipy.sims.voxel import single_tensor_odf
# from dipy.reconst.csdeconv import ConstrainedSphericalDeconvModel
# from dipy.viz import window, actor
```
%% Cell type:code id: tags:
``` python
# Load data, brain mask, bvals, bvecs
from fsl.data.image import Image
# FSL course data
datadir = '/Users/saad/Git/diffusion/fsl_course_data/'
datadir = '/Users/jiewonkang/LAYNII/diffusion/mgh/'#'/Users/jiewonkang/LAYNII/diffusion/fslcourse/' #'/Users/saad/Git/diffusion/fsl_course_data/'
# Get data
data = Image(os.path.join(datadir,'data'))
data = Image(os.path.join(datadir,'slice_xy'))
print(data.shape)
# Brain mask
mask = Image(os.path.join(datadir,'nodif_brain_mask')).data>0
# bvals, bvecs text to array
bvals = np.loadtxt(os.path.join(datadir,"bvals"))
bvecs = np.loadtxt(os.path.join(datadir,"bvecs")).T
# Set gradient table
from dipy.core.gradients import gradient_table
gtab = gradient_table(bvals, bvecs)
print(gtab.bvals.shape)
print(gtab.bvecs.shape)
```
%%%% Output: stream
(116, 116, 76, 115)
(115,)
(115, 3)
(292, 1, 192, 2808)
(2808,)
(2808, 3)
%% Cell type:markdown id: tags:
### First need to estimate fibre response function
%% Cell type:code id: tags:
``` python
# response function estimated (FA > 0.7)
from dipy.reconst.csdeconv import auto_response_ssst
response, ratio = auto_response_ssst(gtab, data.data, fa_thr=0.7)
# Response: diffusivity along x,y,z and S0
# Ratio : anisotropy (lowest to highest diff)
evals_d = response[0]
print(response[0][:,None])
print(response)
print(ratio)
# # response function for multi cell fibre response
# response_mcsd=MultiShellResponse(response[0][:,None],sh_order=3,shells=2,S0=np.array(response[1]))
```
%%%% Output: stream
[[0.00145238]
[0.00030023]
[0.00030023]]
(array([0.00145238, 0.00030023, 0.00030023]), 1428.1447)
0.206712291263422
[[0.00389526]
[0.0010381 ]
[0.0010381 ]]
(array([0.00389526, 0.0010381 , 0.0010381 ]), 1163.474)
0.2665042647810753
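%% Cell type:markdown id: tags:
The ratio printed above is simply the perpendicular diffusivity divided by the parallel diffusivity of the single-fibre response, so it can be recomputed from `response[0]` as a quick sanity check (a minimal sketch using only the variables defined in the cell above):
%% Cell type:code id: tags:
``` python
# Sanity check: response = (eigenvalues [parallel, perp, perp], S0) and
# ratio = perpendicular / parallel diffusivity of the single-fibre response.
evals_d, S0 = response
print(evals_d[1] / evals_d[0])   # should reproduce the printed ratio (~0.267 here)
# A small ratio (a sharp, anisotropic response) is what we want before deconvolution.
```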
%% Cell type:markdown id: tags:
### Fitting ODF
%% Cell type:code id: tags:
``` python
# Fit ODF
# Use response function to reconstruct ODF
from dipy.reconst.csdeconv import ConstrainedSphericalDeconvModel
csd_model = ConstrainedSphericalDeconvModel(gtab,response)
csd_model = ConstrainedSphericalDeconvModel(gtab,response,convergence=500)#200
csd_fit = csd_model.fit(data)
print(csd_fit.shape)
```
%%%% Output: stream
/opt/miniconda3/envs/py38/lib/python3.8/site-packages/dipy-1.2.0-py3.8-macosx-10.9-x86_64.egg/dipy/reconst/csdeconv.py:696: UserWarning: maximum number of iterations exceeded - failed to converge
/Users/jiewonkang/opt/anaconda3/lib/python3.8/site-packages/dipy/reconst/csdeconv.py:700: UserWarning: maximum number of iterations exceeded - failed to converge
warnings.warn(msg)
%%%% Output: error
%%%% Output: stream
---------------------------------------------------------------------------
KeyboardInterrupt Traceback (most recent call last)
/var/folders/b7/p54_r2qn6kl4nxy4f7_28hrc0000gs/T/ipykernel_46994/2571807881.py in <module>
3 from dipy.reconst.csdeconv import ConstrainedSphericalDeconvModel
4 csd_model = ConstrainedSphericalDeconvModel(gtab,response)
----> 5 csd_fit = csd_model.fit(data)
6 print(csd_fit.shape)
7
/opt/miniconda3/envs/py38/lib/python3.8/site-packages/dipy-1.2.0-py3.8-macosx-10.9-x86_64.egg/dipy/reconst/multi_voxel.py in new_fit(self, data, mask)
31 for ijk in ndindex(data.shape[:-1]):
32 if mask[ijk]:
---> 33 fit_array[ijk] = single_voxel_fit(self, data[ijk])
34 return MultiVoxelFit(self, fit_array, mask)
35 return new_fit
/opt/miniconda3/envs/py38/lib/python3.8/site-packages/dipy-1.2.0-py3.8-macosx-10.9-x86_64.egg/dipy/reconst/csdeconv.py in fit(self, data)
287 def fit(self, data):
288 dwi_data = data[self._where_dwi]
--> 289 shm_coeff, _ = csdeconv(dwi_data, self._X, self.B_reg, self.tau,
290 convergence=self.convergence, P=self._P)
291 return SphHarmFit(self, shm_coeff, None)
/opt/miniconda3/envs/py38/lib/python3.8/site-packages/dipy-1.2.0-py3.8-macosx-10.9-x86_64.egg/dipy/reconst/csdeconv.py in csdeconv(dwsignal, X, B_reg, tau, convergence, P)
682 # We use the Cholesky decomposition to solve for the SH coefficients.
683 Q = P + np.dot(H.T, H)
--> 684 fodf_sh = _solve_cholesky(Q, z)
685
686 # Sample the FOD using the regularization sphere and compute k.
/opt/miniconda3/envs/py38/lib/python3.8/site-packages/dipy-1.2.0-py3.8-macosx-10.9-x86_64.egg/dipy/reconst/csdeconv.py in _solve_cholesky(Q, z)
518
519 def _solve_cholesky(Q, z):
--> 520 L, info = potrf(Q, lower=False, overwrite_a=False, clean=False)
521 if info > 0:
522 msg = "%d-th leading minor not positive definite" % info
KeyboardInterrupt:
(292, 1, 192)
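%% Cell type:markdown id: tags:
The convergence warning and most of the fitting time come from voxels outside the brain. One option (a hedged sketch, not what was run above) is to restrict the fit to the brain mask loaded earlier; the multi-voxel `fit` takes a `mask` argument, as the `new_fit(self, data, mask)` frame in the old traceback shows. This assumes the mask has the same spatial shape as the data.
%% Cell type:code id: tags:
``` python
# Sketch: fit only inside the brain mask so background voxels are skipped.
# Assumes `mask` (from nodif_brain_mask above) matches the data's spatial shape.
csd_fit_masked = csd_model.fit(data.data, mask=mask)
print(csd_fit_masked.shape)
```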
%% Cell type:markdown id: tags:
### Extract the peaks and save as dyads
%% Cell type:code id: tags:
``` python
# Define output directory
output_dir = '/Users/saad/Desktop/odf_results/'
output_dir = '/Users/jiewonkang/LAYNII/diffusion/odf_results/'#'/Users/saad/Desktop/odf_results/'
# Define basename string for output files
filespec = "fsl_course_data"
filespec = "mgh_data_slice"#"fsl_course_data"
# Extract peak directions (maxima) of ODFs
from dipy.direction import peaks_from_model
from dipy.data import default_sphere
csd_peaks = peaks_from_model(model=csd_model,
data=data,
data=data.data,
sphere=default_sphere,
relative_peak_threshold=.5,
min_separation_angle=25,
npeaks=3,
parallel=True)
```
%%%% Output: stream
Saving illustration as slice_xy_2_csd_peaks.png
%% Cell type:markdown id: tags:
### No of crossing fibres
%% Cell type:code id: tags:
``` python
# Peak values
peakvals=csd_peaks.peak_values
# Count number of crossing fibres from peaks (max 3 peaks)
crossing_fibre_no=np.count_nonzero(peakvals,axis=3,keepdims=True)
print(crossing_fibre_no.shape)
# Save as nifti
func = nib.load(filename) # diffusion data
ni_img = nib.Nifti1Image(crossing_fibre_no,func.affine)
nib.save(ni_img, outputdir+filespec+'_cross_fib_count.nii.gz')
Image(crossing_fibre_no, xform=np.eye(4)).save(output_dir+filespec+'_cross_fib_count.nii.gz')
```
%%%% Output: stream
(190, 1, 170, 3)
(190, 1, 170, 1)
(292, 1, 192, 1)
%% Cell type:markdown id: tags:
### Size of maximum peak
%% Cell type:code id: tags:
``` python
# Use max peak value
maxpeak=np.amax(peakvals,axis=3)
print(maxpeak.shape)
# Save as nifti
ni_img_2 = nib.Nifti1Image(maxpeak, func.affine)
nib.save(ni_img_2, outputdir+filespec+'_cross_fib_max.nii.gz')
Image(maxpeak, xform=np.eye(4)).save(output_dir+filespec+'_cross_fib_max.nii.gz')
```
%%%% Output: stream
(190, 1, 170)
[[[0. 0.00406759 0.06974851 ... 0.12878251 0.11782357 0.12736678]]
[[0. 0.0171325 0.06137189 ... 0.15239174 0.10964878 0.15413291]]
[[0. 0.01435646 0.08884249 ... 0.15292671 0.13944273 0.15151466]]
...
[[0. 0. 0.05125385 ... 0.20745192 0.29377308 0.19453879]]
[[0. 0. 0.04139549 ... 0.27782337 0.23287452 0.22019032]]
[[0. 0. 0.03380958 ... 0.2319051 0.20727103 0.20715439]]]
next
(190, 1, 170, 3)
(190, 1, 170)
(292, 1, 192)
%% Cell type:markdown id: tags:
### Peak orientations (x,y,z directions)
%% Cell type:code id: tags:
``` python
# Save peak directions as dyads
peak_dir_xyz=csd_peaks.peak_dirs
print(peak_dir_xyz.shape)
# Save peak values corresponding to each peak direction
# Direction for Peak 1
peak1=peak_dir_xyz[:,:,:,0,:]
print(peak1.shape)
# Magnitude of Peak 1
peak1_vals=peakvals[:,:,:,0]
# Direction for Peak 2
peak2=peak_dir_xyz[:,:,:,1,:]
# Magnitude of Peak 2
peak2_val=peakvals[:,:,:,1]
peak2_vals=peakvals[:,:,:,1]
# Direction for Peak 3
peak3=peak_dir_xyz[:,:,:,2,:]
# Magnitude of Peak 3
peak3_val=peakvals[:,:,:,2]
peak3_vals=peakvals[:,:,:,2]
# Save each peak information into separate nifti
ni_img1 = nib.Nifti1Image(np.multiply(np.stack(np.tile(peak1_vals,(3,1,1,1)),axis=3),peak1),func.affine)
nib.save(ni_img1, outputdir+filespec+'_crossfib_peak1.nii.gz')
ni_img2 = nib.Nifti1Image(np.multiply(np.stack(np.tile(peak2_vals,(3,1,1,1)),axis=3),peak2),func.affine)
nib.save(ni_img2, outputdir+filespec+'_crossfib_peak2.nii.gz')
Image(np.multiply(np.stack(np.tile(peak1_vals,(3,1,1,1)),axis=3),peak1), xform=np.eye(4)).save(output_dir+filespec+'_crossfib_peak1.nii.gz')
Image(np.multiply(np.stack(np.tile(peak2_vals,(3,1,1,1)),axis=3),peak2), xform=np.eye(4)).save(output_dir+filespec+'_crossfib_peak2.nii.gz')
Image(np.multiply(np.stack(np.tile(peak3_vals,(3,1,1,1)),axis=3),peak3), xform=np.eye(4)).save(output_dir+filespec+'_crossfib_peak3.nii.gz')
ni_img3 = nib.Nifti1Image(np.multiply(np.stack(np.tile(peak3_vals,(3,1,1,1)),axis=3),peak3),func.affine)
nib.save(ni_img3, outputdir+filespec+'_crossfib_peak3.nii.gz')
```
%%%% Output: stream
(190, 1, 170, 3, 3)
(190, 1, 170, 3)
[[ 0. -0.6915564 0.56038515 ... -0.44015013 0.20771889
0.26458817]
[ 0. -0.50392443 -0.84258134 ... 0.51692592 0.65072818
-0.53681715]
[ 0. -0.51957521 0.04987662 ... 0.09420113 -0.99918788
-0.86214037]
...
[ 0. 0. 0.87974526 ... 0.16864329 -0.60805016
-0.60336986]
[ 0. 0. -0.50392443 ... -0.71068281 0.72916496
-0.70624532]
[ 0. 0. -0.75554233 ... -0.33071052 -0.96108153
0.83096984]]
(292, 1, 192, 3, 3)
(292, 1, 192, 3)
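%% Cell type:markdown id: tags:
The `np.stack(np.tile(...), axis=3)` construction above just copies each peak magnitude along a new last axis before multiplying. The same amplitude-weighted dyads can be written more directly with broadcasting (an equivalent sketch using the arrays defined above):
%% Cell type:code id: tags:
``` python
# Equivalent via broadcasting: add a trailing length-1 axis to the magnitudes
# so each (x, y, z) direction vector is scaled by its peak amplitude.
dyad1 = peak1 * peak1_vals[..., np.newaxis]   # shape (292, 1, 192, 3)
dyad2 = peak2 * peak2_vals[..., np.newaxis]
dyad3 = peak3 * peak3_vals[..., np.newaxis]
# e.g. Image(dyad1, xform=np.eye(4)).save(output_dir + filespec + '_crossfib_peak1.nii.gz')
```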
%% Cell type:code id: tags:
``` python
# Full volume (:,:,:,:)
```
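%% Cell type:markdown id: tags:
For the bpx-vs-dipy comparison described at the top, the bedpostX dyads can be loaded in the same way for a side-by-side look against the dipy peaks. This is a hedged sketch only: the directory below is a placeholder, not a path used in this notebook, although `dyads1` and `mean_f1samples` are standard bedpostX output names.
%% Cell type:code id: tags:
``` python
# Sketch: load FSL bedpostX outputs for comparison with the dipy dyads saved above.
bpx_dir = os.path.join(datadir, 'data.bedpostX')              # placeholder location
bpx_dyad1 = Image(os.path.join(bpx_dir, 'dyads1')).data       # principal fibre direction
bpx_f1 = Image(os.path.join(bpx_dir, 'mean_f1samples')).data  # volume fraction of fibre 1
print(bpx_dyad1.shape, bpx_f1.shape)
```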
......