@inproceedings{e91edf5bea234645b258bd5c4f5c55b6,
  title     = {Deformable {MR-CT} Image Registration Using an Unsupervised Synthesis and Registration Network for Neuro-Endoscopic Surgery},
  abstract  = {Purpose: Deep-brain stimulation via neuro-endoscopic surgery is a challenging procedure that requires accurate targeting of deep-brain structures that can undergo deformations (up to 10 mm). Conventional deformable registration methods have the potential to resolve such geometric error between preoperative MR and intraoperative CT but at the expense of long computation time. New advances in deep learning methods offer benefits to inter-modality image registration accuracy and runtime using novel similarity metrics and network architectures. Method: An unsupervised deformable registration network is reported that first generates a synthetic CT from MR using CycleGAN and then registers the synthetic CT to the intraoperative CT using an inverse-consistent registration network. Diffeomorphism of the registration is maintained using deformation exponentiation {``}squaring and scaling{''} layers. The method was trained and tested on a dataset of CT and T1-weighted MR images with randomly simulated deformations that mimic deep-brain deformation during surgery. The method was compared to a baseline method using inter-modality deep learning registration, VoxelMorph. Results: The methods were tested on 10 pairs of CT/MR images from 5 subjects. The proposed method achieved a Dice score of 0.84$\pm$0.04 for the lateral ventricles, 0.72$\pm$0.09 for the 3rd ventricle, and 0.63$\pm$0.10 for the 4th ventricle, with target registration error (TRE) of 0.95$\pm$0.54 mm. The proposed method showed statistically significant improvement in both Dice score and TRE in comparison to inter-modality VoxelMorph, while maintaining a fast runtime of less than 3 seconds for a typical MR-CT pair of volume images. Conclusion: The proposed unsupervised image synthesis and registration network demonstrates the capability for accurate volumetric deformable MR-CT registration with near real-time performance. The method will be further developed for application in intraoperative CT (or cone-beam CT) guided neurosurgery.},
  keywords  = {Image Registration, Image Synthesis, Multimodality Registration, Unsupervised Learning},
  author    = {Han, R. and Jones, C. K. and Ketcha, M. D. and Wu, P. and Vagdargi, P. and Uneri, Ali and Lee, J. and Luciano, M. and Anderson, W. S. and Siewerdsen, J. H.},
  note      = {Funding Information: This research was supported by NIH grant U01-NS-107133. The authors extend their thanks to collaborators from Department of Neurosurgery at Johns Hopkins Hospital and Department of Radiation Oncology at Johns Hopkins University. Publisher Copyright: {\textcopyright} 2021 SPIE.; Medical Imaging 2021: Image-Guided Procedures, Robotic Interventions, and Modeling ; Conference date: 15-02-2021 Through 19-02-2021},
  year      = {2021},
  doi       = {10.1117/12.2581567},
  language  = {English (US)},
  series    = {Progress in Biomedical Optics and Imaging - Proceedings of SPIE},
  publisher = {SPIE},
  editor    = {Linte, Cristian A. and Siewerdsen, Jeffrey H.},
  booktitle = {Medical Imaging 2021: Image-Guided Procedures, Robotic Interventions, and Modeling},
}