File size: 25,604 Bytes
fad35ef
1
{"forum": "BkgjbQ30yN", "submission_url": "https://openreview.net/forum?id=BkgjbQ30yN", "submission_content": {"title": "High-quality segmentation of low quality cardiac MR images using k-space artefact correction", "authors": ["Ilkay Oksuz", "James Clough", "Wenjia Bai", "Bram Ruijsink", "Esther Puyol Anton", "Gastao Cruz", "Claudia Prieto", "Andrew P. King", "Julia A. Schnabel"], "authorids": ["ilkay.oksuz@kcl.ac.uk", "james.clough@kcl.ac.uk", "w.bai@imperial.ac.uk", "jacobus.ruijsink@kcl.ac.uk", "esther.puyol_anton@kcl.ac.uk", "gastao.cruz@kcl.ac.uk", "claudia.prieto@kcl.ac.uk", "andrew.king@kcl.ac.uk", "julia.schnabel@kcl.ac.uk"], "keywords": ["Cardiac MR Segmentation", "Image Quality", "Image Artefacts", "Image Artefact Correction", "Deep Learning", "UK Biobank", "Automap"], "TL;DR": "Improved segmentation of low quality cardiac MR images using an artefact correction model using k-space input", "abstract": "Deep learning methods have shown great success in segmenting the anatomical and pathological structures in medical images. This success is closely bounded with the quality of the  images in the dataset that are being segmented. A commonly overlooked issue in the medical image analysis community is the vast amount of clinical images that have severe image artefacts. In this paper, we discuss the implications of image artefacts on cardiac MR segmentation and compare a variety of approaches for motion artefact correction with our proposed method Automap-GAN. Our method is based on the recently developed Automap reconstruction method, which directly reconstructs high quality MR images from k-space using deep learning. We propose to use a loss function that combines mean square error with structural similarity index to robustly segment poor-quality images. We train the reconstruction network to automatically correct for motion-related artefacts using synthetically corrupted CMR k-space data and uncorrected reconstructed images. 
In the experiments, we apply the proposed method to correct for motion artefacts on a large dataset of 1,400 subjects to improve image quality. The improvement of image quality is quantitatively assessed using segmentation accuracy as a metric. The segmentation is improved from 0.63 to 0.72 dice overlap after artefact correction. We quantitatively compare our method with a variety of techniques for recovering image quality to showcase the influence on segmentation. In addition, we qualitatively evaluate the proposed technique using k-space data containing real motion artefacts.", "pdf": "/pdf/d552d832a21d0071c282b33f9611d3aeeacb5c50.pdf", "code of conduct": "I have read and accept the code of conduct.", "remove if rejected": "(optional) Remove submission if paper is rejected.", "paperhash": "oksuz|highquality_segmentation_of_low_quality_cardiac_mr_images_using_kspace_artefact_correction", "_bibtex": "@inproceedings{oksuz:MIDLFull2019a,\ntitle={High-quality segmentation of low quality cardiac {\\{}MR{\\}} images using k-space artefact correction},\nauthor={Oksuz, Ilkay and Clough, James and Bai, Wenjia and Ruijsink, Bram and Anton, Esther Puyol and Cruz, Gastao and Prieto, Claudia and King, Andrew P. and Schnabel, Julia A.},\nbooktitle={International Conference on Medical Imaging with Deep Learning -- Full Paper Track},\naddress={London, United Kingdom},\nyear={2019},\nmonth={08--10 Jul},\nurl={https://openreview.net/forum?id=BkgjbQ30yN},\nabstract={Deep learning methods have shown great success in segmenting the anatomical and pathological structures in medical images. This success is closely bounded with the quality of the  images in the dataset that are being segmented. A commonly overlooked issue in the medical image analysis community is the vast amount of clinical images that have severe image artefacts. 
In this paper, we discuss the implications of image artefacts on cardiac MR segmentation and compare a variety of approaches for motion artefact correction with our proposed method Automap-GAN. Our method is based on the recently developed Automap reconstruction method, which directly reconstructs high quality MR images from k-space using deep learning. We propose to use a loss function that combines mean square error with structural similarity index to robustly segment poor-quality images. We train the reconstruction network to automatically correct for motion-related artefacts using synthetically corrupted CMR k-space data and uncorrected reconstructed images. In the experiments, we apply the proposed method to correct for motion artefacts on a large dataset of 1,400 subjects to improve image quality. The improvement of image quality is quantitatively assessed using segmentation accuracy as a metric. The segmentation is improved from 0.63 to 0.72 dice overlap after artefact correction. We quantitatively compare our method with a variety of techniques for recovering image quality to showcase the influence on segmentation. 
In addition, we qualitatively evaluate the proposed technique using k-space data containing real motion artefacts.},\n}"}, "submission_cdate": 1544631043354, "submission_tcdate": 1544631043354, "submission_tmdate": 1561398298528, "submission_ddate": null, "review_id": ["HkxCNVFh7N", "H1gZQyynmV", "SyxN8fWn74"], "review_url": ["https://openreview.net/forum?id=BkgjbQ30yN&noteId=HkxCNVFh7N", "https://openreview.net/forum?id=BkgjbQ30yN&noteId=H1gZQyynmV", "https://openreview.net/forum?id=BkgjbQ30yN&noteId=SyxN8fWn74"], "review_cdate": [1548682294503, 1548640025236, 1548649035918], "review_tcdate": [1548682294503, 1548640025236, 1548649035918], "review_tmdate": [1548856755398, 1548856748155, 1548856689869], "review_readers": [["everyone"], ["everyone"], ["everyone"]], "review_writers": [["MIDL.io/2019/Conference/Paper24/AnonReviewer2"], ["MIDL.io/2019/Conference/Paper24/AnonReviewer3"], ["MIDL.io/2019/Conference/Paper24/AnonReviewer1"]], "review_reply_count": [{"replyCount": 0}, {"replyCount": 0}, {"replyCount": 0}], "review_replyto": ["BkgjbQ30yN", "BkgjbQ30yN", "BkgjbQ30yN"], "review_content": [{"pros": "The authors propose a method to correct k-space of cardiac MR images affected by motion artefacts. The method employs a previously proposed deep learning method (Automap) and extend it with a GAN. The Automap is trained to map K-space images to an output image. By introducing synthetically created motion artefacts by modifying K-space, Automap is trained to generate original images from corrupted K-space images. Evaluation is performed by automatic segmentation of images after artefact correction.\n\nThe paper is clearly written.\n\nThe authors provide extensive experiments, comparing their method with other artefact correction methods.\n", "cons": "Figure 3 does not provide clear evidence that the proposed method corrects motion artefacts in k-space images \u201cin the wild\u201d. 
Both segmentation results are very similar and have a major error in left ventricle myocardium segmentation. It seems that the proposed method only seems to correct synthetically created motion artefacts.\n\nIt is unclear if the method would reconstruct K-space images correctly if motion artefacts are absent. What is the influence on segmentation performance applying artefact correction on k-space images without motion artefacts?\n\nOnly average scores are provided. It would be interesting to see boxplots (including visualization of outliers).\n\nHow does the Dice of 0.91 on original images provide a clarification for higher scores in Table 1?\n\nIt is unclear how activity regularization was performed\n\nFigure 2 is hard to assess because input images are absent.\n", "rating": "3: accept", "confidence": "3: The reviewer is absolutely certain that the evaluation is correct and very familiar with the relevant literature"}, {"pros": "Summary:\nIn this work the authors have described a deep network (Automap-GAN)-based k-space artifact correction algorithm that improves image quality, which leads to improved segmentation accuracy. \n\n* Demonstrates useful results of motion artifact correction for improving segmentation of synthetic and real data. \n* Interesting network design (though there appear to be significant errors in the description) \n", "cons": "* The network architecture is rather hastily described and not clear to me.\nThe discriminator architecture description lacks details such as number of filters, filter-size, final layer activation function etc. A picture will help.\n\n* The network architecture has been called an Automap-GAN but there is no mention of a discriminator loss in the eventual loss function. The training of GANs is tricky to control depending on how many iterations are the generator and the discriminator trained before updates, the details of which are missing. \n\n* What if the adversarial loss (if it exists) is removed? 
\n\n* What was the k-space dimension size $n$ that was eventually used?\n\n* Not sure if the novelty claim of using SSIM and MSE as loss functions is true. These are available in tensorflow and are used frequently for image synthesis tasks.\n\n* The details of the activity regularizer are not mentioned. \n\n* I assume the training and test subject sets were non-overlapping? The description in terms of the 2D images does not make this clear.\n\n* Standard deviations for the metrics in Table 1 and Table 2 will give a better idea of the improvement in performance.\n\n\n\nMinor:\nTypo in \"prerequisite\" caption of Fig 1. ", "rating": "3: accept", "confidence": "2: The reviewer is fairly confident that the evaluation is correct"}, {"pros": "This work builds on Automap-GAN, a framework previously proposed by the authors to directly reconstruct good quality images from corrupted k-space acquisitions. They had proven this framework to be able to remove motion artifacts in cardiac magnetic resonance (CMR) imaging. They had measured this improvement both qualitatively and quantitatively using MSE loss in image space, using artificially corrupted data. They noted that the MSE loss may not be the best loss to train with and evaluate results. For training, they introduce here an additional SSIM loss. This loss may be able to reduce the blurring effect of reconstruction using only the MSE loss. \n\nTo evaluate results, the MSE of the reconstruction is replaced by the evaluation of the improvement for an important downstream task, i.e. semantic segmentation quality, measured by classical metrics (Dice, Hausdorff distance\u2026). This constitutes the main originality of the paper.\n\nPros :\nTackles a difficult problem, i.e. 
images with possibly big motion artifacts, which are typically excluded from medical imaging datasets.\nThe paper investigates the interesting influence of artefact correction on segmentation quality.\nThe proposed method is compared against a variety of other standard reconstruction methods (4 in total). \nThe method is consistently better than all the others as measured by 3 segmentation metrics.\n\nThe paper is clear and easy to follow.", "cons": "\nThe SSIM loss was introduced after \u201csmoothed-out\u201d and blurred looking reconstruction images were observed in previous work by authors. Here though, there is no analysis of how the additional SSIM loss improves this matter:\n-No presentation of improvement in reconstruction metrics (i.e. MSE\u2026 which could be calculated for the artificially corrupted data), if there was some ?\n-In fact, the reconstructed images of the proposed method shown in Fig 1 and Fig. 3 still seem blurry (more so than with WIN5). The comment \u201cthe proposed method corrects the artefact but loses some structural information\u201d from previous paper still seems to hold. No qualitative comparison of reconstructed images with and without SSIM loss is provided.\n-Improved segmentation with no improvement of image quality might not be well accepted in practice by clinicians, so it may be better that both be demonstrated. This is actually what is anticipated for future work by others.\nThe SSIM calculation is not clear. What are the \u201cregions\u201d x and y in this case ? Are they parts of the images around a pixel location p ? Or the whole images (consistently with the notations a few lines above) ? Is Lssim the same for all pixel, otherwise should it be averaged on the whole image - like Lmse  ? Could this be better explained ? 
\nA limitation of the method is the memory burden for motion correction, as acknowledged by authors.\n\nNote: it might be judicious to drop the few lines defining Dice and Hausdorff distance, which are well-known, and use this space to spend a few more lines explaining the adversarial setting, which isn't so obvious to understand.\n", "rating": "3: accept", "confidence": "2: The reviewer is fairly confident that the evaluation is correct"}], "comment_id": ["rJlo-h9HN4", "Bkezpsqr4N", "Byxw_35HVV"], "comment_cdate": [1549278211438, 1549278138230, 1549278319045], "comment_tcdate": [1549278211438, 1549278138230, 1549278319045], "comment_tmdate": [1555946018084, 1555946017829, 1555946017563], "comment_readers": [["everyone"], ["everyone"], ["everyone"]], "comment_writers": [["MIDL.io/2019/Conference/Paper24/Authors", "MIDL.io/2019/Conference"], ["MIDL.io/2019/Conference/Paper24/Authors", "MIDL.io/2019/Conference"], ["MIDL.io/2019/Conference/Paper24/Authors", "MIDL.io/2019/Conference"]], "comment_reply_content": [{"replyCount": 0}, {"replyCount": 0}, {"replyCount": 0}], "comment_content": [{"title": "Reviewer 2 Responses", "comment": "\nA2.0  We would like to thank the reviewer for pointing out the extensive experiments and the positive evaluation of the paper. The main purpose of our  work was not to propose the artefact correction, but its application to improve segmentation performance.\n\nR2.1 Figure 3 does not provide clear evidence that the proposed method corrects motion artefacts in k-space images \u201cin the wild\u201d. Both segmentation results are very similar and have a major error in left ventricle myocardium segmentation. 
It seems that the proposed method only seems to correct synthetically created motion artefacts.\n\nA2.1  We agree with the reviewer that the achieved image segmentation is still not perfect, however, we believe the image quality in Fig.3 to be promising for future work (see also A1.1, A1.2), especially given the fact we have trained our network only on synthetic and not real artefacts.\n\nR2.2 It is unclear if the method would reconstruct K-space images correctly if motion artefacts are absent. What is the influence on segmentation performance applying artefact correction on k-space images without motion artefacts?\n\nA2.2  In  our current pipeline setup, we aim to first detect low quality images and only correct these images. It is an interesting suggestion to have a global high quality image reconstruction pipeline that is independent of the quality of the input. Given that for the training of the network,  high-quality target images have been used, for k-space inputs without motion artefacts, our network has the potential to be a global image reconstructor.  \n\nR2.3 Only average scores are provided. It would be interesting to see boxplots (including visualization of outliers).\n\nA2.3 Standard deviation numbers are in line with the mean results. As an example in Table 1 proposed combined Loss Dice has mean and standard deviation of 0.722+/- 0.014. WIN5 has a mean and standard deviation of 0.681+/-0.018.  We will add the standard deviation scores for all methods to better evaluate variance of the results.\n\nR2.4 How does the Dice of 0.91 on original images provide a clarification for higher scores in Table 1?\n\nA2.4 The segmentation output of the network using the original images as input is not perfect and generates a Dice of 0.91 as pointed out by the reviewer. 
These under- or over-segmentations are likely to generate a better overlap with the low quality image segmentation output as illustrated with the results.\n\nR2.5 It is unclear how activity regularization was performed\n\nA2.5 We follow the original Automap work (Zhu et al.,  Nature 2018) to apply the activity regularization. Even though their regularizer helps to produce sharper images at the final epochs of the training, we found the training to be  sensitive to the regularization, causing it to get trapped in local minima. Therefore, we initialized our training without using the regularizer and introduced it only after 10 epochs. \n\nR2.6 Figure 2 is hard to assess because input images are absent.\n\nA2.6  We would like to refer the reviewer to Fig.1 which  shows examples of both the improved image quality and resulting segmentations. In contrast, Fig.2 accommodates more correction methods, but with a focus on their segmentation performance (lack of space prohibits us to also show the corresponding images).\n"}, {"title": "Reviewer 1 Responses", "comment": "\nA1.0 We would like to thank the reviewer for pointing out the novelty of the paper and the positive evaluation.\n\nR1.1 The SSIM loss was introduced after \u201csmoothed-out\u201d and blurred looking reconstruction images were observed in previous work by authors. Here though, there is no analysis of how the additional SSIM loss improves this matter:\n-No presentation of improvement in reconstruction metrics (i.e. MSE\u2026 which could be calculated for the artificially corrupted data), if there was some ? In fact, the reconstructed images of the proposed method shown in Fig 1 and Fig. 3 still seem blurry (more so than with WIN5). The comment \u201cthe proposed method corrects the artefact but loses some structural information\u201d from previous paper still seems to hold. 
No qualitative comparison of reconstructed images with and without SSIM loss is provided.\n\nA1.1 There is an improvement on both MSE and SSIM score for the reconstructed images with the new loss function. This information is evident in computer vision literature in reference [18] and we have seen the same pattern in our experiments. We aimed to evaluate with segmentation performance in this work. The images improved both in quality and segmentation performance with the new combined loss function. The focal point of this work is on the implication on the segmentation performance, and therefore the image quality metrics are omitted (but have been reported elsewhere already [18]). In the updated manuscript, we will illustrate the quantitative result improvement achieved  using the new loss function.  To give an example, the SSIM score achieved for the MSE loss was 0.85, and the average SSIM improved to 0.89 with the combined loss function.  Though still looking quite smooth, the images are sharper compared to only using the MSE  loss.\n\nR1.2 Improved segmentation with no improvement of image quality might not be well accepted in practice by clinicians, so it may be better that both be demonstrated. This is actually what is anticipated for future work by others.\n\nA1.2 It is not the aim of this work to present the clinicians with an artefact corrected image (which indeed may not find their acceptance), but with a good-quality segmentation obtained after using our artefact correction method. The image quality increased both in terms of SSIM and MSE and additional results will be  provided (see also A1.1,A1.2).  We aim to do an extensive study on both factors in an additional study.\n\nR1.3 The SSIM calculation is not clear. What are the \u201cregions\u201d x and y in this case ? Are they parts of the images around a pixel location p ? Or the whole images (consistently with the notations a few lines above) ? 
Is Lssim the same for all pixel, otherwise should it be averaged on the whole image - like Lmse  ? Could this be better explained ? \nA limitation of the method is the memory burden for motion correction, as acknowledged by authors.\n\nA1.3 We thank the reviewer for pointing out a potential lack of clarity in our description of the  SSIM calculation.  The Lssim term is averaged over all images which we will further clarify in the updated manuscript. \u2018p\u2019 denotes the pixel values, \u2018x\u2019 and \u2018y\u2019 denote regions in the image. We use 5 by 5 patches with a stride of 2. In the updated manuscript, we will also clarify these region definitions in SSIM by using a more consistent variable notation.\n\nR1.4 Note: it might be judicious to drop the few lines defining Dice and Hausdorff distance, which are well-known, and use this space to spend a few more lines explaining the adversarial setting, which isn't so obvious to understand.\n\nA1.4 We focused on describing the novel aspects of the algorithm in the manuscript and relied on citations to describe the method architecture. We thank the reviewer for this suggestion and will indeed use this opportunity to further expand on our method description. \n"}, {"title": "Reviewer 3 Responses", "comment": "\nA3.0  We thank the reviewer for pointing out the novel network design and useful results of our work.\n\nR3.1 The network architecture is rather hastily described and not clear to me.\nThe discriminator architecture description lacks details such as number of filters, filter-size, final layer activation function etc. A picture will help. The network architecture has been called an Automap-GAN but there is no mention of a discriminator loss in the eventual loss function. The training of GANs is tricky to control depending on how many iterations are the generator and the discriminator trained before updates, the details of which are missing. 
\n\nA3.1  Space permitting, we aim to add the information regarding the discriminator architecture in the updated manuscript. Most of the details about the discriminator network architecture and training mechanisms are already thoroughly explained in our previous work (Oksuz et al. MLMIR 2018).  We will add the important information regarding the discriminator architecture in the updated manuscript. \n\nR3.2 What if the adversarial loss (if it exists) is removed?\n\nA3.2  We agree that such a comparison would be  an important evaluation, which we have reported already in our previous work [reference 9] in regard to the impact on image quality. When the adversarial component of the network is removed, in other words when Automap (Zhu et al., Nature 2018) method is used the mean Dice in Table 1 is 0.635+/-0.022. In this current work, we focus on segmentation performance of Automap, and will include our previously unreported results without adversarial setup in the updated manuscript.\n\nR3.3 What was the k-space dimension size $n$ that was eventually used?\n\nA3.3  We use a 128*128 image size for all our experiments and we will add this information to the manuscript.\n\nR3.4 Not sure if the novelty claim of using SSIM and MSE as loss functions is true. These are available in tensorflow and are used frequently for image synthesis tasks.\n\nA3.4  We agree with the reviewer that both loss functions are not novel per se. The key novelty here is their combined use in an artefact correction setup.\n\nR3.5 The details of the activity regularizer are not mentioned. \n\nA3.5  The original Automap paper (Zhu et al., Nature, 2018) proposes to use an activity regularizer (for a more detailed response see also A2.5). \n\nR3.6  I assume the training and test subject sets were non-overlapping? The description in terms of the 2D images does not make this clear.\n\nA3.6 We make sure not to use the same subjects in training and testing.  
There is no overlap between training and test subjects, which we will further clarify in the updated manuscript.\n\nR3.7  Standard deviations for the metrics in Table 1 and Table 2 will give a better idea of the improvement in performance.\n\nA3.7 The standard deviation numbers are in line with the average scores. We will add the standard deviation numbers in the updated manuscript (see also A2.3). \n\nMinor:\nR3.8 Typo in \"prerequisite\" caption of Fig 1.\n\nA3.8 We thank the reviewer for pointing out the typo which we have now corrected.  \n"}], "comment_replyto": ["HkxCNVFh7N", "SyxN8fWn74", "H1gZQyynmV"], "comment_url": ["https://openreview.net/forum?id=BkgjbQ30yN&noteId=rJlo-h9HN4", "https://openreview.net/forum?id=BkgjbQ30yN&noteId=Bkezpsqr4N", "https://openreview.net/forum?id=BkgjbQ30yN&noteId=Byxw_35HVV"], "meta_review_cdate": 1551356581539, "meta_review_tcdate": 1551356581539, "meta_review_tmdate": 1551881979128, "meta_review_ddate ": null, "meta_review_title": "Acceptance Decision", "meta_review_metareview": "Quality: Reviewers were impressed with the number of experiments and comparisons to other techniques.  Some minor issues with missing error bars were addressed during the rebuttal phase.\n\nClarity:  Reviewers agree that the paper is clearly written.  Some minor missing details and clarifications were added after they were pointed out during the review.\n\nOriginality:  The primary originality of the proposal lies in the assessment of the effect on segmentation, rather than image quality.\n\nSignificance:  The strength of the work is that it could improve segmentation, which is an important processing step for a number of further analyses.  The weakness is that it does not necessarily provide image quality at the level clinicians expect, making it harder for them to accept the resulting segmentation.  
", "meta_review_readers": ["everyone"], "meta_review_writers": ["MIDL.io/2019/Conference"], "meta_review_reply_count": {"replyCount": 0}, "meta_review_url": ["https://openreview.net/forum?id=BkgjbQ30yN&noteId=SJgRiGLrUE"], "decision": "Accept"}