{"forum": "02X3kfP6W4", "submission_url": "https://openreview.net/forum?id=gTBQ7gN1t", "submission_content": {"track": "short paper", "keywords": ["Saliency maps", "localization", "anomaly detection", "medical imaging", "deep learning."], "abstract": "Saliency maps have become a widely used method to assess which areas of the input image are most pertinent to the prediction of a trained neural network.  However, in the context of medical imaging, there is no study to our knowledge that has examined the efficacy of these techniques and quantified them using overlap with ground truth bounding boxes. In this work, we explored the credibility of the various existing saliency map methods on the RSNA  Pneumonia  dataset. We  found  that  GradCAM  was  the  most  sensitive  to  model parameter and label randomization, and was highly agnostic to model architecture.", "authors": ["Nishanth Thumbavanam Arun", "Nathan Gaw", "Praveer Singh", "Ken Chang", "Katharina Viktoria Hoebel", "Jay Patel", "Mishka Gidwani", "Jayashree Kalpathy-Cramer"], "authorids": ["nt608@snu.edu.in", "ngaw1@asu.edu", "psingh19@mgh.harvard.edu", "kenchang@mit.edu", "khoebel@mit.edu", "jbpatel@mit.edu", "gidwanm@ccf.org", "jkalpathy-cramer@mgh.harvard.edu"], "pdf": "/pdf/33232420abe90eebd3547ce13e1b27b50f224dff.pdf", "paper_type": "well-validated application", "title": "Assessing the validity of saliency maps for abnormality localization in medical imaging", "paperhash": "arun|assessing_the_validity_of_saliency_maps_for_abnormality_localization_in_medical_imaging", "_bibtex": "@inproceedings{\narun2020assessing,\ntitle={Assessing the validity of saliency maps for abnormality localization in medical imaging},\nauthor={Nishanth Thumbavanam Arun and Nathan Gaw and Praveer Singh and Ken Chang and Katharina Viktoria Hoebel and Jay Patel and Mishka Gidwani and Jayashree Kalpathy-Cramer},\nbooktitle={Medical Imaging with Deep Learning},\nyear={2020},\nurl={https://openreview.net/forum?id=gTBQ7gN1t}\n}"}, 
"submission_cdate": 1579955675677, "submission_tcdate": 1579955675677, "submission_tmdate": 1587172186092, "submission_ddate": null, "review_id": ["XslHL-U2NqJ", "49gTuszt22", "IC6D-Qll2"], "review_url": ["https://openreview.net/forum?id=gTBQ7gN1t&noteId=XslHL-U2NqJ", "https://openreview.net/forum?id=gTBQ7gN1t&noteId=49gTuszt22", "https://openreview.net/forum?id=gTBQ7gN1t&noteId=IC6D-Qll2"], "review_cdate": [1584657016451, 1584128556454, 1584073401412], "review_tcdate": [1584657016451, 1584128556454, 1584073401412], "review_tmdate": [1585229551059, 1585229550533, 1585229550034], "review_readers": [["everyone"], ["everyone"], ["everyone"]], "review_writers": [["MIDL.io/2020/Conference/Paper107/AnonReviewer2"], ["MIDL.io/2020/Conference/Paper107/AnonReviewer4"], ["MIDL.io/2020/Conference/Paper107/AnonReviewer1"]], "review_reply_count": [{"replyCount": 0}, {"replyCount": 0}, {"replyCount": 0}], "review_replyto": ["02X3kfP6W4", "02X3kfP6W4", "02X3kfP6W4"], "review_content": [{"title": "Fair Comparison of Methods for Model Interpretability for 2D Chest X-Ray Classification", "review": "The paper compares different state-of-the-art approaches for visual interpretability in 2D chest X-ray classification. The comparison was made based on their localization capabilities, robustness to model parameter, label randomization, and repeatability/reproducibility with model architectures. And the abnormality localization is evaluated with the Dice's score.\n1. The paper is well-written and well-organized.\n2. The submission relates to the application of deep learning in the field of chest X-ray classification, which is highly relevant to the MIDL audience.\n3. The proposed method is technically sound.\n4. 
Experimental results support the claim made in the paper.\n", "rating": "3: Weak accept", "confidence": "5: The reviewer is absolutely certain that the evaluation is correct and very familiar with the relevant literature"}, {"title": "A study most needed", "review": "CNN interpretability methods are used more and more in medical image analysis. The authors present a thourough evaluation of several of these methods (localisation capabilities, robustness to model parameter and label randomisation, repeatability and reproducibility with model architectures) extending the work first proposed by Adebayo et al.. This work is very interesting and was most needed. ", "rating": "4: Strong accept", "confidence": "4: The reviewer is confident but not absolutely certain that the evaluation is correct"}, {"title": "Assessing saliency map validity", "review": "This paper concerns the assessment of saliency map validity.  It was shown that GradCAM is superior to other methods in terms of model and parameter randomization. This is a useful results, as the interpretability that saliency mapping enables is becoming more and more important to help visualize why deep networks are making their decisions. However, there was a lack of discussion of these results in this paper - are there any possible explanations for why GradCAM is performing better? Furthermore, the images in the figures hard to see. 
They should be larger and as much whitespace should be removed between images.", "rating": "3: Weak accept", "confidence": "2: The reviewer is willing to defend the evaluation, but it is quite likely that the reviewer did not understand central parts of the paper"}], "comment_id": [], "comment_cdate": [], "comment_tcdate": [], "comment_tmdate": [], "comment_readers": [], "comment_writers": [], "comment_reply_content": [], "comment_content": [], "comment_replyto": [], "comment_url": [], "meta_review_cdate": 1586233704961, "meta_review_tcdate": 1586233704961, "meta_review_tmdate": 1586233754451, "meta_review_ddate": null, "meta_review_title": "MetaReview of Paper107 by AreaChair1", "meta_review_metareview": "The interpretability of deep learning models is an important area of research. This work evaluates the usefulness of several methods that aim to visualize the decision making of a neural network. The reviewers are in agreement that the results presented here are of enough interest to warrant acceptance.", "meta_review_readers": ["everyone"], "meta_review_writers": ["MIDL.io/2020/Conference/Program_Chairs", "MIDL.io/2020/Conference/Paper107/Area_Chairs"], "meta_review_reply_count": {"replyCount": 0}, "meta_review_url": ["https://openreview.net/forum?id=gTBQ7gN1t&noteId=GbagBUaisSZ"], "decision": "reject"}