Chris Xiao committed on
Commit
2ca2f68
1 Parent(s): a0d52fb

upload files

Browse files
Files changed (46) hide show
  1. .gitattributes +3 -0
  2. LICENSE +201 -0
  3. README.md +191 -3
  4. assets/nasal.gif +3 -0
  5. deepatlas/loss_function/losses.py +118 -0
  6. deepatlas/metrics/distanceVertex2Mesh.py +64 -0
  7. deepatlas/metrics/get_probability_map.py +194 -0
  8. deepatlas/metrics/lookup_tables.py +463 -0
  9. deepatlas/metrics/metrics.py +355 -0
  10. deepatlas/metrics/surface_distance.py +424 -0
  11. deepatlas/network/network.py +56 -0
  12. deepatlas/preprocess/crop.py +264 -0
  13. deepatlas/preprocess/crop_flip_test.py +303 -0
  14. deepatlas/preprocess/crop_flip_training.py +332 -0
  15. deepatlas/preprocess/generate_info.py +133 -0
  16. deepatlas/preprocess/process_data.py +164 -0
  17. deepatlas/preprocess/registration_test.py +364 -0
  18. deepatlas/preprocess/registration_training.py +379 -0
  19. deepatlas/scripts/deep_atlas.sh +13 -0
  20. deepatlas/scripts/deep_atlas_test.py +80 -0
  21. deepatlas/scripts/deep_atlas_test_customized.py +120 -0
  22. deepatlas/scripts/deep_atlas_test_customized.sh +7 -0
  23. deepatlas/scripts/deep_atlas_train.py +482 -0
  24. deepatlas/scripts/seg_train.py +83 -0
  25. deepatlas/test/test.py +536 -0
  26. deepatlas/train/generators.py +45 -0
  27. deepatlas/train/train.py +444 -0
  28. deepatlas/utils/utils.py +294 -0
  29. deepatlas_config/config_sample.json +28 -0
  30. deepatlas_results/Task002_rnmw153_sim_Zsc/dataset.json +647 -0
  31. deepatlas_results/Task002_rnmw153_sim_Zsc/training_results/RegNet/anatomy_loss_reg.txt +150 -0
  32. deepatlas_results/Task002_rnmw153_sim_Zsc/training_results/RegNet/anatomy_reg_losses.png +0 -0
  33. deepatlas_results/Task002_rnmw153_sim_Zsc/training_results/RegNet/reg_net_best.pth +3 -0
  34. deepatlas_results/Task002_rnmw153_sim_Zsc/training_results/RegNet/reg_net_training_losses.png +0 -0
  35. deepatlas_results/Task002_rnmw153_sim_Zsc/training_results/RegNet/regularization_loss.txt +150 -0
  36. deepatlas_results/Task002_rnmw153_sim_Zsc/training_results/RegNet/regularization_reg_losses.png +0 -0
  37. deepatlas_results/Task002_rnmw153_sim_Zsc/training_results/RegNet/similarity_loss_reg.txt +150 -0
  38. deepatlas_results/Task002_rnmw153_sim_Zsc/training_results/RegNet/similarity_reg_losses.png +0 -0
  39. deepatlas_results/Task002_rnmw153_sim_Zsc/training_results/SegNet/anatomy_loss_seg.txt +150 -0
  40. deepatlas_results/Task002_rnmw153_sim_Zsc/training_results/SegNet/anatomy_seg_losses.png +0 -0
  41. deepatlas_results/Task002_rnmw153_sim_Zsc/training_results/SegNet/seg_net_best.pth +3 -0
  42. deepatlas_results/Task002_rnmw153_sim_Zsc/training_results/SegNet/seg_net_training_losses.png +0 -0
  43. deepatlas_results/Task002_rnmw153_sim_Zsc/training_results/SegNet/supervised_loss_seg.txt +150 -0
  44. deepatlas_results/Task002_rnmw153_sim_Zsc/training_results/SegNet/supervised_seg_losses.png +0 -0
  45. deepatlas_results/Task002_rnmw153_sim_Zsc/training_results/training_log.txt +515 -0
  46. requirements.txt +164 -0
.gitattributes CHANGED
@@ -33,3 +33,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ assets/*.gif filter=lfs diff=lfs merge=lfs -text
37
+ deepatlas/results/Task002_rnmw153_sim_Zsc/training_results/RegNet/*.pth filter=lfs diff=lfs merge=lfs -text
38
+ deepatlas/results/Task002_rnmw153_sim_Zsc/training_results/SegNet/*.pth filter=lfs diff=lfs merge=lfs -text
LICENSE ADDED
@@ -0,0 +1,201 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Apache License
2
+ Version 2.0, January 2004
3
+ http://www.apache.org/licenses/
4
+
5
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
+
7
+ 1. Definitions.
8
+
9
+ "License" shall mean the terms and conditions for use, reproduction,
10
+ and distribution as defined by Sections 1 through 9 of this document.
11
+
12
+ "Licensor" shall mean the copyright owner or entity authorized by
13
+ the copyright owner that is granting the License.
14
+
15
+ "Legal Entity" shall mean the union of the acting entity and all
16
+ other entities that control, are controlled by, or are under common
17
+ control with that entity. For the purposes of this definition,
18
+ "control" means (i) the power, direct or indirect, to cause the
19
+ direction or management of such entity, whether by contract or
20
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
21
+ outstanding shares, or (iii) beneficial ownership of such entity.
22
+
23
+ "You" (or "Your") shall mean an individual or Legal Entity
24
+ exercising permissions granted by this License.
25
+
26
+ "Source" form shall mean the preferred form for making modifications,
27
+ including but not limited to software source code, documentation
28
+ source, and configuration files.
29
+
30
+ "Object" form shall mean any form resulting from mechanical
31
+ transformation or translation of a Source form, including but
32
+ not limited to compiled object code, generated documentation,
33
+ and conversions to other media types.
34
+
35
+ "Work" shall mean the work of authorship, whether in Source or
36
+ Object form, made available under the License, as indicated by a
37
+ copyright notice that is included in or attached to the work
38
+ (an example is provided in the Appendix below).
39
+
40
+ "Derivative Works" shall mean any work, whether in Source or Object
41
+ form, that is based on (or derived from) the Work and for which the
42
+ editorial revisions, annotations, elaborations, or other modifications
43
+ represent, as a whole, an original work of authorship. For the purposes
44
+ of this License, Derivative Works shall not include works that remain
45
+ separable from, or merely link (or bind by name) to the interfaces of,
46
+ the Work and Derivative Works thereof.
47
+
48
+ "Contribution" shall mean any work of authorship, including
49
+ the original version of the Work and any modifications or additions
50
+ to that Work or Derivative Works thereof, that is intentionally
51
+ submitted to Licensor for inclusion in the Work by the copyright owner
52
+ or by an individual or Legal Entity authorized to submit on behalf of
53
+ the copyright owner. For the purposes of this definition, "submitted"
54
+ means any form of electronic, verbal, or written communication sent
55
+ to the Licensor or its representatives, including but not limited to
56
+ communication on electronic mailing lists, source code control systems,
57
+ and issue tracking systems that are managed by, or on behalf of, the
58
+ Licensor for the purpose of discussing and improving the Work, but
59
+ excluding communication that is conspicuously marked or otherwise
60
+ designated in writing by the copyright owner as "Not a Contribution."
61
+
62
+ "Contributor" shall mean Licensor and any individual or Legal Entity
63
+ on behalf of whom a Contribution has been received by Licensor and
64
+ subsequently incorporated within the Work.
65
+
66
+ 2. Grant of Copyright License. Subject to the terms and conditions of
67
+ this License, each Contributor hereby grants to You a perpetual,
68
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69
+ copyright license to reproduce, prepare Derivative Works of,
70
+ publicly display, publicly perform, sublicense, and distribute the
71
+ Work and such Derivative Works in Source or Object form.
72
+
73
+ 3. Grant of Patent License. Subject to the terms and conditions of
74
+ this License, each Contributor hereby grants to You a perpetual,
75
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76
+ (except as stated in this section) patent license to make, have made,
77
+ use, offer to sell, sell, import, and otherwise transfer the Work,
78
+ where such license applies only to those patent claims licensable
79
+ by such Contributor that are necessarily infringed by their
80
+ Contribution(s) alone or by combination of their Contribution(s)
81
+ with the Work to which such Contribution(s) was submitted. If You
82
+ institute patent litigation against any entity (including a
83
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
84
+ or a Contribution incorporated within the Work constitutes direct
85
+ or contributory patent infringement, then any patent licenses
86
+ granted to You under this License for that Work shall terminate
87
+ as of the date such litigation is filed.
88
+
89
+ 4. Redistribution. You may reproduce and distribute copies of the
90
+ Work or Derivative Works thereof in any medium, with or without
91
+ modifications, and in Source or Object form, provided that You
92
+ meet the following conditions:
93
+
94
+ (a) You must give any other recipients of the Work or
95
+ Derivative Works a copy of this License; and
96
+
97
+ (b) You must cause any modified files to carry prominent notices
98
+ stating that You changed the files; and
99
+
100
+ (c) You must retain, in the Source form of any Derivative Works
101
+ that You distribute, all copyright, patent, trademark, and
102
+ attribution notices from the Source form of the Work,
103
+ excluding those notices that do not pertain to any part of
104
+ the Derivative Works; and
105
+
106
+ (d) If the Work includes a "NOTICE" text file as part of its
107
+ distribution, then any Derivative Works that You distribute must
108
+ include a readable copy of the attribution notices contained
109
+ within such NOTICE file, excluding those notices that do not
110
+ pertain to any part of the Derivative Works, in at least one
111
+ of the following places: within a NOTICE text file distributed
112
+ as part of the Derivative Works; within the Source form or
113
+ documentation, if provided along with the Derivative Works; or,
114
+ within a display generated by the Derivative Works, if and
115
+ wherever such third-party notices normally appear. The contents
116
+ of the NOTICE file are for informational purposes only and
117
+ do not modify the License. You may add Your own attribution
118
+ notices within Derivative Works that You distribute, alongside
119
+ or as an addendum to the NOTICE text from the Work, provided
120
+ that such additional attribution notices cannot be construed
121
+ as modifying the License.
122
+
123
+ You may add Your own copyright statement to Your modifications and
124
+ may provide additional or different license terms and conditions
125
+ for use, reproduction, or distribution of Your modifications, or
126
+ for any such Derivative Works as a whole, provided Your use,
127
+ reproduction, and distribution of the Work otherwise complies with
128
+ the conditions stated in this License.
129
+
130
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
131
+ any Contribution intentionally submitted for inclusion in the Work
132
+ by You to the Licensor shall be under the terms and conditions of
133
+ this License, without any additional terms or conditions.
134
+ Notwithstanding the above, nothing herein shall supersede or modify
135
+ the terms of any separate license agreement you may have executed
136
+ with Licensor regarding such Contributions.
137
+
138
+ 6. Trademarks. This License does not grant permission to use the trade
139
+ names, trademarks, service marks, or product names of the Licensor,
140
+ except as required for reasonable and customary use in describing the
141
+ origin of the Work and reproducing the content of the NOTICE file.
142
+
143
+ 7. Disclaimer of Warranty. Unless required by applicable law or
144
+ agreed to in writing, Licensor provides the Work (and each
145
+ Contributor provides its Contributions) on an "AS IS" BASIS,
146
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147
+ implied, including, without limitation, any warranties or conditions
148
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149
+ PARTICULAR PURPOSE. You are solely responsible for determining the
150
+ appropriateness of using or redistributing the Work and assume any
151
+ risks associated with Your exercise of permissions under this License.
152
+
153
+ 8. Limitation of Liability. In no event and under no legal theory,
154
+ whether in tort (including negligence), contract, or otherwise,
155
+ unless required by applicable law (such as deliberate and grossly
156
+ negligent acts) or agreed to in writing, shall any Contributor be
157
+ liable to You for damages, including any direct, indirect, special,
158
+ incidental, or consequential damages of any character arising as a
159
+ result of this License or out of the use or inability to use the
160
+ Work (including but not limited to damages for loss of goodwill,
161
+ work stoppage, computer failure or malfunction, or any and all
162
+ other commercial damages or losses), even if such Contributor
163
+ has been advised of the possibility of such damages.
164
+
165
+ 9. Accepting Warranty or Additional Liability. While redistributing
166
+ the Work or Derivative Works thereof, You may choose to offer,
167
+ and charge a fee for, acceptance of support, warranty, indemnity,
168
+ or other liability obligations and/or rights consistent with this
169
+ License. However, in accepting such obligations, You may act only
170
+ on Your own behalf and on Your sole responsibility, not on behalf
171
+ of any other Contributor, and only if You agree to indemnify,
172
+ defend, and hold each Contributor harmless for any liability
173
+ incurred by, or claims asserted against, such Contributor by reason
174
+ of your accepting any such warranty or additional liability.
175
+
176
+ END OF TERMS AND CONDITIONS
177
+
178
+ APPENDIX: How to apply the Apache License to your work.
179
+
180
+ To apply the Apache License to your work, attach the following
181
+ boilerplate notice, with the fields enclosed by brackets "[]"
182
+ replaced with your own identifying information. (Don't include
183
+ the brackets!) The text should be enclosed in the appropriate
184
+ comment syntax for the file format. We also recommend that a
185
+ file or class name and description of purpose be included on the
186
+ same "printed page" as the copyright notice for easier
187
+ identification within third-party archives.
188
+
189
+ Copyright [yyyy] [name of copyright owner]
190
+
191
+ Licensed under the Apache License, Version 2.0 (the "License");
192
+ you may not use this file except in compliance with the License.
193
+ You may obtain a copy of the License at
194
+
195
+ http://www.apache.org/licenses/LICENSE-2.0
196
+
197
+ Unless required by applicable law or agreed to in writing, software
198
+ distributed under the License is distributed on an "AS IS" BASIS,
199
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200
+ See the License for the specific language governing permissions and
201
+ limitations under the License.
README.md CHANGED
@@ -1,3 +1,191 @@
1
- ---
2
- license: mit
3
- ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <h2 align="center"> [OTO–HNS2024] A Label-Efficient Framework for Automated Sinonasal CT Segmentation in Image-Guided Surgery </h2>
2
+ <p align="center">
3
+ <a href="https://aao-hnsfjournals.onlinelibrary.wiley.com/doi/10.1002/ohn.868"><img src="https://img.shields.io/badge/Wiley-Paper-red"></a>
4
+ <a href="https://pubmed.ncbi.nlm.nih.gov/38922721/"><img src="https://img.shields.io/badge/PubMed-Link-blue"></a>
5
+ <a href="https://github.com/mikami520/AutoSeg4SinonasalCT"><img src="https://img.shields.io/badge/Code-Page-magenta"></a>
6
+ </p>
7
+ <h5 align="center"><em>Manish Sahu<sup>*</sup>, Yuliang Xiao<sup>*</sup>, Jose L. Porras, Ameen Amanian, Aseem Jain, Andrew Thamboo, Russell H. Taylor, Francis X. Creighton, Masaru Ishii</em></h5>
8
+ <p align="center"> <sup>*</sup> Indicates Equal Contribution </p>
9
+ <p align="center">
10
+ <a href="#news">News</a> |
11
+ <a href="#abstract">Abstract</a> |
12
+ <a href="#installation">Installation</a> |
13
+ <a href="#preprocess">Preprocess</a> |
14
+ <a href="#train">Train</a> |
15
+ <a href="#inference">Inference</a>
16
+ </p>
17
+
18
+ ## News
19
+
20
+ **2024.06.07** - The data preprocessing, training, inference, and evaluation code are released.
21
+
22
+ **2024.06.03** - Our paper is accepted to **American Academy of Otolaryngology–Head and Neck Surgery 2024 (OTO-HNS2024)**.
23
+
24
+ ## Abstract
25
+
26
+ - Objective: Segmentation, the partitioning of patient imaging into multiple, labeled segments, has several potential clinical benefits but when performed manually is tedious and resource intensive. Automated deep learning (DL)-based segmentation methods can streamline the process. The objective of this study was to evaluate a label-efficient DL pipeline that requires only a small number of annotated scans for semantic segmentation of sinonasal structures in CT scans.
27
+
28
+ - Methods: Forty CT scans were used in this study including 16 scans in which the nasal septum (NS), inferior turbinate (IT), maxillary sinus (MS), and optic nerve (ON) were manually annotated using an open-source software. A label-efficient DL framework was used to train jointly on a few manually labeled scans and the remaining unlabeled scans. Quantitative analysis was then performed to obtain the number of annotated scans needed to achieve submillimeter average surface distances (ASDs).
29
+
30
+ - Results: Our findings reveal that merely four labeled scans are necessary to achieve median submillimeter ASDs for large sinonasal structures—NS (0.96 mm), IT (0.74 mm), and MS (0.43 mm), whereas eight scans are required for smaller structures—ON (0.80 mm).
31
+
32
+ - Conclusion: We have evaluated a label-efficient pipeline for segmentation of sinonasal structures. Empirical results demonstrate that automated DL methods can achieve submillimeter accuracy using a small number of labeled CT scans. Our pipeline has the potential to improve preoperative planning workflows, robotic- and image-guidance navigation systems, computer-assisted diagnosis, and the construction of statistical shape models to quantify population variations.
33
+
34
+ <p align="center">
35
+ <img src="assets/nasal.gif" />
36
+ <b>Figure 1: 3D Heatmap Visualization of Nasal Septum (NS), Inferior Turbinate (IT), Maxillary Sinus (MS), and Optic Nerve (ON)</b>
37
+ </p>
38
+
39
+ ## Installation
40
+
41
+ ### Step 1: Fork This GitHub Repository
42
+
43
+ ```bash
44
+ git clone https://github.com/mikami520/AutoSeg4SinonasalCT.git
45
+ ```
46
+
47
+ ### Step 2: Set Up Environment Using requirements.txt (virtual environment is recommended)
48
+
49
+ ```bash
50
+ pip install -r requirements.txt
51
+ source /path/to/VIRTUAL_ENVIRONMENT/bin/activate
52
+ ```
53
+
54
+ ## Preprocess
55
+
56
+ ### Step 1: Co-align the data (make sure scan and segmentation are co-aligned)
57
+
58
+ ```bash
59
+ cd <path to repo>/deepatlas/preprocess
60
+ ```
61
+
62
+ Co-align the scans and labels (recommendation: similarity registration)
63
+
64
+ ```bash
65
+ python registration_training.py
66
+ -bp <full path of base dir>
67
+ -ip <relative path to nifti images dir>
68
+ -sp <relative path to labels dir>
69
+ -ti <task id>
70
+ ```
71
+
72
+ If you want to ensure the correspondence between segmentation names and values, you can add the following options after the above command (**Option for nrrd format**)
73
+
74
+ ```bash
75
+ -sl LabelValue1 LabelName1 LabelValue2 LabelName2 LabelValue3 LabelName3 ...
76
+ ```
77
+
78
+ For example, if I have two labels for maxillary sinus named ```L-MS``` and ```R-MS``` and I want ```L-MS``` matched to ```label 1``` and ```R-MS``` to ```label 2``` (**Pay attention to the order**)
79
+
80
+ ```bash
81
+ python registration_training.py -bp /Users/mikamixiao/Desktop -ip images -sp labels -sl 1 L-MS 2 R-MS
82
+ ```
83
+
84
+ Final output of registered images and segmentations will be saved in
85
+
86
+ ```text
87
+ base_dir/deepatlas_raw_data_base/task_id/Training_dataset/images && base_dir/deepatlas_raw_data_base/task_id/Training_dataset/labels
88
+ ```
89
+
90
+ ### Step 2: Crop Normalize and Flip Data (if needed)
91
+
92
+ Crop, normalize, and flip data to extract the region of interest (ROI). **Notice: the images and segmentations should be co-registered. We recommend using the outputs of Step 2.1**
93
+
94
+ ```bash
95
+ python crop_flip_training.py
96
+ -fp <if need to flip data, use flag for true and not use for false>
97
+ -ti <task id>
98
+ -rs <customized resized shape>
99
+ ```
100
+
101
+ **Pay attention to the resized dimension, which should not be smaller than the cropped dimension**\
102
+ Final output of ROI will be saved in
103
+
104
+ ```text
105
+ base_dir/deepatlas_preprocessed/task_id/Training_dataset/images && base_dir/deepatlas_preprocessed/task_id/Training_dataset/labels
106
+ ```
107
+
108
+ ## Train
109
+
110
+ ```bash
111
+ cd <path to repo>/deepatlas/scripts
112
+ python deep_atlas_train.py
113
+ --config <configuration file of network parameters>
114
+ --continue_training <check if need to resume training>
115
+ --train_only <only training or training plus test>
116
+ --plot_network <whether to plot the neural network architecture>
117
+ ```
118
+
119
+ **For detailed information, use ```-h``` to see more instructions**
120
+ Before training, a folder named ```deepatlas_results``` is created automatically under the repository directory. All training results are stored in this folder. A clear structure is shown below:
121
+
122
+ ```text
123
+ DeepAtlas/deepatlas_results/
124
+ ├── Task001_ET
125
+ | └── results
126
+ | └── RegNet
127
+ | |── anatomy_loss_reg.txt
128
+ | |── anatomy_reg_losses.png
129
+ | |── reg_net_best.pth
130
+ | |── reg_net_training_losses.png
131
+ | |── regularization_loss.txt
132
+ | |── regularization_reg_losses.png
133
+ | |── similarity_loss_reg.txt
134
+ | |── similarity_reg_losses.png
135
+ | └── SegNet
136
+ | |── anatomy_loss_seg.txt
137
+ | |── anatomy_seg_losses.png
138
+ | |── seg_net_best.pth
139
+ | |── seg_net_training_losses.png
140
+ | |── supervised_loss_seg.txt
141
+ | |── supervised_seg_losses.png
142
+ | └── training_log.txt
143
+ | └── dataset.json
144
+ ├── Task002_Nasal_Cavity
145
+ ```
146
+
147
+ ## Inference
148
+
149
+ ```
150
+ python deep_atlas_test.py
151
+ -gpu <id of gpu device to use>
152
+ -op <relative path of the prediction result directory>
153
+ -ti <task id and name>
154
+ ```
155
+
156
+ The final prediction results will be saved in the ```DeepAtlas_dataset/Task_id_and_Name``` directory. For example,
157
+
158
+ ```text
159
+ DeepAtlas/DeepAtlas_dataset/
160
+ ├── Task001_ET
161
+ | └── results
162
+ | └── RegNet
163
+ | |── anatomy_loss_reg.txt
164
+ | |── anatomy_reg_losses.png
165
+ | |── reg_net_best.pth
166
+ | |── reg_net_training_losses.png
167
+ | |── regularization_loss.txt
168
+ | |── regularization_reg_losses.png
169
+ | |── similarity_loss_reg.txt
170
+ | |── similarity_reg_losses.png
171
+ | └── SegNet
172
+ | |── anatomy_loss_seg.txt
173
+ | |── anatomy_seg_losses.png
174
+ | |── seg_net_best.pth
175
+ | |── seg_net_training_losses.png
176
+ | |── supervised_loss_seg.txt
177
+ | |── supervised_seg_losses.png
178
+ | └── training_log.txt
179
+ | └── prediction
180
+ | └── RegNet
181
+ | |── reg_img_losses.txt
182
+ | |── reg_seg_dsc.txt
183
+ | |── figures containing fixed, moving, warped scans deformation field and jacobian determinant
184
+ | |── warped scans and labels in nifti format
185
+ | └── SegNet
186
+ | |── seg_dsc.txt
187
+ | |── predicted labels in nifti format
188
+ | └── dataset.json
189
+ ├── Task002_Nasal_Cavity
190
+ ```
191
+
assets/nasal.gif ADDED

Git LFS Details

  • SHA256: 65b1b443eda67a216acc742597536de04cec28870f7d3be319170dd546b57640
  • Pointer size: 132 Bytes
  • Size of remote file: 9.76 MB
deepatlas/loss_function/losses.py ADDED
@@ -0,0 +1,118 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import monai
2
+ import torch
3
+ import numpy as np
4
+ import matplotlib.pyplot as plt
5
+
6
+
7
def warp_func():
    """Build a MONAI warp block that resamples with bilinear interpolation.

    Suitable for warping intensity images (and per-class probability maps),
    where smooth interpolation between voxels is desired.
    """
    return monai.networks.blocks.Warp(mode="bilinear", padding_mode="border")
10
+
11
+
12
def warp_nearest_func():
    """Build a MONAI warp block that resamples with nearest-neighbour lookup.

    Nearest-neighbour keeps values discrete, so this variant is appropriate
    for warping label maps.
    """
    return monai.networks.blocks.Warp(mode="nearest", padding_mode="border")
16
+
17
+
18
def lncc_loss_func():
    """Build the image-similarity loss: 3-D local normalized cross-correlation.

    Uses a 3-voxel rectangular kernel and mean reduction; the smoothing terms
    guard against division by zero in near-constant patches.
    """
    return monai.losses.LocalNormalizedCrossCorrelationLoss(
        spatial_dims=3,
        kernel_size=3,
        kernel_type='rectangular',
        reduction="mean",
        smooth_nr=1e-5,
        smooth_dr=1e-5,
    )
28
+
29
+
30
def similarity_loss(displacement_field, image_pair):
    """Return the LNCC loss between the warped moving image and the target.

    Accepts a batch of displacement fields, shape (B,3,H,W,D),
    and a batch of image pairs, shape (B,2,H,W,D): channel 0 is the target
    (fixed) image and channel 1 the moving image.

    NOTE: in the original code this docstring was placed *after* the first
    two statements, making it a discarded string expression rather than the
    function's docstring; it has been moved to the top.
    """
    warp = warp_func()
    lncc_loss = lncc_loss_func()
    warped_img2 = warp(image_pair[:, [1], :, :, :], displacement_field)
    return lncc_loss(
        warped_img2,                  # prediction: moving image warped toward target
        image_pair[:, [0], :, :, :],  # target image
    )
40
+
41
+
42
def regularization_loss_func():
    """Build the displacement-field regularizer: normalized bending energy,
    averaged over the batch (mean reduction)."""
    return monai.losses.BendingEnergyLoss(normalize=True, reduction='mean')
45
+
46
+
47
def dice_loss_func():
    """Build a Dice loss for inputs that are already class probabilities.

    No softmax and no one-hot conversion are applied, so both prediction and
    target must already be (B,C,H,W,D) probability/one-hot tensors.
    """
    return monai.losses.DiceLoss(
        include_background=True,
        to_onehot_y=False,
        softmax=False,
        reduction="mean",
    )
55
+
56
+
57
def dice_loss_func2():
    """Build a Dice loss for raw network logits and label-map targets.

    Softmax is applied to the prediction and the target is one-hot encoded,
    so the prediction is (B,C,H,W,D) logits and the target (B,1,H,W,D) labels.
    """
    return monai.losses.DiceLoss(
        include_background=True,
        to_onehot_y=True,
        softmax=True,
        reduction="mean",
    )
65
+
66
+
67
def anatomy_loss(displacement_field, image_pair, seg_net, gt_seg1=None, gt_seg2=None, num_segmentation_classes=None):
    """Dice loss between the warped moving-image segmentation and the target one.

    Accepts a batch of displacement fields, shape (B,3,H,W,D), and a batch of
    image pairs, shape (B,2,H,W,D) (channel 0 target, channel 1 moving).
    `seg_net` maps (B,1,H,W,D) images to (B,C,H,W,D) segmentation logits,
    where C is the number of segmentation classes. `gt_seg1`/`gt_seg2` are
    optional ground-truth label maps, shape (B,1,H,W,D); when absent, the
    corresponding segmentation is predicted by `seg_net` ("noisy ground
    truth").
    """
    def _probabilities(gt_seg, channel):
        # Prefer ground truth (one-hot encoded); otherwise fall back to the
        # segmentation network's softmax prediction for that image channel.
        if gt_seg is not None:
            return monai.networks.one_hot(gt_seg, num_segmentation_classes)
        return seg_net(image_pair[:, [channel], :, :, :]).softmax(dim=1)

    seg1 = _probabilities(gt_seg1, 0)  # target-image segmentation
    seg2 = _probabilities(gt_seg2, 1)  # moving-image segmentation

    # Both tensors now hold per-voxel class probabilities, so the trilinear
    # interpolation performed by `warp` preserves their probabilistic
    # interpretation when seg2 is warped toward the target.
    dice_loss = dice_loss_func()
    warp = warp_func()
    return dice_loss(
        warp(seg2, displacement_field),  # warped moving-image segmentation
        seg1,                            # target-image segmentation
    )
104
+
105
+
106
def reg_losses(batch, device, reg_net, seg_net, num_segmentation_classes):
    """Compute all registration losses for one batch.

    Runs `reg_net` on the stacked image pair (`batch['img12']`) and returns
    (similarity loss, regularization loss, anatomy loss, displacement field).
    Ground-truth segmentations (`seg1`/`seg2`) are used when present in the
    batch; otherwise `anatomy_loss` falls back to `seg_net` predictions.
    """
    img12 = batch['img12'].to(device)
    displacement_field12 = reg_net(img12)

    loss_sim = similarity_loss(displacement_field12, img12)
    loss_reg = regularization_loss_func()(displacement_field12)

    gt_seg1 = batch['seg1'].to(device) if 'seg1' in batch else None
    gt_seg2 = batch['seg2'].to(device) if 'seg2' in batch else None
    loss_ana = anatomy_loss(
        displacement_field12, img12, seg_net,
        gt_seg1, gt_seg2, num_segmentation_classes,
    )

    return loss_sim, loss_reg, loss_ana, displacement_field12
deepatlas/metrics/distanceVertex2Mesh.py ADDED
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import pyvista as pv
3
+ import argparse
4
+ import os
5
+ import glob
6
+ import trimesh
7
+
8
+
9
def parse_command_line():
    """Parse command-line arguments for the vertex-to-mesh distance tool.

    Returns an argparse.Namespace with: bp (absolute base directory),
    gp (relative ground-truth model path), pp (relative prediction path).
    """
    print('-' * 30)
    print('Parsing Command Line Arguments')
    parser = argparse.ArgumentParser(description='Defacing protocol')
    for flag, meta, help_text in (
        ('-bp', 'base path', "Absolute path of the base directory"),
        ('-gp', 'ground truth path', "Relative path of the ground truth model"),
        ('-pp', 'prediction path', "Relative path of the prediction model"),
    ):
        parser.add_argument(flag, metavar=meta, type=str, help=help_text)
    return parser.parse_args()
21
+
22
+
23
def distanceVertex2Mesh(mesh, vertex):
    """Return the distance from each query point to the surface of `mesh`.

    mesh: a PyVista surface mesh; assumed to be all-triangle, since each face
        record is read as four ints (count, i0, i1, i2) — TODO confirm callers
        only pass triangulated meshes.
    vertex: array of query points, presumably shape (N, 3) — verify caller.
    Returns the per-point closest-surface distances computed by trimesh.
    """
    # PyVista stores faces flat as [3, i0, i1, i2, 3, ...]; dropping the
    # leading count column yields an (n_faces, 3) triangle index array.
    faces_as_array = mesh.faces.reshape((mesh.n_faces, 4))[:, 1:]
    mesh_box = trimesh.Trimesh(vertices=mesh.points,
                               faces=faces_as_array)
    # closest_point returns (closest points, distances, triangle ids);
    # only the distances are used here.
    cp, cd, ci = trimesh.proximity.closest_point(mesh_box, vertex)
    return cd
29
+
30
+
31
def main():
    """Compute vertex-to-surface distances for every ground-truth/prediction
    mesh pair and save each prediction mesh with a per-vertex 'dist' array.

    Expects matching *.vtk filenames under <base>/<gt_path> and
    <base>/<pred_path>; annotated meshes are written to
    <base>/output/<filename>/<filename>.vtk.
    """
    args = parse_command_line()
    base = args.bp
    gt_path = args.gp
    pred_path = args.pp

    # makedirs(exist_ok=True) replaces the previous bare try/except around
    # os.mkdir, which silently swallowed *all* errors (e.g. permission
    # failures), not just "directory already exists".
    output_dir = os.path.join(base, 'output')
    os.makedirs(output_dir, exist_ok=True)

    for gt_file in glob.glob(os.path.join(base, gt_path) + '/*.vtk'):
        filename = os.path.basename(gt_file).split('.')[0]
        output_sub_dir = os.path.join(output_dir, filename)
        os.makedirs(output_sub_dir, exist_ok=True)

        gt_mesh = pv.read(gt_file)
        pred_mesh = pv.read(os.path.join(
            base, pred_path, filename + '.vtk'))
        pred_vertices = np.array(pred_mesh.points)
        # Distance from each predicted vertex to the ground-truth surface.
        cd = distanceVertex2Mesh(gt_mesh, pred_vertices)
        pred_mesh['dist'] = cd
        pred_mesh.save(os.path.join(output_sub_dir, filename + '.vtk'))


if __name__ == '__main__':
    main()
deepatlas/metrics/get_probability_map.py ADDED
@@ -0,0 +1,194 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import pyvista as pv
3
+ import argparse
4
+ import os
5
+ import glob
6
+ import skeletor as sk
7
+ import trimesh
8
+ import navis
9
+
10
+
11
def parse_command_line():
    """Parse flags for the probability-map generation script.

    Returns:
        argparse.Namespace with ``bp`` (base directory), ``gp``
        (ground-truth dir), ``pp`` (prediction dir), ``rr`` (list of
        int split ratios) and ``ps`` (list of float probabilities,
        one per split region).
    """
    print('---'*10)
    print('Parsing Command Line Arguments')
    parser = argparse.ArgumentParser(description='Defacing protocol')
    # (flag, add_argument keyword dict) pairs; -rr/-ps accept 1+ values.
    option_specs = [
        ('-bp', dict(metavar='base path', type=str,
                     help="Absolute path of the base directory")),
        ('-gp', dict(metavar='ground truth path', type=str,
                     help="Relative path of the ground truth model")),
        ('-pp', dict(metavar='prediction path', type=str,
                     help="Relative path of the prediction model")),
        ('-rr', dict(metavar='ratio to split skeleton', type=int, nargs='+',
                     help="Ratio to split the skeleton")),
        ('-ps', dict(metavar='probability sequences', type=float, nargs='+',
                     help="Proability sequences for each splitted region")),
    ]
    for flag, kwargs in option_specs:
        parser.add_argument(flag, **kwargs)
    return parser.parse_args()
27
+
28
+
29
def distanceVertex2Path(mesh, skeleton, probability_map):
    """Map every mesh vertex to the probability of its nearest skeleton node.

    Args:
        mesh: pyvista mesh exposing ``points`` and ``is_all_triangles()``.
        skeleton: object exposing ``vertices`` as an (numT, 3) array.
        probability_map: per-skeleton-node probabilities; must cover the
            index range of the skeleton nodes.

    Returns:
        List of length numV holding, for each mesh point, the probability
        of its closest skeleton node; ``np.inf`` on invalid input.
    """
    if len(probability_map) == 0:
        print('empty probability_map !!!')
        return np.inf

    if not mesh.is_all_triangles():
        print('only triangulations is allowed (Faces do not have 3 Vertices)!')
        return np.inf

    if hasattr(mesh, 'points'):
        points = np.array(mesh.points)
    else:
        print('mesh structure must contain fields ''vertices'' and ''faces''!')
        return np.inf

    if hasattr(skeleton, 'vertices'):
        vertex = skeleton.vertices
    else:
        print('skeleton structure must contain fields ''vertices'' !!!')
        return np.inf

    numV, dim = points.shape
    numT, dimT = vertex.shape

    if dim != dimT or dim != 3:
        print('mesh and vertices must be in 3D space!')
        return np.inf

    # Vectorized nearest-neighbour search replaces the original
    # O(numV * numT) pure-Python double loop: build the full
    # (numV, numT) distance matrix and take the row-wise argmin.
    diff = points[:, np.newaxis, :] - np.asarray(vertex)[np.newaxis, :, :]
    dists = np.linalg.norm(diff, axis=2)
    nearest = np.argmin(dists, axis=1)
    pm = [probability_map[j] for j in nearest]

    print("check is finished !!!")
    return pm
74
+
75
+
76
def generate_probability_map(skeleton, split_ratio, probability):
    # Assign a probability to every skeleton vertex by splitting the
    # vertices into left/right halves about the centroid x-coordinate and
    # carving each half into consecutive sections of the given ratios.
    #
    # skeleton: object with ``vertices`` (N, 3) and ``skeleton.centroid``.
    # split_ratio: section sizes as percentages (expected to sum to ~100).
    # probability: one probability per section; applied front-to-back on
    #     the left half and back-to-front (mirrored) on the right half.
    # Returns a list of N probabilities ordered like ``skeleton.vertices``.
    points = skeleton.vertices
    center = skeleton.skeleton.centroid
    # Sort all x-coordinates, then partition them about the centroid's x.
    x = sorted(points[:, 0])
    left = []
    right = []
    for i in range(len(x)):
        if x[i] < center[0]:
            left.append(x[i])
        else:
            right.append(x[i])

    right_map = []
    left_map = []
    sec_old = 0
    # Walk the ratios forward over the left half; the final section takes
    # whatever remains so rounding never drops a vertex.
    for j in range(len(split_ratio)):
        if j == len(split_ratio) - 1:
            sec_len = len(left) - sec_old
        else:
            sec_len = int(round(len(left) * split_ratio[j] / 100))

        for k in range(sec_old, sec_old + sec_len):
            left_map.append(probability[j])

        sec_old += sec_len

    sec_old = 0
    # Mirror image for the right half: ratios are consumed in reverse so
    # the map is symmetric about the centroid.
    for j in range(len(split_ratio)-1, -1, -1):
        if j == 0:
            sec_len = len(right) - sec_old
        else:
            sec_len = int(round(len(right) * split_ratio[j] / 100))

        for k in range(sec_old, sec_old + sec_len):
            right_map.append(probability[j])

        sec_old += sec_len

    final_map = []
    row = points.shape[0]
    assert len(left) + len(right) == row
    # Look each vertex's x-coordinate back up in the sorted halves to
    # recover its per-section probability in original vertex order.
    # NOTE(review): duplicate x values all resolve to the first matching
    # index, and the membership test relies on exact float equality --
    # confirm this is acceptable for the expected skeleton data.
    for m in range(row):
        ver_x = points[m, 0]
        if ver_x in left:
            index = left.index(ver_x)
            final_map.append(left_map[index])
        else:
            index = right.index(ver_x)
            final_map.append(right_map[index])

    return final_map
127
+
128
+
129
def skeleton(mesh):
    # Extract a curve skeleton from a pyvista surface mesh with skeletor,
    # then prune it to its two longest branches via navis and return it
    # as a skeletor.Skeleton with node IDs renumbered from 0.
    #
    # mesh: pyvista PolyData-like object with ``points``/``faces``/``n_faces``.
    # pyvista stores each triangle as [3, i0, i1, i2]; drop the count column.
    faces_as_array = mesh.faces.reshape((mesh.n_faces, 4))[:, 1:]
    trmesh = trimesh.Trimesh(mesh.points, faces_as_array)
    # Clean the mesh (small disconnected fragments removed) before
    # wavefront skeletonization.
    fixed = sk.pre.fix_mesh(trmesh, remove_disconnected=5, inplace=False)
    skel = sk.skeletonize.by_wavefront(fixed, waves=1, step_size=1)
    # Create a neuron from your skeleton
    n = navis.TreeNeuron(skel, soma=None)
    # keep only the two longest linear section in your skeleton
    long2 = navis.longest_neurite(n, n=2, from_root=False)

    # This renumbers nodes
    swc = navis.io.swc_io.make_swc_table(long2)
    # We also need to rename some columns
    swc = swc.rename({'PointNo': 'node_id', 'Parent': 'parent_id', 'X': 'x',
                      'Y': 'y', 'Z': 'z', 'Radius': 'radius'}, axis=1).drop('Label', axis=1)
    # Skeletor excepts node IDs to start with 0, but navis starts at 1 for SWC
    swc['node_id'] -= 1
    # Shift parents in step with the node IDs (roots, parent_id <= 0, stay).
    swc.loc[swc.parent_id > 0, 'parent_id'] -= 1
    # Create the skeletor.Skeleton
    skel2 = sk.Skeleton(swc)
    return skel2
150
+
151
+
152
def distance3DV2V(v1, v2):
    """Euclidean distance between two points.

    Args:
        v1, v2: array-likes of identical shape (typically length-3).

    Returns:
        float: the L2 norm of ``v1 - v2``.
    """
    # np.asarray generalizes the original (which required ndarray
    # operands) to also accept plain sequences; ndarray inputs pass
    # through without copying, so existing callers are unaffected.
    return np.linalg.norm(np.asarray(v1) - np.asarray(v2))
155
+
156
+
157
def main():
    """Generate a per-vertex probability map for each predicted mesh.

    For every ground-truth ``.vtk`` file under ``<base>/<gt_path>``, the
    matching ``pred_<name>_<id>.vtk`` prediction is skeletonized, the
    skeleton is split into regions by the ``-rr`` ratios, each region is
    assigned its ``-ps`` probability, and the per-vertex map is written
    to ``<base>/output/<name>_<id>/<id>.txt``.
    """
    args = parse_command_line()
    base = args.bp
    gt_path = args.gp
    pred_path = args.pp
    area_ratio = args.rr
    prob_sequences = args.ps
    output_dir = os.path.join(base, 'output')
    # exist_ok replaces the original bare try/except around os.mkdir,
    # which also hid unrelated OSErrors (e.g. permission problems).
    os.makedirs(output_dir, exist_ok=True)

    for gt_file in glob.glob(os.path.join(base, gt_path) + '/*.vtk'):
        name_parts = os.path.basename(gt_file).split('.')[0].split('_')
        scan_name = name_parts[1]
        scan_id = name_parts[2]
        output_sub_dir = os.path.join(
            base, 'output', scan_name + '_' + scan_id)
        os.makedirs(output_sub_dir, exist_ok=True)

        # NOTE(review): the ground-truth mesh is loaded but never used
        # below; kept for parity with the original behavior.
        gt_mesh = pv.read(gt_file)
        pred_mesh = pv.read(os.path.join(
            base, pred_path, 'pred_' + scan_name + '_' + scan_id + '.vtk'))
        pred_skel = skeleton(pred_mesh)
        prob_map = generate_probability_map(
            pred_skel, area_ratio, prob_sequences)
        pm = distanceVertex2Path(pred_mesh, pred_skel, prob_map)
        # distanceVertex2Path signals invalid input by returning np.inf.
        # (The original used np.Inf, an alias removed in NumPy 2.0.)
        if pm == np.inf:
            print('something with mesh, probability map and skeleton are wrong !!!')
            return
        # output_sub_dir is already absolute; the original re-joined it
        # with `base`, which os.path.join discards anyway.
        np.savetxt(os.path.join(output_sub_dir, scan_id + '.txt'), pm)


if __name__ == '__main__':
    main()
deepatlas/metrics/lookup_tables.py ADDED
@@ -0,0 +1,463 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2018 Google Inc. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS-IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from __future__ import absolute_import
15
+ from __future__ import division
16
+ from __future__ import print_function
17
+
18
+ import math
19
+ import numpy as np
20
+ ENCODE_NEIGHBOURHOOD_3D_KERNEL = np.array([[[128, 64], [32, 16]], [[8, 4],
21
+ [2, 1]]])
22
+
23
+ """
24
+
25
+ lookup_tables.py
26
+
27
+ all of the lookup-tables functions are borrowed from DeepMind surface_distance repository
28
+
29
+ """
30
+
31
+
32
+ # _NEIGHBOUR_CODE_TO_NORMALS is a lookup table.
33
+ # For every binary neighbour code
34
+ # (2x2x2 neighbourhood = 8 neighbours = 8 bits = 256 codes)
35
+ # it contains the surface normals of the triangles (called "surfel" for
36
+ # "surface element" in the following). The length of the normal
37
+ # vector encodes the surfel area.
38
+ #
39
+ # created using the marching_cube algorithm
40
+ # see e.g. https://en.wikipedia.org/wiki/Marching_cubes
41
+ # pylint: disable=line-too-long
42
+ _NEIGHBOUR_CODE_TO_NORMALS = [
43
+ [[0, 0, 0]],
44
+ [[0.125, 0.125, 0.125]],
45
+ [[-0.125, -0.125, 0.125]],
46
+ [[-0.25, -0.25, 0.0], [0.25, 0.25, -0.0]],
47
+ [[0.125, -0.125, 0.125]],
48
+ [[-0.25, -0.0, -0.25], [0.25, 0.0, 0.25]],
49
+ [[0.125, -0.125, 0.125], [-0.125, -0.125, 0.125]],
50
+ [[0.5, 0.0, -0.0], [0.25, 0.25, 0.25], [0.125, 0.125, 0.125]],
51
+ [[-0.125, 0.125, 0.125]],
52
+ [[0.125, 0.125, 0.125], [-0.125, 0.125, 0.125]],
53
+ [[-0.25, 0.0, 0.25], [-0.25, 0.0, 0.25]],
54
+ [[0.5, 0.0, 0.0], [-0.25, -0.25, 0.25], [-0.125, -0.125, 0.125]],
55
+ [[0.25, -0.25, 0.0], [0.25, -0.25, 0.0]],
56
+ [[0.5, 0.0, 0.0], [0.25, -0.25, 0.25], [-0.125, 0.125, -0.125]],
57
+ [[-0.5, 0.0, 0.0], [-0.25, 0.25, 0.25], [-0.125, 0.125, 0.125]],
58
+ [[0.5, 0.0, 0.0], [0.5, 0.0, 0.0]],
59
+ [[0.125, -0.125, -0.125]],
60
+ [[0.0, -0.25, -0.25], [0.0, 0.25, 0.25]],
61
+ [[-0.125, -0.125, 0.125], [0.125, -0.125, -0.125]],
62
+ [[0.0, -0.5, 0.0], [0.25, 0.25, 0.25], [0.125, 0.125, 0.125]],
63
+ [[0.125, -0.125, 0.125], [0.125, -0.125, -0.125]],
64
+ [[0.0, 0.0, -0.5], [0.25, 0.25, 0.25], [-0.125, -0.125, -0.125]],
65
+ [[-0.125, -0.125, 0.125], [0.125, -0.125, 0.125], [0.125, -0.125, -0.125]],
66
+ [[-0.125, -0.125, -0.125], [-0.25, -0.25, -0.25],
67
+ [0.25, 0.25, 0.25], [0.125, 0.125, 0.125]],
68
+ [[-0.125, 0.125, 0.125], [0.125, -0.125, -0.125]],
69
+ [[0.0, -0.25, -0.25], [0.0, 0.25, 0.25], [-0.125, 0.125, 0.125]],
70
+ [[-0.25, 0.0, 0.25], [-0.25, 0.0, 0.25], [0.125, -0.125, -0.125]],
71
+ [[0.125, 0.125, 0.125], [0.375, 0.375, 0.375],
72
+ [0.0, -0.25, 0.25], [-0.25, 0.0, 0.25]],
73
+ [[0.125, -0.125, -0.125], [0.25, -0.25, 0.0], [0.25, -0.25, 0.0]],
74
+ [[0.375, 0.375, 0.375], [0.0, 0.25, -0.25],
75
+ [-0.125, -0.125, -0.125], [-0.25, 0.25, 0.0]],
76
+ [[-0.5, 0.0, 0.0], [-0.125, -0.125, -0.125],
77
+ [-0.25, -0.25, -0.25], [0.125, 0.125, 0.125]],
78
+ [[-0.5, 0.0, 0.0], [-0.125, -0.125, -0.125], [-0.25, -0.25, -0.25]],
79
+ [[0.125, -0.125, 0.125]],
80
+ [[0.125, 0.125, 0.125], [0.125, -0.125, 0.125]],
81
+ [[0.0, -0.25, 0.25], [0.0, 0.25, -0.25]],
82
+ [[0.0, -0.5, 0.0], [0.125, 0.125, -0.125], [0.25, 0.25, -0.25]],
83
+ [[0.125, -0.125, 0.125], [0.125, -0.125, 0.125]],
84
+ [[0.125, -0.125, 0.125], [-0.25, -0.0, -0.25], [0.25, 0.0, 0.25]],
85
+ [[0.0, -0.25, 0.25], [0.0, 0.25, -0.25], [0.125, -0.125, 0.125]],
86
+ [[-0.375, -0.375, 0.375], [-0.0, 0.25, 0.25],
87
+ [0.125, 0.125, -0.125], [-0.25, -0.0, -0.25]],
88
+ [[-0.125, 0.125, 0.125], [0.125, -0.125, 0.125]],
89
+ [[0.125, 0.125, 0.125], [0.125, -0.125, 0.125], [-0.125, 0.125, 0.125]],
90
+ [[-0.0, 0.0, 0.5], [-0.25, -0.25, 0.25], [-0.125, -0.125, 0.125]],
91
+ [[0.25, 0.25, -0.25], [0.25, 0.25, -0.25],
92
+ [0.125, 0.125, -0.125], [-0.125, -0.125, 0.125]],
93
+ [[0.125, -0.125, 0.125], [0.25, -0.25, 0.0], [0.25, -0.25, 0.0]],
94
+ [[0.5, 0.0, 0.0], [0.25, -0.25, 0.25],
95
+ [-0.125, 0.125, -0.125], [0.125, -0.125, 0.125]],
96
+ [[0.0, 0.25, -0.25], [0.375, -0.375, -0.375],
97
+ [-0.125, 0.125, 0.125], [0.25, 0.25, 0.0]],
98
+ [[-0.5, 0.0, 0.0], [-0.25, -0.25, 0.25], [-0.125, -0.125, 0.125]],
99
+ [[0.25, -0.25, 0.0], [-0.25, 0.25, 0.0]],
100
+ [[0.0, 0.5, 0.0], [-0.25, 0.25, 0.25], [0.125, -0.125, -0.125]],
101
+ [[0.0, 0.5, 0.0], [0.125, -0.125, 0.125], [-0.25, 0.25, -0.25]],
102
+ [[0.0, 0.5, 0.0], [0.0, -0.5, 0.0]],
103
+ [[0.25, -0.25, 0.0], [-0.25, 0.25, 0.0], [0.125, -0.125, 0.125]],
104
+ [[-0.375, -0.375, -0.375], [-0.25, 0.0, 0.25],
105
+ [-0.125, -0.125, -0.125], [-0.25, 0.25, 0.0]],
106
+ [[0.125, 0.125, 0.125], [0.0, -0.5, 0.0],
107
+ [-0.25, -0.25, -0.25], [-0.125, -0.125, -0.125]],
108
+ [[0.0, -0.5, 0.0], [-0.25, -0.25, -0.25], [-0.125, -0.125, -0.125]],
109
+ [[-0.125, 0.125, 0.125], [0.25, -0.25, 0.0], [-0.25, 0.25, 0.0]],
110
+ [[0.0, 0.5, 0.0], [0.25, 0.25, -0.25],
111
+ [-0.125, -0.125, 0.125], [-0.125, -0.125, 0.125]],
112
+ [[-0.375, 0.375, -0.375], [-0.25, -0.25, 0.0],
113
+ [-0.125, 0.125, -0.125], [-0.25, 0.0, 0.25]],
114
+ [[0.0, 0.5, 0.0], [0.25, 0.25, -0.25], [-0.125, -0.125, 0.125]],
115
+ [[0.25, -0.25, 0.0], [-0.25, 0.25, 0.0],
116
+ [0.25, -0.25, 0.0], [0.25, -0.25, 0.0]],
117
+ [[-0.25, -0.25, 0.0], [-0.25, -0.25, 0.0], [-0.125, -0.125, 0.125]],
118
+ [[0.125, 0.125, 0.125], [-0.25, -0.25, 0.0], [-0.25, -0.25, 0.0]],
119
+ [[-0.25, -0.25, 0.0], [-0.25, -0.25, 0.0]],
120
+ [[-0.125, -0.125, 0.125]],
121
+ [[0.125, 0.125, 0.125], [-0.125, -0.125, 0.125]],
122
+ [[-0.125, -0.125, 0.125], [-0.125, -0.125, 0.125]],
123
+ [[-0.125, -0.125, 0.125], [-0.25, -0.25, 0.0], [0.25, 0.25, -0.0]],
124
+ [[0.0, -0.25, 0.25], [0.0, -0.25, 0.25]],
125
+ [[0.0, 0.0, 0.5], [0.25, -0.25, 0.25], [0.125, -0.125, 0.125]],
126
+ [[0.0, -0.25, 0.25], [0.0, -0.25, 0.25], [-0.125, -0.125, 0.125]],
127
+ [[0.375, -0.375, 0.375], [0.0, -0.25, -0.25],
128
+ [-0.125, 0.125, -0.125], [0.25, 0.25, 0.0]],
129
+ [[-0.125, -0.125, 0.125], [-0.125, 0.125, 0.125]],
130
+ [[0.125, 0.125, 0.125], [-0.125, -0.125, 0.125], [-0.125, 0.125, 0.125]],
131
+ [[-0.125, -0.125, 0.125], [-0.25, 0.0, 0.25], [-0.25, 0.0, 0.25]],
132
+ [[0.5, 0.0, 0.0], [-0.25, -0.25, 0.25],
133
+ [-0.125, -0.125, 0.125], [-0.125, -0.125, 0.125]],
134
+ [[-0.0, 0.5, 0.0], [-0.25, 0.25, -0.25], [0.125, -0.125, 0.125]],
135
+ [[-0.25, 0.25, -0.25], [-0.25, 0.25, -0.25],
136
+ [-0.125, 0.125, -0.125], [-0.125, 0.125, -0.125]],
137
+ [[-0.25, 0.0, -0.25], [0.375, -0.375, -0.375],
138
+ [0.0, 0.25, -0.25], [-0.125, 0.125, 0.125]],
139
+ [[0.5, 0.0, 0.0], [-0.25, 0.25, -0.25], [0.125, -0.125, 0.125]],
140
+ [[-0.25, 0.0, 0.25], [0.25, 0.0, -0.25]],
141
+ [[-0.0, 0.0, 0.5], [-0.25, 0.25, 0.25], [-0.125, 0.125, 0.125]],
142
+ [[-0.125, -0.125, 0.125], [-0.25, 0.0, 0.25], [0.25, 0.0, -0.25]],
143
+ [[-0.25, -0.0, -0.25], [-0.375, 0.375, 0.375],
144
+ [-0.25, -0.25, 0.0], [-0.125, 0.125, 0.125]],
145
+ [[0.0, 0.0, -0.5], [0.25, 0.25, -0.25], [-0.125, -0.125, 0.125]],
146
+ [[-0.0, 0.0, 0.5], [0.0, 0.0, 0.5]],
147
+ [[0.125, 0.125, 0.125], [0.125, 0.125, 0.125],
148
+ [0.25, 0.25, 0.25], [0.0, 0.0, 0.5]],
149
+ [[0.125, 0.125, 0.125], [0.25, 0.25, 0.25], [0.0, 0.0, 0.5]],
150
+ [[-0.25, 0.0, 0.25], [0.25, 0.0, -0.25], [-0.125, 0.125, 0.125]],
151
+ [[-0.0, 0.0, 0.5], [0.25, -0.25, 0.25],
152
+ [0.125, -0.125, 0.125], [0.125, -0.125, 0.125]],
153
+ [[-0.25, 0.0, 0.25], [-0.25, 0.0, 0.25],
154
+ [-0.25, 0.0, 0.25], [0.25, 0.0, -0.25]],
155
+ [[0.125, -0.125, 0.125], [0.25, 0.0, 0.25], [0.25, 0.0, 0.25]],
156
+ [[0.25, 0.0, 0.25], [-0.375, -0.375, 0.375],
157
+ [-0.25, 0.25, 0.0], [-0.125, -0.125, 0.125]],
158
+ [[-0.0, 0.0, 0.5], [0.25, -0.25, 0.25], [0.125, -0.125, 0.125]],
159
+ [[0.125, 0.125, 0.125], [0.25, 0.0, 0.25], [0.25, 0.0, 0.25]],
160
+ [[0.25, 0.0, 0.25], [0.25, 0.0, 0.25]],
161
+ [[-0.125, -0.125, 0.125], [0.125, -0.125, 0.125]],
162
+ [[0.125, 0.125, 0.125], [-0.125, -0.125, 0.125], [0.125, -0.125, 0.125]],
163
+ [[-0.125, -0.125, 0.125], [0.0, -0.25, 0.25], [0.0, 0.25, -0.25]],
164
+ [[0.0, -0.5, 0.0], [0.125, 0.125, -0.125],
165
+ [0.25, 0.25, -0.25], [-0.125, -0.125, 0.125]],
166
+ [[0.0, -0.25, 0.25], [0.0, -0.25, 0.25], [0.125, -0.125, 0.125]],
167
+ [[0.0, 0.0, 0.5], [0.25, -0.25, 0.25],
168
+ [0.125, -0.125, 0.125], [0.125, -0.125, 0.125]],
169
+ [[0.0, -0.25, 0.25], [0.0, -0.25, 0.25],
170
+ [0.0, -0.25, 0.25], [0.0, 0.25, -0.25]],
171
+ [[0.0, 0.25, 0.25], [0.0, 0.25, 0.25], [0.125, -0.125, -0.125]],
172
+ [[-0.125, 0.125, 0.125], [0.125, -0.125, 0.125], [-0.125, -0.125, 0.125]],
173
+ [[-0.125, 0.125, 0.125], [0.125, -0.125, 0.125],
174
+ [-0.125, -0.125, 0.125], [0.125, 0.125, 0.125]],
175
+ [[-0.0, 0.0, 0.5], [-0.25, -0.25, 0.25],
176
+ [-0.125, -0.125, 0.125], [-0.125, -0.125, 0.125]],
177
+ [[0.125, 0.125, 0.125], [0.125, -0.125, 0.125], [0.125, -0.125, -0.125]],
178
+ [[-0.0, 0.5, 0.0], [-0.25, 0.25, -0.25],
179
+ [0.125, -0.125, 0.125], [0.125, -0.125, 0.125]],
180
+ [[0.125, 0.125, 0.125], [-0.125, -0.125, 0.125], [0.125, -0.125, -0.125]],
181
+ [[0.0, -0.25, -0.25], [0.0, 0.25, 0.25], [0.125, 0.125, 0.125]],
182
+ [[0.125, 0.125, 0.125], [0.125, -0.125, -0.125]],
183
+ [[0.5, 0.0, -0.0], [0.25, -0.25, -0.25], [0.125, -0.125, -0.125]],
184
+ [[-0.25, 0.25, 0.25], [-0.125, 0.125, 0.125],
185
+ [-0.25, 0.25, 0.25], [0.125, -0.125, -0.125]],
186
+ [[0.375, -0.375, 0.375], [0.0, 0.25, 0.25],
187
+ [-0.125, 0.125, -0.125], [-0.25, 0.0, 0.25]],
188
+ [[0.0, -0.5, 0.0], [-0.25, 0.25, 0.25], [-0.125, 0.125, 0.125]],
189
+ [[-0.375, -0.375, 0.375], [0.25, -0.25, 0.0],
190
+ [0.0, 0.25, 0.25], [-0.125, -0.125, 0.125]],
191
+ [[-0.125, 0.125, 0.125], [-0.25, 0.25, 0.25], [0.0, 0.0, 0.5]],
192
+ [[0.125, 0.125, 0.125], [0.0, 0.25, 0.25], [0.0, 0.25, 0.25]],
193
+ [[0.0, 0.25, 0.25], [0.0, 0.25, 0.25]],
194
+ [[0.5, 0.0, -0.0], [0.25, 0.25, 0.25],
195
+ [0.125, 0.125, 0.125], [0.125, 0.125, 0.125]],
196
+ [[0.125, -0.125, 0.125], [-0.125, -0.125, 0.125], [0.125, 0.125, 0.125]],
197
+ [[-0.25, -0.0, -0.25], [0.25, 0.0, 0.25], [0.125, 0.125, 0.125]],
198
+ [[0.125, 0.125, 0.125], [0.125, -0.125, 0.125]],
199
+ [[-0.25, -0.25, 0.0], [0.25, 0.25, -0.0], [0.125, 0.125, 0.125]],
200
+ [[0.125, 0.125, 0.125], [-0.125, -0.125, 0.125]],
201
+ [[0.125, 0.125, 0.125], [0.125, 0.125, 0.125]],
202
+ [[0.125, 0.125, 0.125]],
203
+ [[0.125, 0.125, 0.125]],
204
+ [[0.125, 0.125, 0.125], [0.125, 0.125, 0.125]],
205
+ [[0.125, 0.125, 0.125], [-0.125, -0.125, 0.125]],
206
+ [[-0.25, -0.25, 0.0], [0.25, 0.25, -0.0], [0.125, 0.125, 0.125]],
207
+ [[0.125, 0.125, 0.125], [0.125, -0.125, 0.125]],
208
+ [[-0.25, -0.0, -0.25], [0.25, 0.0, 0.25], [0.125, 0.125, 0.125]],
209
+ [[0.125, -0.125, 0.125], [-0.125, -0.125, 0.125], [0.125, 0.125, 0.125]],
210
+ [[0.5, 0.0, -0.0], [0.25, 0.25, 0.25],
211
+ [0.125, 0.125, 0.125], [0.125, 0.125, 0.125]],
212
+ [[0.0, 0.25, 0.25], [0.0, 0.25, 0.25]],
213
+ [[0.125, 0.125, 0.125], [0.0, 0.25, 0.25], [0.0, 0.25, 0.25]],
214
+ [[-0.125, 0.125, 0.125], [-0.25, 0.25, 0.25], [0.0, 0.0, 0.5]],
215
+ [[-0.375, -0.375, 0.375], [0.25, -0.25, 0.0],
216
+ [0.0, 0.25, 0.25], [-0.125, -0.125, 0.125]],
217
+ [[0.0, -0.5, 0.0], [-0.25, 0.25, 0.25], [-0.125, 0.125, 0.125]],
218
+ [[0.375, -0.375, 0.375], [0.0, 0.25, 0.25],
219
+ [-0.125, 0.125, -0.125], [-0.25, 0.0, 0.25]],
220
+ [[-0.25, 0.25, 0.25], [-0.125, 0.125, 0.125],
221
+ [-0.25, 0.25, 0.25], [0.125, -0.125, -0.125]],
222
+ [[0.5, 0.0, -0.0], [0.25, -0.25, -0.25], [0.125, -0.125, -0.125]],
223
+ [[0.125, 0.125, 0.125], [0.125, -0.125, -0.125]],
224
+ [[0.0, -0.25, -0.25], [0.0, 0.25, 0.25], [0.125, 0.125, 0.125]],
225
+ [[0.125, 0.125, 0.125], [-0.125, -0.125, 0.125], [0.125, -0.125, -0.125]],
226
+ [[-0.0, 0.5, 0.0], [-0.25, 0.25, -0.25],
227
+ [0.125, -0.125, 0.125], [0.125, -0.125, 0.125]],
228
+ [[0.125, 0.125, 0.125], [0.125, -0.125, 0.125], [0.125, -0.125, -0.125]],
229
+ [[-0.0, 0.0, 0.5], [-0.25, -0.25, 0.25],
230
+ [-0.125, -0.125, 0.125], [-0.125, -0.125, 0.125]],
231
+ [[-0.125, 0.125, 0.125], [0.125, -0.125, 0.125],
232
+ [-0.125, -0.125, 0.125], [0.125, 0.125, 0.125]],
233
+ [[-0.125, 0.125, 0.125], [0.125, -0.125, 0.125], [-0.125, -0.125, 0.125]],
234
+ [[0.0, 0.25, 0.25], [0.0, 0.25, 0.25], [0.125, -0.125, -0.125]],
235
+ [[0.0, -0.25, -0.25], [0.0, 0.25, 0.25], [0.0, 0.25, 0.25], [0.0, 0.25, 0.25]],
236
+ [[0.0, 0.0, 0.5], [0.25, -0.25, 0.25],
237
+ [0.125, -0.125, 0.125], [0.125, -0.125, 0.125]],
238
+ [[0.0, -0.25, 0.25], [0.0, -0.25, 0.25], [0.125, -0.125, 0.125]],
239
+ [[0.0, -0.5, 0.0], [0.125, 0.125, -0.125],
240
+ [0.25, 0.25, -0.25], [-0.125, -0.125, 0.125]],
241
+ [[-0.125, -0.125, 0.125], [0.0, -0.25, 0.25], [0.0, 0.25, -0.25]],
242
+ [[0.125, 0.125, 0.125], [-0.125, -0.125, 0.125], [0.125, -0.125, 0.125]],
243
+ [[-0.125, -0.125, 0.125], [0.125, -0.125, 0.125]],
244
+ [[0.25, 0.0, 0.25], [0.25, 0.0, 0.25]],
245
+ [[0.125, 0.125, 0.125], [0.25, 0.0, 0.25], [0.25, 0.0, 0.25]],
246
+ [[-0.0, 0.0, 0.5], [0.25, -0.25, 0.25], [0.125, -0.125, 0.125]],
247
+ [[0.25, 0.0, 0.25], [-0.375, -0.375, 0.375],
248
+ [-0.25, 0.25, 0.0], [-0.125, -0.125, 0.125]],
249
+ [[0.125, -0.125, 0.125], [0.25, 0.0, 0.25], [0.25, 0.0, 0.25]],
250
+ [[-0.25, -0.0, -0.25], [0.25, 0.0, 0.25],
251
+ [0.25, 0.0, 0.25], [0.25, 0.0, 0.25]],
252
+ [[-0.0, 0.0, 0.5], [0.25, -0.25, 0.25],
253
+ [0.125, -0.125, 0.125], [0.125, -0.125, 0.125]],
254
+ [[-0.25, 0.0, 0.25], [0.25, 0.0, -0.25], [-0.125, 0.125, 0.125]],
255
+ [[0.125, 0.125, 0.125], [0.25, 0.25, 0.25], [0.0, 0.0, 0.5]],
256
+ [[0.125, 0.125, 0.125], [0.125, 0.125, 0.125],
257
+ [0.25, 0.25, 0.25], [0.0, 0.0, 0.5]],
258
+ [[-0.0, 0.0, 0.5], [0.0, 0.0, 0.5]],
259
+ [[0.0, 0.0, -0.5], [0.25, 0.25, -0.25], [-0.125, -0.125, 0.125]],
260
+ [[-0.25, -0.0, -0.25], [-0.375, 0.375, 0.375],
261
+ [-0.25, -0.25, 0.0], [-0.125, 0.125, 0.125]],
262
+ [[-0.125, -0.125, 0.125], [-0.25, 0.0, 0.25], [0.25, 0.0, -0.25]],
263
+ [[-0.0, 0.0, 0.5], [-0.25, 0.25, 0.25], [-0.125, 0.125, 0.125]],
264
+ [[-0.25, 0.0, 0.25], [0.25, 0.0, -0.25]],
265
+ [[0.5, 0.0, 0.0], [-0.25, 0.25, -0.25], [0.125, -0.125, 0.125]],
266
+ [[-0.25, 0.0, -0.25], [0.375, -0.375, -0.375],
267
+ [0.0, 0.25, -0.25], [-0.125, 0.125, 0.125]],
268
+ [[-0.25, 0.25, -0.25], [-0.25, 0.25, -0.25],
269
+ [-0.125, 0.125, -0.125], [-0.125, 0.125, -0.125]],
270
+ [[-0.0, 0.5, 0.0], [-0.25, 0.25, -0.25], [0.125, -0.125, 0.125]],
271
+ [[0.5, 0.0, 0.0], [-0.25, -0.25, 0.25],
272
+ [-0.125, -0.125, 0.125], [-0.125, -0.125, 0.125]],
273
+ [[-0.125, -0.125, 0.125], [-0.25, 0.0, 0.25], [-0.25, 0.0, 0.25]],
274
+ [[0.125, 0.125, 0.125], [-0.125, -0.125, 0.125], [-0.125, 0.125, 0.125]],
275
+ [[-0.125, -0.125, 0.125], [-0.125, 0.125, 0.125]],
276
+ [[0.375, -0.375, 0.375], [0.0, -0.25, -0.25],
277
+ [-0.125, 0.125, -0.125], [0.25, 0.25, 0.0]],
278
+ [[0.0, -0.25, 0.25], [0.0, -0.25, 0.25], [-0.125, -0.125, 0.125]],
279
+ [[0.0, 0.0, 0.5], [0.25, -0.25, 0.25], [0.125, -0.125, 0.125]],
280
+ [[0.0, -0.25, 0.25], [0.0, -0.25, 0.25]],
281
+ [[-0.125, -0.125, 0.125], [-0.25, -0.25, 0.0], [0.25, 0.25, -0.0]],
282
+ [[-0.125, -0.125, 0.125], [-0.125, -0.125, 0.125]],
283
+ [[0.125, 0.125, 0.125], [-0.125, -0.125, 0.125]],
284
+ [[-0.125, -0.125, 0.125]],
285
+ [[-0.25, -0.25, 0.0], [-0.25, -0.25, 0.0]],
286
+ [[0.125, 0.125, 0.125], [-0.25, -0.25, 0.0], [-0.25, -0.25, 0.0]],
287
+ [[-0.25, -0.25, 0.0], [-0.25, -0.25, 0.0], [-0.125, -0.125, 0.125]],
288
+ [[-0.25, -0.25, 0.0], [-0.25, -0.25, 0.0],
289
+ [-0.25, -0.25, 0.0], [0.25, 0.25, -0.0]],
290
+ [[0.0, 0.5, 0.0], [0.25, 0.25, -0.25], [-0.125, -0.125, 0.125]],
291
+ [[-0.375, 0.375, -0.375], [-0.25, -0.25, 0.0],
292
+ [-0.125, 0.125, -0.125], [-0.25, 0.0, 0.25]],
293
+ [[0.0, 0.5, 0.0], [0.25, 0.25, -0.25],
294
+ [-0.125, -0.125, 0.125], [-0.125, -0.125, 0.125]],
295
+ [[-0.125, 0.125, 0.125], [0.25, -0.25, 0.0], [-0.25, 0.25, 0.0]],
296
+ [[0.0, -0.5, 0.0], [-0.25, -0.25, -0.25], [-0.125, -0.125, -0.125]],
297
+ [[0.125, 0.125, 0.125], [0.0, -0.5, 0.0],
298
+ [-0.25, -0.25, -0.25], [-0.125, -0.125, -0.125]],
299
+ [[-0.375, -0.375, -0.375], [-0.25, 0.0, 0.25],
300
+ [-0.125, -0.125, -0.125], [-0.25, 0.25, 0.0]],
301
+ [[0.25, -0.25, 0.0], [-0.25, 0.25, 0.0], [0.125, -0.125, 0.125]],
302
+ [[0.0, 0.5, 0.0], [0.0, -0.5, 0.0]],
303
+ [[0.0, 0.5, 0.0], [0.125, -0.125, 0.125], [-0.25, 0.25, -0.25]],
304
+ [[0.0, 0.5, 0.0], [-0.25, 0.25, 0.25], [0.125, -0.125, -0.125]],
305
+ [[0.25, -0.25, 0.0], [-0.25, 0.25, 0.0]],
306
+ [[-0.5, 0.0, 0.0], [-0.25, -0.25, 0.25], [-0.125, -0.125, 0.125]],
307
+ [[0.0, 0.25, -0.25], [0.375, -0.375, -0.375],
308
+ [-0.125, 0.125, 0.125], [0.25, 0.25, 0.0]],
309
+ [[0.5, 0.0, 0.0], [0.25, -0.25, 0.25],
310
+ [-0.125, 0.125, -0.125], [0.125, -0.125, 0.125]],
311
+ [[0.125, -0.125, 0.125], [0.25, -0.25, 0.0], [0.25, -0.25, 0.0]],
312
+ [[0.25, 0.25, -0.25], [0.25, 0.25, -0.25],
313
+ [0.125, 0.125, -0.125], [-0.125, -0.125, 0.125]],
314
+ [[-0.0, 0.0, 0.5], [-0.25, -0.25, 0.25], [-0.125, -0.125, 0.125]],
315
+ [[0.125, 0.125, 0.125], [0.125, -0.125, 0.125], [-0.125, 0.125, 0.125]],
316
+ [[-0.125, 0.125, 0.125], [0.125, -0.125, 0.125]],
317
+ [[-0.375, -0.375, 0.375], [-0.0, 0.25, 0.25],
318
+ [0.125, 0.125, -0.125], [-0.25, -0.0, -0.25]],
319
+ [[0.0, -0.25, 0.25], [0.0, 0.25, -0.25], [0.125, -0.125, 0.125]],
320
+ [[0.125, -0.125, 0.125], [-0.25, -0.0, -0.25], [0.25, 0.0, 0.25]],
321
+ [[0.125, -0.125, 0.125], [0.125, -0.125, 0.125]],
322
+ [[0.0, -0.5, 0.0], [0.125, 0.125, -0.125], [0.25, 0.25, -0.25]],
323
+ [[0.0, -0.25, 0.25], [0.0, 0.25, -0.25]],
324
+ [[0.125, 0.125, 0.125], [0.125, -0.125, 0.125]],
325
+ [[0.125, -0.125, 0.125]],
326
+ [[-0.5, 0.0, 0.0], [-0.125, -0.125, -0.125], [-0.25, -0.25, -0.25]],
327
+ [[-0.5, 0.0, 0.0], [-0.125, -0.125, -0.125],
328
+ [-0.25, -0.25, -0.25], [0.125, 0.125, 0.125]],
329
+ [[0.375, 0.375, 0.375], [0.0, 0.25, -0.25],
330
+ [-0.125, -0.125, -0.125], [-0.25, 0.25, 0.0]],
331
+ [[0.125, -0.125, -0.125], [0.25, -0.25, 0.0], [0.25, -0.25, 0.0]],
332
+ [[0.125, 0.125, 0.125], [0.375, 0.375, 0.375],
333
+ [0.0, -0.25, 0.25], [-0.25, 0.0, 0.25]],
334
+ [[-0.25, 0.0, 0.25], [-0.25, 0.0, 0.25], [0.125, -0.125, -0.125]],
335
+ [[0.0, -0.25, -0.25], [0.0, 0.25, 0.25], [-0.125, 0.125, 0.125]],
336
+ [[-0.125, 0.125, 0.125], [0.125, -0.125, -0.125]],
337
+ [[-0.125, -0.125, -0.125], [-0.25, -0.25, -0.25],
338
+ [0.25, 0.25, 0.25], [0.125, 0.125, 0.125]],
339
+ [[-0.125, -0.125, 0.125], [0.125, -0.125, 0.125], [0.125, -0.125, -0.125]],
340
+ [[0.0, 0.0, -0.5], [0.25, 0.25, 0.25], [-0.125, -0.125, -0.125]],
341
+ [[0.125, -0.125, 0.125], [0.125, -0.125, -0.125]],
342
+ [[0.0, -0.5, 0.0], [0.25, 0.25, 0.25], [0.125, 0.125, 0.125]],
343
+ [[-0.125, -0.125, 0.125], [0.125, -0.125, -0.125]],
344
+ [[0.0, -0.25, -0.25], [0.0, 0.25, 0.25]],
345
+ [[0.125, -0.125, -0.125]],
346
+ [[0.5, 0.0, 0.0], [0.5, 0.0, 0.0]],
347
+ [[-0.5, 0.0, 0.0], [-0.25, 0.25, 0.25], [-0.125, 0.125, 0.125]],
348
+ [[0.5, 0.0, 0.0], [0.25, -0.25, 0.25], [-0.125, 0.125, -0.125]],
349
+ [[0.25, -0.25, 0.0], [0.25, -0.25, 0.0]],
350
+ [[0.5, 0.0, 0.0], [-0.25, -0.25, 0.25], [-0.125, -0.125, 0.125]],
351
+ [[-0.25, 0.0, 0.25], [-0.25, 0.0, 0.25]],
352
+ [[0.125, 0.125, 0.125], [-0.125, 0.125, 0.125]],
353
+ [[-0.125, 0.125, 0.125]],
354
+ [[0.5, 0.0, -0.0], [0.25, 0.25, 0.25], [0.125, 0.125, 0.125]],
355
+ [[0.125, -0.125, 0.125], [-0.125, -0.125, 0.125]],
356
+ [[-0.25, -0.0, -0.25], [0.25, 0.0, 0.25]],
357
+ [[0.125, -0.125, 0.125]],
358
+ [[-0.25, -0.25, 0.0], [0.25, 0.25, -0.0]],
359
+ [[-0.125, -0.125, 0.125]],
360
+ [[0.125, 0.125, 0.125]],
361
+ [[0, 0, 0]]]
362
+ # pylint: enable=line-too-long
363
+
364
+
365
def create_table_neighbour_code_to_surface_area(spacing_mm):
    """Returns an array mapping neighbourhood code to the surface elements area.

    Note that the normals encode the initial surface area. This function
    computes the area corresponding to the given `spacing_mm`.

    Args:
      spacing_mm: 3-element list-like structure. Voxel spacing in x0, x1 and x2
        direction.
    """
    # Scaling component i of a normal by the product of the two *other*
    # voxel spacings converts the unit-voxel surfel area to mm^2.
    component_scale = np.array([
        spacing_mm[1] * spacing_mm[2],
        spacing_mm[0] * spacing_mm[2],
        spacing_mm[0] * spacing_mm[1],
    ])
    # One entry per 2x2x2 neighbourhood code; the table lists each code's
    # surfel normals, and a surfel's area is the length of its normal.
    table = np.zeros([256])
    for code, normals in enumerate(_NEIGHBOUR_CODE_TO_NORMALS):
        scaled = np.array(normals) * component_scale
        table[code] = np.linalg.norm(scaled, axis=1).sum()
    return table
390
+
391
+
392
+ # In the neighbourhood, points are ordered: top left, top right, bottom left,
393
+ # bottom right.
394
+ ENCODE_NEIGHBOURHOOD_2D_KERNEL = np.array([[8, 4], [2, 1]])
395
+
396
+
397
def create_table_neighbour_code_to_contour_length(spacing_mm):
    """Returns an array mapping neighbourhood code to the contour length.

    For the list of possible cases and their figures, see page 38 from:
    https://nccastaff.bournemouth.ac.uk/jmacey/MastersProjects/MSc14/06/thesis.pdf

    In 2D, each point has 4 neighbors, giving 16 configurations. A
    configuration is encoded with '1' meaning "inside the object" and '0'
    "outside the object"; bits are ordered top left, top right, bottom
    left, bottom right. The x0 axis points vertically downward and the x1
    axis horizontally to the right.

    Args:
      spacing_mm: 2-element list-like structure. Voxel spacing in x0 and x1
        directions.
    """
    vertical = spacing_mm[0]
    horizontal = spacing_mm[1]
    diag = 0.5 * math.sqrt(spacing_mm[0]**2 + spacing_mm[1]**2)

    # Each key spells the 2x2 neighbourhood pattern row by row
    # (top-left, top-right, bottom-left, bottom-right). Codes 0000 and
    # 1111 have no contour and keep the default length of zero.
    contour_by_pattern = {
        "0001": diag,
        "0010": diag,
        "0011": horizontal,
        "0100": diag,
        "0101": vertical,
        "0110": 2 * diag,
        "0111": diag,
        "1000": diag,
        "1001": 2 * diag,
        "1010": vertical,
        "1011": diag,
        "1100": horizontal,
        "1101": diag,
        "1110": diag,
    }
    neighbour_code_to_contour_length = np.zeros([16])
    for pattern, length in contour_by_pattern.items():
        neighbour_code_to_contour_length[int(pattern, 2)] = length
    return neighbour_code_to_contour_length
deepatlas/metrics/metrics.py ADDED
@@ -0,0 +1,355 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import nibabel as nib
3
+ import ants
4
+ import argparse
5
+ import pandas as pd
6
+ import glob
7
+ import os
8
+ import surface_distance
9
+ import nrrd
10
+ import shutil
11
+ import distanceVertex2Mesh
12
+ import textwrap
13
+
14
+
15
def parse_command_line():
    """Parse CLI arguments for the segmentation/registration evaluation pipeline.

    Returns:
        argparse.Namespace with the raw option values; every option is
        optional (argparse default None / False), validation happens in main().
    """
    print('---'*10)
    print('Parsing Command Line Arguments')
    # RawTextHelpFormatter keeps the multi-line help strings below verbatim.
    parser = argparse.ArgumentParser(
        description='Inference evaluation pipeline for image registration-segmentation', formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('-bp', metavar='base path', type=str,
                        help="Absolute path of the base directory")
    parser.add_argument('-gp', metavar='ground truth path', type=str,
                        help="Relative path of the ground truth segmentation directory")
    parser.add_argument('-pp', metavar='predicted path', type=str,
                        help="Relative path of predicted segmentation directory")
    parser.add_argument('-sp', metavar='save path', type=str,
                        help="Relative path of CSV file directory to save, if not specify, default is base directory")
    # nargs='+' so several metric types can be requested at once, e.g. -vt dsc ahd
    parser.add_argument('-vt', metavar='validation type', type=str, nargs='+',
                        help=textwrap.dedent('''Validation type:
dsc: Dice Score
ahd: Average Hausdorff Distance
whd: Weighted Hausdorff Distance
'''))
    parser.add_argument('-pm', metavar='probability map path', type=str,
                        help="Relative path of text file directory of probability map")
    parser.add_argument('-fn', metavar='file name', type=str,
                        help="name of output file")
    # Flag: filenames of registration outputs carry extra underscore-separated
    # fields that main() strips when this is set.
    parser.add_argument('-reg', action='store_true',
                        help="check if the input files are registration predictions")
    parser.add_argument('-tp', metavar='type of segmentation', type=str,
                        help=textwrap.dedent('''Segmentation type:
ET: Eustachian Tube
NC: Nasal Cavity
HT: Head Tumor
'''))
    # Flat list alternating label name / label value (consumed pairwise in main()).
    parser.add_argument('-sl', metavar='segmentation information list', type=str, nargs='+',
                        help='a list of label name and corresponding value')
    parser.add_argument('-cp', metavar='current prefix of filenames', type=str,
                        help='current prefix of filenames')
    argv = parser.parse_args()
    return argv
52
+
53
+
54
def rename(prefix, filename):
    """Build '<prefix>_<id>' where <id> is the last 3 chars of the filename stem."""
    stem = filename.split('.')[0]
    return f'{prefix}_{stem[-3:]}'
58
+
59
def dice_coefficient_and_hausdorff_distance(filename, img_np_pred, img_np_gt, num_classes, spacing, probability_map, dsc, ahd, whd, average_DSC, average_HD):
    """Compute per-label Dice and/or average Hausdorff distance for one case.

    Args:
        filename: identifier written into the 'File ID' column.
        img_np_pred / img_np_gt: 3D integer label volumes (prediction / ground truth).
        num_classes: number of labels including background (label 0 is skipped).
        spacing: voxel spacing (mm) passed to the surface-distance computation.
        probability_map: unused placeholder for the weighted Hausdorff distance.
        dsc / ahd / whd: booleans selecting which metrics to compute.
        average_DSC / average_HD: running per-label accumulators (length num_classes-1),
            mutated in place and also returned.

    Returns:
        (df, average_DSC, average_HD): a DataFrame with one row per foreground
        label, plus the updated accumulators.
    """
    df = pd.DataFrame()
    data_gt, bool_gt = make_one_hot(img_np_gt, num_classes)
    data_pred, bool_pred = make_one_hot(img_np_pred, num_classes)
    for i in range(1, num_classes):
        df1 = pd.DataFrame([[filename, i]], columns=[
            'File ID', 'Label Value'])
        if dsc:
            if data_pred[i].any():
                volume_sum = data_gt[i].sum() + data_pred[i].sum()
                # BUG FIX: the original did `return np.NaN` here, which broke the
                # 3-tuple contract callers unpack (and np.NaN was removed in
                # NumPy 2.0).  Record NaN for this label instead and keep going.
                if volume_sum == 0:
                    dice = np.nan
                else:
                    volume_intersect = (data_gt[i] & data_pred[i]).sum()
                    dice = 2*volume_intersect / volume_sum
            else:
                # Label absent from the prediction: count it as a complete miss.
                dice = 0.0
            df1['Dice Score'] = dice
            average_DSC[i-1] += dice
        if ahd:
            if data_pred[i].any():
                avd = average_hausdorff_distance(bool_gt[i], bool_pred[i], spacing)
            else:
                # NOTE(review): adding NaN makes the running average NaN for this
                # label; kept to preserve the original reporting behavior.
                avd = np.nan
            df1['Average Hausdorff Distance'] = avd
            average_HD[i-1] += avd
        if whd:
            # Weighted Hausdorff Distance is not implemented yet.
            # wgd = weighted_hausdorff_distance(gt, pred, probability_map)
            # df1['Weighted Hausdorff Distance'] = wgd
            pass

        df = pd.concat([df, df1])
    return df, average_DSC, average_HD
96
+
97
+
98
def make_one_hot(img_np, num_classes):
    """Expand a 3D label volume into per-class one-hot stacks.

    Returns a pair of (num_classes, X, Y, Z) arrays holding the same masks:
    one as int8 (used for Dice) and one as bool (used for Hausdorff).
    """
    stack_shape = (num_classes, img_np.shape[0], img_np.shape[1], img_np.shape[2])
    one_hot_dice = np.zeros(stack_shape, dtype=np.int8)
    one_hot_hd = np.zeros(stack_shape, dtype=bool)
    for label in range(num_classes):
        mask = (img_np == label)
        one_hot_dice[label] = mask
        one_hot_hd[label] = mask
    return one_hot_dice, one_hot_hd
109
+
110
+
111
def average_hausdorff_distance(img_np_gt, img_np_pred, spacing):
    """Symmetric average surface distance (mm) between two boolean masks.

    Averages the gt->pred and pred->gt mean surface distances computed by the
    DeepMind surface_distance helpers.
    """
    distances = surface_distance.compute_surface_distances(
        img_np_gt, img_np_pred, spacing)
    gt_to_pred, pred_to_gt = surface_distance.compute_average_surface_distance(distances)
    return 0.5 * (gt_to_pred + pred_to_gt)
116
+
117
+
118
def checkSegFormat(base, segmentation, type, prefix=None):
    """Normalize a directory of segmentations to .nii.gz labelmaps.

    Converts Slicer *.seg.nrrd files (via nrrd2nifti), re-saves plain .nii as
    .nii.gz, and copies existing .nii.gz files unchanged.

    Args:
        base: absolute base directory.
        segmentation: for type 'gt' an absolute path, otherwise a path
            relative to `base` (mirrors how main() calls this).
        type: 'gt' or anything else ('pred'); selects the output folder name.
        prefix: if given (ground truth only), filenames are rewritten with
            rename(prefix, file).

    Returns:
        Path of the directory containing the normalized segmentations.
    """
    if type == 'gt':
        save_dir = os.path.join(base, 'gt_reformat_labels')
        path = segmentation
    else:
        save_dir = os.path.join(base, 'pred_reformat_labels')
        path = os.path.join(base, segmentation)
    try:
        os.mkdir(save_dir)
    except FileExistsError:
        # BUG FIX: was a bare `except:` that swallowed every error (permission
        # denied, missing parent, KeyboardInterrupt); only an already-existing
        # directory is benign here.
        print(f'{save_dir} already exists')

    for file in os.listdir(path):
        if type == 'gt' and prefix is not None:
            name = rename(prefix, file)
        else:
            name = file.split('.')[0]

        if file.endswith('seg.nrrd'):
            # Slicer segmentation: needs header-aware conversion to a labelmap.
            ants_img = ants.image_read(os.path.join(path, file))
            header = nrrd.read_header(os.path.join(path, file))
            filename = os.path.join(save_dir, name + '.nii.gz')
            nrrd2nifti(ants_img, header, filename)
        elif file.endswith('nii'):
            image = ants.image_read(os.path.join(path, file))
            image.to_file(os.path.join(save_dir, name + '.nii.gz'))
        elif file.endswith('nii.gz'):
            shutil.copy(os.path.join(path, file), os.path.join(save_dir, name + '.nii.gz'))

    return save_dir
151
+
152
+
153
def nrrd2nifti(img, header, filename):
    """Collapse a (possibly multi-layer) NRRD segmentation into one labelmap NIfTI.

    The per-segment one-hot stack is argmax-reduced to a single uint8 labelmap
    (labels start at 1; voxels outside every segment stay 0), then written to
    `filename` with the source image's geometry.
    """
    raw = img.view(single_components=True)
    one_hot = convert_to_one_hot(raw, header)
    # Any voxel covered by at least one segment is foreground.
    foreground = np.max(one_hot, axis=0)
    labelmap = np.multiply(np.argmax(one_hot, axis=0) + 1,
                           foreground).astype('uint8')
    out_img = ants.from_numpy(
        labelmap, origin=img.origin, spacing=img.spacing, direction=img.direction)
    print('-- Saving NII Segmentations')
    out_img.to_file(filename)
163
+
164
+
165
def convert_to_one_hot(data, header, segment_indices=None):
    """Convert NRRD segmentation voxel data to a (num_segments, X, Y, Z) one-hot array.

    Three header formats are handled:
      * newer Slicer NRRD with Segment{i}_Layer / Segment{i}_LabelValue keys
        (3D single-layer or 4D multi-layer data);
      * a plain binary labelmap (3D data, no layer/label keys) where labels
        run from 1 to max(data);
      * older Slicer NRRD that is assumed to already be one-hot and is
        returned unchanged.

    Args:
        data: 3D or 4D numpy voxel array from the NRRD file.
        header: NRRD header dict (as read by nrrd.read_header).
        segment_indices: None (all segments), an int, or a list of ints
            selecting which segments to extract; any other type prints an
            error and returns None.

    Returns:
        One-hot float array, the input unchanged (old one-hot format), or
        None on bad segment_indices.
    """
    print('---'*10)
    print("converting to one hot")

    layer_values = get_layer_values(header)
    label_values = get_label_values(header)

    # Newer Slicer NRRD (compressed layers)
    if layer_values and label_values:

        assert len(layer_values) == len(label_values)
        if len(data.shape) == 3:
            x_dim, y_dim, z_dim = data.shape
        elif len(data.shape) == 4:
            # 4D layout: axis 0 is the layer index.
            x_dim, y_dim, z_dim = data.shape[1:]

        num_segments = len(layer_values)
        one_hot = np.zeros((num_segments, x_dim, y_dim, z_dim))

        if segment_indices is None:
            segment_indices = list(range(num_segments))

        elif isinstance(segment_indices, int):
            segment_indices = [segment_indices]

        elif not isinstance(segment_indices, list):
            print("incorrectly specified segment indices")
            return

        # Check if NRRD is composed of one layer 0
        if np.max(layer_values) == 0:
            # Single-layer file: every segment's label lives in the same 3D volume.
            for i, seg_idx in enumerate(segment_indices):
                layer = layer_values[seg_idx]
                label = label_values[seg_idx]
                one_hot[i] = 1*(data == label).astype(np.uint8)

        else:
            # Multi-layer file: index the 4D data by each segment's layer first.
            for i, seg_idx in enumerate(segment_indices):
                layer = layer_values[seg_idx]
                label = label_values[seg_idx]
                one_hot[i] = 1*(data[layer] == label).astype(np.uint8)

    # Binary labelmap
    elif len(data.shape) == 3:
        x_dim, y_dim, z_dim = data.shape
        # assumes labels are consecutive integers 1..max — TODO confirm
        num_segments = np.max(data)
        one_hot = np.zeros((num_segments, x_dim, y_dim, z_dim))

        if segment_indices is None:
            segment_indices = list(range(1, num_segments + 1))

        elif isinstance(segment_indices, int):
            segment_indices = [segment_indices]

        elif not isinstance(segment_indices, list):
            print("incorrectly specified segment indices")
            return

        for i, seg_idx in enumerate(segment_indices):
            one_hot[i] = 1*(data == seg_idx).astype(np.uint8)

    # Older Slicer NRRD (already one-hot)
    else:
        return data

    return one_hot
231
+
232
+
233
def get_layer_values(header):
    """Collect Segment{i}_Layer values from a Slicer NRRD header, in index order."""
    num_segments = sum(1 for key in header.keys() if "Layer" in key)
    return [int(header['Segment{}_Layer'.format(i)]) for i in range(num_segments)]
239
+
240
+
241
def get_label_values(header):
    """Collect Segment{i}_LabelValue values from a Slicer NRRD header, in index order."""
    num_segments = sum(1 for key in header.keys() if "LabelValue" in key)
    return [int(header['Segment{}_LabelValue'.format(i)]) for i in range(num_segments)]
247
+
248
+
249
def main():
    """Evaluate predicted vs. ground-truth segmentations and write CSV/TXT reports.

    Workflow: parse args -> normalize both segmentation folders to .nii.gz ->
    for every prediction, locate its ground truth by (possibly stripped)
    filename, compute the requested metrics, and accumulate per-label
    averages -> write per-case metrics to a CSV and the averages to metric.txt.
    """
    args = parse_command_line()
    base = args.bp
    gt_path = args.gp
    pred_path = args.pp
    if args.sp is None:
        save_path = base
    else:
        save_path = args.sp
    validation_type = args.vt
    probability_map_path = args.pm
    filename = args.fn
    reg = args.reg
    seg_type = args.tp
    label_list = args.sl
    current_prefix = args.cp
    if probability_map_path is not None:
        probability_map = np.loadtxt(os.path.join(base, probability_map_path))
    else:
        probability_map = None
    # Translate the requested metric names into flags; abort on unknown names.
    dsc = False
    ahd = False
    whd = False
    for i in range(len(validation_type)):
        if validation_type[i] == 'dsc':
            dsc = True
        elif validation_type[i] == 'ahd':
            ahd = True
        elif validation_type[i] == 'whd':
            whd = True
        else:
            print('wrong validation type, please choose correct one !!!')
            return

    filepath = os.path.join(base, save_path, 'output_' + filename + '.csv')
    save_dir = os.path.join(base, save_path)
    # Normalize both input folders to .nii.gz labelmaps before comparing.
    gt_output_path = checkSegFormat(base, gt_path, 'gt', current_prefix)
    pred_output_path = checkSegFormat(base, pred_path, 'pred', current_prefix)
    try:
        os.mkdir(save_dir)
    except:
        print(f'{save_dir} already exists')

    try:
        os.mknod(filepath)
    except:
        print(f'{filepath} already exists')

    DSC = pd.DataFrame()
    # Use the first ground-truth volume to size the per-label accumulators.
    file = glob.glob(os.path.join(base, gt_output_path) + '/*nii.gz')[0]
    seg_file = ants.image_read(file)
    num_class = np.unique(seg_file.numpy().ravel()).shape[0]
    average_DSC = np.zeros((num_class-1))
    average_HD = np.zeros((num_class-1))
    k = 0
    for i in glob.glob(os.path.join(base, pred_output_path) + '/*nii.gz'):
        k += 1
        pred_img = ants.image_read(i)
        pred_spacing = list(pred_img.spacing)
        # Registration outputs embed extra underscore-separated fields in the
        # filename; strip them per segmentation type to recover the case id.
        if reg and seg_type == 'ET':
            file_name = os.path.basename(i).split('.')[0].split('_')[4] + '_' + os.path.basename(
                i).split('.')[0].split('_')[5] + '_' + os.path.basename(i).split('.')[0].split('_')[6]
            file_name1 = os.path.basename(i).split('.')[0]
        elif reg and seg_type == 'NC':
            file_name = os.path.basename(i).split(
                '.')[0].split('_')[3] + '_' + os.path.basename(i).split('.')[0].split('_')[4]
            file_name1 = os.path.basename(i).split('.')[0]
        elif reg and seg_type == 'HT':
            file_name = os.path.basename(i).split('.')[0].split('_')[2]
            file_name1 = os.path.basename(i).split('.')[0]
        else:
            file_name = os.path.basename(i).split('.')[0]
            file_name1 = os.path.basename(i).split('.')[0]
        gt_seg = os.path.join(base, gt_output_path, file_name + '.nii.gz')
        gt_img = ants.image_read(gt_seg)
        gt_spacing = list(gt_img.spacing)

        # Metrics in mm only make sense when both volumes share a spacing.
        if gt_spacing != pred_spacing:
            print(
                "Spacing of prediction and ground_truth is not matched, please check again !!!")
            return

        # NOTE(review): naming is swapped here — `ref`/`data_ref` holds the
        # prediction and `pred`/`data_pred` holds the ground truth; the call
        # below passes them in the order the metric function expects.
        ref = pred_img
        data_ref = ref.numpy()

        pred = gt_img
        data_pred = pred.numpy()

        num_class = len(np.unique(data_pred))
        ds, aver_DSC, aver_HD = dice_coefficient_and_hausdorff_distance(
            file_name1, data_ref, data_pred, num_class, pred_spacing, probability_map, dsc, ahd, whd, average_DSC, average_HD)
        DSC = pd.concat([DSC, ds])
        average_DSC = aver_DSC
        average_HD = aver_HD

    # Per-label averages over the k evaluated cases.
    avg_DSC = average_DSC / k
    avg_HD = average_HD / k
    print(avg_DSC)
    with open(os.path.join(base, save_path, "metric.txt"), 'w') as f:
        f.write("Label Value Label Name Average Dice Score Average Mean HD\n")
        for i in range(len(avg_DSC)):
            f.write(f'{str(i+1):^12}{str(label_list[2*i+1]):^12}{str(avg_DSC[i]):^20}{str(avg_HD[i]):^18}\n')
    DSC.to_csv(filepath)
352
+
353
+
354
# Script entry point: run the evaluation pipeline when executed directly.
if __name__ == '__main__':
    main()
deepatlas/metrics/surface_distance.py ADDED
@@ -0,0 +1,424 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2018 Google Inc. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS-IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language
13
+ from __future__ import absolute_import
14
+ from __future__ import division
15
+ from __future__ import print_function
16
+
17
+ import lookup_tables # pylint: disable=relative-beyond-top-level
18
+ import numpy as np
19
+ from scipy import ndimage
20
+
21
+ """
22
+
23
+ surface_distance.py
24
+
25
+ all of the surface_distance functions are borrowed from DeepMind surface_distance repository
26
+
27
+ """
28
+ def _assert_is_numpy_array(name, array):
29
+ """Raises an exception if `array` is not a numpy array."""
30
+ if not isinstance(array, np.ndarray):
31
+ raise ValueError("The argument {!r} should be a numpy array, not a "
32
+ "{}".format(name, type(array)))
33
+
34
+
35
+ def _check_nd_numpy_array(name, array, num_dims):
36
+ """Raises an exception if `array` is not a `num_dims`-D numpy array."""
37
+ if len(array.shape) != num_dims:
38
+ raise ValueError("The argument {!r} should be a {}D array, not of "
39
+ "shape {}".format(name, num_dims, array.shape))
40
+
41
+
42
def _check_2d_numpy_array(name, array):
    # Specialization of the generic rank check for 2D masks.
    _check_nd_numpy_array(name, array, 2)
44
+
45
+
46
def _check_3d_numpy_array(name, array):
    # Specialization of the generic rank check for 3D masks.
    _check_nd_numpy_array(name, array, 3)
48
+
49
+
50
def _assert_is_bool_numpy_array(name, array):
    """Raises ValueError unless `array` is a numpy array with a bool dtype.

    BUG FIX: the original compared against `np.bool`, a deprecated alias for
    the builtin `bool` that was removed in NumPy 1.24; `np.bool_` is the
    canonical dtype scalar and compares equal to a bool dtype.
    """
    _assert_is_numpy_array(name, array)
    if array.dtype != np.bool_:
        raise ValueError("The argument {!r} should be a numpy array of type bool, "
                         "not {}".format(name, array.dtype))
55
+
56
+
57
+ def _compute_bounding_box(mask):
58
+ """Computes the bounding box of the masks.
59
+ This function generalizes to arbitrary number of dimensions great or equal
60
+ to 1.
61
+ Args:
62
+ mask: The 2D or 3D numpy mask, where '0' means background and non-zero means
63
+ foreground.
64
+ Returns:
65
+ A tuple:
66
+ - The coordinates of the first point of the bounding box (smallest on all
67
+ axes), or `None` if the mask contains only zeros.
68
+ - The coordinates of the second point of the bounding box (greatest on all
69
+ axes), or `None` if the mask contains only zeros.
70
+ """
71
+ num_dims = len(mask.shape)
72
+ bbox_min = np.zeros(num_dims, np.int64)
73
+ bbox_max = np.zeros(num_dims, np.int64)
74
+
75
+ # max projection to the x0-axis
76
+ proj_0 = np.amax(mask, axis=tuple(range(num_dims))[1:])
77
+ idx_nonzero_0 = np.nonzero(proj_0)[0]
78
+ if len(idx_nonzero_0) == 0: # pylint: disable=g-explicit-length-test
79
+ return None, None
80
+
81
+ bbox_min[0] = np.min(idx_nonzero_0)
82
+ bbox_max[0] = np.max(idx_nonzero_0)
83
+
84
+ # max projection to the i-th-axis for i in {1, ..., num_dims - 1}
85
+ for axis in range(1, num_dims):
86
+ max_over_axes = list(range(num_dims)) # Python 3 compatible
87
+ max_over_axes.pop(axis) # Remove the i-th dimension from the max
88
+ max_over_axes = tuple(max_over_axes) # numpy expects a tuple of ints
89
+ proj = np.amax(mask, axis=max_over_axes)
90
+ idx_nonzero = np.nonzero(proj)[0]
91
+ bbox_min[axis] = np.min(idx_nonzero)
92
+ bbox_max[axis] = np.max(idx_nonzero)
93
+
94
+ return bbox_min, bbox_max
95
+
96
+
97
+ def _crop_to_bounding_box(mask, bbox_min, bbox_max):
98
+ """Crops a 2D or 3D mask to the bounding box specified by `bbox_{min,max}`."""
99
+ # we need to zeropad the cropped region with 1 voxel at the lower,
100
+ # the right (and the back on 3D) sides. This is required to obtain the
101
+ # "full" convolution result with the 2x2 (or 2x2x2 in 3D) kernel.
102
+ # TODO: This is correct only if the object is interior to the
103
+ # bounding box.
104
+ cropmask = np.zeros((bbox_max - bbox_min) + 2, np.uint8)
105
+
106
+ num_dims = len(mask.shape)
107
+ # pyformat: disable
108
+ if num_dims == 2:
109
+ cropmask[0:-1, 0:-1] = mask[bbox_min[0]:bbox_max[0] + 1,
110
+ bbox_min[1]:bbox_max[1] + 1]
111
+ elif num_dims == 3:
112
+ cropmask[0:-1, 0:-1, 0:-1] = mask[bbox_min[0]:bbox_max[0] + 1,
113
+ bbox_min[1]:bbox_max[1] + 1,
114
+ bbox_min[2]:bbox_max[2] + 1]
115
+ # pyformat: enable
116
+ else:
117
+ assert False
118
+
119
+ return cropmask
120
+
121
+
122
+ def _sort_distances_surfels(distances, surfel_areas):
123
+ """Sorts the two list with respect to the tuple of (distance, surfel_area).
124
+ Args:
125
+ distances: The distances from A to B (e.g. `distances_gt_to_pred`).
126
+ surfel_areas: The surfel areas for A (e.g. `surfel_areas_gt`).
127
+ Returns:
128
+ A tuple of the sorted (distances, surfel_areas).
129
+ """
130
+ sorted_surfels = np.array(sorted(zip(distances, surfel_areas)))
131
+ return sorted_surfels[:, 0], sorted_surfels[:, 1]
132
+
133
+
134
def compute_surface_distances(mask_gt,
                              mask_pred,
                              spacing_mm):
    """Computes closest distances from all surface points to the other surface.

    This function can be applied to 2D or 3D tensors. For 2D, both masks must be
    2D and `spacing_mm` must be a 2-element list. For 3D, both masks must be 3D
    and `spacing_mm` must be a 3-element list.

    Finds all contour elements (resp. surface elements "surfels" in 3D) in the
    ground truth mask `mask_gt` and the predicted mask `mask_pred`, computes
    their length in mm (resp. area in mm^2) and the distance to the closest
    point on the other contour (resp. surface). It returns two sorted lists of
    distances together with the corresponding contour lengths (resp. surfel
    areas). If one of the masks is empty, the corresponding lists are empty and
    all distances in the other list are `inf`.

    Args:
        mask_gt: 2-dim (resp. 3-dim) bool Numpy array. The ground truth mask.
        mask_pred: 2-dim (resp. 3-dim) bool Numpy array. The predicted mask.
        spacing_mm: 2-element (resp. 3-element) list-like structure. Voxel
            spacing in x0 and x1 (resp. x0, x1 and x2) directions.

    Returns:
        A dict with:
        "distances_gt_to_pred": 1-dim float array, distances in mm from all
            ground truth surface elements to the predicted surface, ascending.
        "distances_pred_to_gt": 1-dim float array, distances in mm from all
            predicted surface elements to the ground truth surface, ascending.
        "surfel_areas_gt": contour lengths (mm) / surfel areas (mm^2) of the
            ground truth, in the same order as distances_gt_to_pred.
        "surfel_areas_pred": contour lengths (mm) / surfel areas (mm^2) of the
            prediction, in the same order as distances_pred_to_gt.

    Raises:
        ValueError: If the masks and the `spacing_mm` arguments are of
            incompatible shape or type. Or if the masks are not 2D or 3D.

    Compatibility fixes vs. the original: `scipy.ndimage.filters.correlate`
    and `scipy.ndimage.morphology.distance_transform_edt` moved to the
    top-level `scipy.ndimage` namespace (the old submodules were deprecated
    and later removed), and `np.Inf` was removed in NumPy 2.0 (`np.inf`).
    """
    # The terms used in this function are for the 3D case. In particular,
    # surface in 2D stands for contours in 3D. The surface elements in 3D
    # correspond to the line elements in 2D.

    _assert_is_bool_numpy_array("mask_gt", mask_gt)
    _assert_is_bool_numpy_array("mask_pred", mask_pred)

    if not len(mask_gt.shape) == len(mask_pred.shape) == len(spacing_mm):
        raise ValueError("The arguments must be of compatible shape. Got mask_gt "
                         "with {} dimensions ({}) and mask_pred with {} dimensions "
                         "({}), while the spacing_mm was {} elements.".format(
                             len(mask_gt.shape),
                             mask_gt.shape, len(
                                 mask_pred.shape), mask_pred.shape,
                             len(spacing_mm)))

    num_dims = len(spacing_mm)
    if num_dims == 2:
        _check_2d_numpy_array("mask_gt", mask_gt)
        _check_2d_numpy_array("mask_pred", mask_pred)

        # compute the area for all 16 possible surface elements
        # (given a 2x2 neighbourhood) according to the spacing_mm
        neighbour_code_to_surface_area = (
            lookup_tables.create_table_neighbour_code_to_contour_length(spacing_mm))
        kernel = lookup_tables.ENCODE_NEIGHBOURHOOD_2D_KERNEL
        full_true_neighbours = 0b1111
    elif num_dims == 3:
        _check_3d_numpy_array("mask_gt", mask_gt)
        _check_3d_numpy_array("mask_pred", mask_pred)

        # compute the area for all 256 possible surface elements
        # (given a 2x2x2 neighbourhood) according to the spacing_mm
        neighbour_code_to_surface_area = (
            lookup_tables.create_table_neighbour_code_to_surface_area(spacing_mm))
        kernel = lookup_tables.ENCODE_NEIGHBOURHOOD_3D_KERNEL
        full_true_neighbours = 0b11111111
    else:
        raise ValueError("Only 2D and 3D masks are supported, not "
                         "{}D.".format(num_dims))

    # compute the bounding box of the masks to trim the volume to the smallest
    # possible processing subvolume
    bbox_min, bbox_max = _compute_bounding_box(mask_gt | mask_pred)
    # Both the min/max bbox are None at the same time, so we only check one.
    if bbox_min is None:
        return {
            "distances_gt_to_pred": np.array([]),
            "distances_pred_to_gt": np.array([]),
            "surfel_areas_gt": np.array([]),
            "surfel_areas_pred": np.array([]),
        }

    # crop the processing subvolume.
    cropmask_gt = _crop_to_bounding_box(mask_gt, bbox_min, bbox_max)
    cropmask_pred = _crop_to_bounding_box(mask_pred, bbox_min, bbox_max)

    # compute the neighbour code (local binary pattern) for each voxel
    # the resulting arrays are spatially shifted by minus half a voxel in each
    # axis, i.e. the points are located at the corners of the original voxels
    neighbour_code_map_gt = ndimage.correlate(
        cropmask_gt.astype(np.uint8), kernel, mode="constant", cval=0)
    neighbour_code_map_pred = ndimage.correlate(
        cropmask_pred.astype(np.uint8), kernel, mode="constant", cval=0)

    # create masks with the surface voxels
    borders_gt = ((neighbour_code_map_gt != 0) &
                  (neighbour_code_map_gt != full_true_neighbours))
    borders_pred = ((neighbour_code_map_pred != 0) &
                    (neighbour_code_map_pred != full_true_neighbours))

    # compute the distance transform (closest distance of each voxel to the
    # surface voxels)
    if borders_gt.any():
        distmap_gt = ndimage.distance_transform_edt(
            ~borders_gt, sampling=spacing_mm)
    else:
        distmap_gt = np.inf * np.ones(borders_gt.shape)

    if borders_pred.any():
        distmap_pred = ndimage.distance_transform_edt(
            ~borders_pred, sampling=spacing_mm)
    else:
        distmap_pred = np.inf * np.ones(borders_pred.shape)

    # compute the area of each surface element
    surface_area_map_gt = neighbour_code_to_surface_area[neighbour_code_map_gt]
    surface_area_map_pred = neighbour_code_to_surface_area[
        neighbour_code_map_pred]

    # create a list of all surface elements with distance and area
    distances_gt_to_pred = distmap_pred[borders_gt]
    distances_pred_to_gt = distmap_gt[borders_pred]
    surfel_areas_gt = surface_area_map_gt[borders_gt]
    surfel_areas_pred = surface_area_map_pred[borders_pred]

    # sort them by distance
    if distances_gt_to_pred.shape != (0,):
        distances_gt_to_pred, surfel_areas_gt = _sort_distances_surfels(
            distances_gt_to_pred, surfel_areas_gt)

    if distances_pred_to_gt.shape != (0,):
        distances_pred_to_gt, surfel_areas_pred = _sort_distances_surfels(
            distances_pred_to_gt, surfel_areas_pred)

    return {
        "distances_gt_to_pred": distances_gt_to_pred,
        "distances_pred_to_gt": distances_pred_to_gt,
        "surfel_areas_gt": surfel_areas_gt,
        "surfel_areas_pred": surfel_areas_pred,
    }
285
+
286
+
287
def compute_average_surface_distance(surface_distances):
    """Returns the average surface distance.

    Computes the average surface distances by correctly weighting each surface
    element by its area. Call compute_surface_distances(...) first to obtain
    the `surface_distances` dict.

    Args:
        surface_distances: dict with "distances_gt_to_pred",
            "distances_pred_to_gt", "surfel_areas_gt", "surfel_areas_pred"
            created by compute_surface_distances().

    Returns:
        A tuple of two floats: the area-weighted average distance (mm) from the
        ground truth surface to the predicted surface, and vice versa.
    """
    d_gt = surface_distances["distances_gt_to_pred"]
    d_pred = surface_distances["distances_pred_to_gt"]
    a_gt = surface_distances["surfel_areas_gt"]
    a_pred = surface_distances["surfel_areas_pred"]
    avg_gt_to_pred = np.sum(d_gt * a_gt) / np.sum(a_gt)
    avg_pred_to_gt = np.sum(d_pred * a_pred) / np.sum(a_pred)
    return (avg_gt_to_pred, avg_pred_to_gt)
313
+
314
+
315
def compute_robust_hausdorff(surface_distances, percent):
    """Computes the robust Hausdorff distance.

    "Robust", because it uses the `percent` percentile of the distances instead
    of the maximum distance. The percentile is computed by correctly taking the
    area of each surface element into account.

    Args:
        surface_distances: dict with "distances_gt_to_pred",
            "distances_pred_to_gt", "surfel_areas_gt", "surfel_areas_pred"
            created by compute_surface_distances().
        percent: a float value between 0 and 100.

    Returns:
        a float value. The robust Hausdorff distance in mm. Infinite when one
        of the surfaces is empty.

    Compatibility fix: `np.Inf` was removed in NumPy 2.0; use `np.inf`.
    """
    distances_gt_to_pred = surface_distances["distances_gt_to_pred"]
    distances_pred_to_gt = surface_distances["distances_pred_to_gt"]
    surfel_areas_gt = surface_distances["surfel_areas_gt"]
    surfel_areas_pred = surface_distances["surfel_areas_pred"]
    if len(distances_gt_to_pred) > 0:  # pylint: disable=g-explicit-length-test
        # Area-weighted cumulative distribution of the sorted distances.
        surfel_areas_cum_gt = np.cumsum(
            surfel_areas_gt) / np.sum(surfel_areas_gt)
        idx = np.searchsorted(surfel_areas_cum_gt, percent/100.0)
        perc_distance_gt_to_pred = distances_gt_to_pred[
            min(idx, len(distances_gt_to_pred)-1)]
    else:
        perc_distance_gt_to_pred = np.inf

    if len(distances_pred_to_gt) > 0:  # pylint: disable=g-explicit-length-test
        surfel_areas_cum_pred = (np.cumsum(surfel_areas_pred) /
                                 np.sum(surfel_areas_pred))
        idx = np.searchsorted(surfel_areas_cum_pred, percent/100.0)
        perc_distance_pred_to_gt = distances_pred_to_gt[
            min(idx, len(distances_pred_to_gt)-1)]
    else:
        perc_distance_pred_to_gt = np.inf

    return max(perc_distance_gt_to_pred, perc_distance_pred_to_gt)
352
+
353
+
354
def compute_surface_overlap_at_tolerance(surface_distances, tolerance_mm):
    """Computes the overlap of the surfaces at a specified tolerance.

    Computes the overlap of the ground truth surface with the predicted surface
    and vice versa, allowing a specified tolerance (maximum surface-to-surface
    distance that is regarded as overlapping). The overlapping fraction is
    computed by correctly taking the area of each surface element into account.

    Args:
        surface_distances: dict with "distances_gt_to_pred",
            "distances_pred_to_gt", "surfel_areas_gt", "surfel_areas_pred"
            created by compute_surface_distances().
        tolerance_mm: a float value. The tolerance in mm.

    Returns:
        A tuple of two floats in [0.0, 1.0]: the overlap fraction of the ground
        truth surface with the predicted surface, and vice versa.
    """
    d_gt = surface_distances["distances_gt_to_pred"]
    d_pred = surface_distances["distances_pred_to_gt"]
    a_gt = surface_distances["surfel_areas_gt"]
    a_pred = surface_distances["surfel_areas_pred"]
    # Fraction of each surface's total area lying within tolerance of the other.
    rel_overlap_gt = np.sum(a_gt[d_gt <= tolerance_mm]) / np.sum(a_gt)
    rel_overlap_pred = np.sum(a_pred[d_pred <= tolerance_mm]) / np.sum(a_pred)
    return (rel_overlap_gt, rel_overlap_pred)
380
+
381
+
382
def compute_surface_dice_at_tolerance(surface_distances, tolerance_mm):
    """Computes the _surface_ DICE coefficient at a specified tolerance.

    Not to be confused with the standard _volumetric_ DICE coefficient. The
    surface DICE measures the overlap of two surfaces instead of two volumes.
    A surface element counts as overlapping (or touching) when its closest
    distance to the other surface is less than or equal to the tolerance. The
    coefficient ranges from 0.0 (no overlap) to 1.0 (perfect overlap).

    Args:
        surface_distances: dict with "distances_gt_to_pred",
            "distances_pred_to_gt", "surfel_areas_gt", "surfel_areas_pred"
            created by compute_surface_distances().
        tolerance_mm: a float value. The tolerance in mm.

    Returns:
        A float value. The surface DICE coefficient in [0.0, 1.0].
    """
    d_gt = surface_distances["distances_gt_to_pred"]
    d_pred = surface_distances["distances_pred_to_gt"]
    a_gt = surface_distances["surfel_areas_gt"]
    a_pred = surface_distances["surfel_areas_pred"]
    # Total area within tolerance on each side, normalized by total area.
    overlap_gt = np.sum(a_gt[d_gt <= tolerance_mm])
    overlap_pred = np.sum(a_pred[d_pred <= tolerance_mm])
    return (overlap_gt + overlap_pred) / (np.sum(a_gt) + np.sum(a_pred))
408
+
409
+
410
def compute_dice_coefficient(mask_gt, mask_pred):
    """Computes the Soerensen-Dice coefficient.

    Compares the ground truth mask `mask_gt` with the predicted mask
    `mask_pred`.

    Args:
        mask_gt: 3-dim Numpy array of type bool. The ground truth mask.
        mask_pred: 3-dim Numpy array of type bool. The predicted mask.

    Returns:
        The dice coefficient as float. If both masks are empty, the result
        is NaN.

    Compatibility fix: `np.NaN` was removed in NumPy 2.0; use `np.nan`.
    """
    volume_sum = mask_gt.sum() + mask_pred.sum()
    if volume_sum == 0:
        # Both masks empty: Dice is undefined.
        return np.nan
    volume_intersect = (mask_gt & mask_pred).sum()
    return 2*volume_intersect / volume_sum
deepatlas/network/network.py ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import monai
2
+ import torch
3
+ import itk
4
+ import numpy as np
5
+ import matplotlib.pyplot as plt
6
+ from typing import Optional, Sequence, Tuple, Union
7
+ from monai.networks.layers.factories import Act, Norm
8
+
9
+
10
def segNet(
    spatial_dim: int,
    in_channel: int,
    out_channel: int,
    channel: Sequence[int],
    stride: Sequence[int],
    num_res_unit: int = 0,
    acts: Union[Tuple, str] = Act.PRELU,
    norms: Union[Tuple, str] = Norm.INSTANCE,
    dropouts: float = 0.0,
):
    """Build the MONAI UNet used as the segmentation network.

    Thin factory around ``monai.networks.nets.UNet``; all arguments are
    forwarded unchanged to the UNet constructor.
    """
    return monai.networks.nets.UNet(
        spatial_dims=spatial_dim,
        in_channels=in_channel,
        out_channels=out_channel,
        channels=channel,
        strides=stride,
        num_res_units=num_res_unit,
        act=acts,
        norm=norms,
        dropout=dropouts,
    )
33
+
34
def regNet(
    spatial_dim: int,
    in_channel: int,
    out_channel: int,
    channel: Sequence[int],
    stride: Sequence[int],
    num_res_unit: int = 0,
    acts: Union[Tuple, str] = Act.PRELU,
    norms: Union[Tuple, str] = Norm.INSTANCE,
    dropouts: float = 0.0,
):
    """Build the MONAI UNet used as the registration network.

    Identical architecture factory to ``segNet``; kept separate so the two
    networks can be configured independently by callers.
    """
    return monai.networks.nets.UNet(
        spatial_dims=spatial_dim,
        in_channels=in_channel,
        out_channels=out_channel,
        channels=channel,
        strides=stride,
        num_res_units=num_res_unit,
        act=acts,
        norm=norms,
        dropout=dropouts,
    )
deepatlas/preprocess/crop.py ADDED
@@ -0,0 +1,264 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import glob
3
+ import ants
4
+ import nibabel as nib
5
+ import os
6
+ import argparse
7
+ import sys
8
+
9
+
10
def parse_command_line():
    """Parse CLI arguments for the cropping/preprocessing pipeline.

    Fixes the '-sp' help text, which previously said "image directory"
    (copy-paste from '-ip') instead of "segmentation directory".
    """
    parser = argparse.ArgumentParser(
        description='pipeline for data preprocessing')
    parser.add_argument('-bp', metavar='base path', type=str,
                        help="absolute path of the base directory")
    parser.add_argument('-ip', metavar='image path', type=str,
                        help="relative path of the image directory")
    parser.add_argument('-sp', metavar='segmentation path', type=str,
                        help="relative path of the segmentation directory")
    parser.add_argument('-op', metavar='preprocessing result output path', type=str, default='output',
                        help='relative path of the preprocessing result directory')
    parser.add_argument('-rs', metavar='shape after resizing', type=int, nargs='+',
                        help='shape after resizing the image and segmentation. Expected to be 2^N')
    argv = parser.parse_args()
    return argv
25
+
26
+
27
def pad(raw_image, bound_x, bound_y, bound_z, resize, seg=False):
    """Crop `raw_image` to exactly `resize` voxels around the given ROI.

    The ROI (bound_x/y/z, each a (low, high) voxel-index pair) is grown per
    axis until it reaches the requested size; `split` biases the growth 80/20
    toward the upper side.  If the grown window would leave the volume on one
    side, it is shifted back inside.  NOTE(review): assumes `resize` fits
    within the image on each axis — otherwise the asserts below fire.

    Args:
        raw_image: 3-D numpy array (image intensities or label map).
        bound_x, bound_y, bound_z: ROI bounds as (low, high) indices.
        resize: target (x, y, z) shape; each entry must be >= the ROI extent.
        seg: when True, voxels outside the original ROI are zeroed first so
            labels outside the bounding box do not leak into the crop.

    Returns:
        Array of shape `resize`.
    """
    diff_x = resize[0] - (bound_x[1]-bound_x[0])
    diff_y = resize[1] - (bound_y[1]-bound_y[0])
    diff_z = resize[2] - (bound_z[1]-bound_z[0])
    if diff_x < 0 or diff_y < 0 or diff_z < 0:
        sys.exit(
            'the dimension of ROI is larger than the resizing dimension, please choose a different padding dimension')
    left_y, right_y = split(diff_y)
    left_z, right_z = split(diff_z)
    left_x, right_x = split(diff_x)
    new_bound_x_left = bound_x[0] - left_x
    new_bound_x_right = bound_x[1] + right_x
    new_bound_y_left = bound_y[0] - left_y
    new_bound_y_right = bound_y[1] + right_y
    new_bound_z_left = bound_z[0] - left_z
    new_bound_z_right = bound_z[1] + right_z
    # check if x_dim out of bounds; if so anchor the window at the volume
    # edge (window width stays resize[0])
    if new_bound_x_left < 0:
        new_bound_x_left = 0
        new_bound_x_right = bound_x[1] + diff_x - bound_x[0]

    elif new_bound_x_right > raw_image.shape[0]:
        new_bound_x_right = raw_image.shape[0]
        new_bound_x_left = bound_x[0] - \
            (diff_x - (raw_image.shape[0] - bound_x[1]))
    # check if y_dim out of bounds
    if new_bound_y_left < 0:
        new_bound_y_left = 0
        new_bound_y_right = bound_y[1] + diff_y - bound_y[0]

    elif new_bound_y_right > raw_image.shape[1]:
        new_bound_y_right = raw_image.shape[1]
        new_bound_y_left = bound_y[0] - \
            (diff_y - (raw_image.shape[1] - bound_y[1]))
    # check if z_dim out of bounds
    if new_bound_z_left < 0:
        new_bound_z_left = 0
        new_bound_z_right = bound_z[1] + diff_z - bound_z[0]

    elif new_bound_z_right > raw_image.shape[2]:
        new_bound_z_right = raw_image.shape[2]
        new_bound_z_left = bound_z[0] - \
            (diff_z - (raw_image.shape[2] - bound_z[1]))

    assert new_bound_x_right - new_bound_x_left == resize[0]
    assert new_bound_y_right - new_bound_y_left == resize[1]
    assert new_bound_z_right - new_bound_z_left == resize[2]
    if not seg:
        return raw_image[new_bound_x_left:new_bound_x_right, new_bound_y_left:new_bound_y_right, new_bound_z_left:new_bound_z_right]
    else:
        # keep only labels inside the original ROI, then crop the window
        new_seg = np.zeros_like(raw_image)
        new_seg[bound_x[0]:bound_x[1],
                bound_y[0]:bound_y[1], bound_z[0]:bound_z[1]] = raw_image[bound_x[0]:bound_x[1], bound_y[0]:bound_y[1], bound_z[0]:bound_z[1]]
        return new_seg[new_bound_x_left:new_bound_x_right, new_bound_y_left:new_bound_y_right, new_bound_z_left:new_bound_z_right]
82
+
83
+
84
def split(distance):
    """Split a padding amount into (left, right) shares.

    The left share is deliberately smaller — 80% of half the distance —
    biasing the padding toward the upper/right side of the axis.
    """
    if not distance:
        return 0, 0
    left_part = int(int(distance / 2) * 0.8)
    return left_part, distance - left_part
92
+
93
+
94
def crop(nib_img, nib_seg, ants_img, ants_seg, resize):
    """Z-score normalize the image, then crop/pad image and label to `resize`
    around the union bounding box of all labels."""
    img_arr = nib_img.get_fdata()
    seg_arr = nib_seg.get_fdata()
    gem = ants.label_geometry_measures(ants_seg, ants_img)
    bounds = []
    for axis in ('x', 'y', 'z'):
        lo = min(list(gem.loc[:, 'BoundingBoxLower_' + axis]))
        hi = max(list(gem.loc[:, 'BoundingBoxUpper_' + axis]))
        bounds.append(tuple([lo, hi]))
    tuple_x, tuple_y, tuple_z = bounds

    img_arr = Zscore_normalization(img_arr)

    img_arr = pad(img_arr, tuple_x, tuple_y, tuple_z, resize, seg=False)
    seg_arr = pad(seg_arr, tuple_x, tuple_y, tuple_z, resize, seg=True)
    return img_arr, seg_arr
115
+
116
+
117
def get_geometry_info(seg_path, img_path):
    """Union of label bounding boxes over every image with a matching label.

    Scans `img_path` for *.nii.gz files; for each one with a same-named file
    in `seg_path`, measures the label bounding box and keeps the overall
    min/max per axis.

    Uses np.inf rather than np.Inf: the capitalized alias was removed in
    NumPy 2.0.

    Returns:
        [(low_x, upp_x), (low_y, upp_y), (low_z, upp_z)] in voxel indices
        (infinities if no labeled case was found).
    """
    abs_low_x = np.inf
    abs_upp_x = -np.inf
    abs_low_y = np.inf
    abs_upp_y = -np.inf
    abs_low_z = np.inf
    abs_upp_z = -np.inf
    for i in sorted(glob.glob(os.path.join(img_path, '*.nii.gz'))):
        name = os.path.basename(i)
        if not os.path.exists(os.path.join(seg_path, name)):
            continue
        seg = ants.image_read(os.path.join(seg_path, name))
        img = ants.image_read(i)
        gem = ants.label_geometry_measures(seg, img)
        abs_low_x = min(abs_low_x, min(list(gem.loc[:, 'BoundingBoxLower_x'])))
        abs_upp_x = max(abs_upp_x, max(list(gem.loc[:, 'BoundingBoxUpper_x'])))
        abs_low_y = min(abs_low_y, min(list(gem.loc[:, 'BoundingBoxLower_y'])))
        abs_upp_y = max(abs_upp_y, max(list(gem.loc[:, 'BoundingBoxUpper_y'])))
        abs_low_z = min(abs_low_z, min(list(gem.loc[:, 'BoundingBoxLower_z'])))
        abs_upp_z = max(abs_upp_z, max(list(gem.loc[:, 'BoundingBoxUpper_z'])))

    return [tuple([abs_low_x, abs_upp_x]),
            tuple([abs_low_y, abs_upp_y]),
            tuple([abs_low_z, abs_upp_z])]
153
+
154
+
155
def cropV2(nib_img, ants_img, resize, geo_info):
    """Normalize and crop/pad an image without a label, using precomputed geometry.

    Fixes a bug where Zscore_normalization was applied twice in a row, which
    re-centred already-normalized data and made unlabeled images
    inconsistently scaled relative to crop().

    Args:
        nib_img: nibabel image whose data array is cropped.
        ants_img: unused; kept for interface compatibility with callers.
        resize: target (x, y, z) shape.
        geo_info: [(x_lo, x_hi), (y_lo, y_hi), (z_lo, z_hi)] voxel bounds.
    """
    img = nib_img.get_fdata()
    img = Zscore_normalization(img)
    tuple_x = geo_info[0]
    tuple_y = geo_info[1]
    tuple_z = geo_info[2]
    img = pad(img, tuple_x, tuple_y, tuple_z, resize, seg=False)
    return img
164
+
165
+
166
def MinMax_normalization(scan):
    """Rescale intensities linearly to [0, 1].

    Guards against a constant image (max == min), which previously caused a
    divide-by-zero and produced NaNs; such scans now map to all zeros.
    """
    lb = np.amin(scan)
    ub = np.amax(scan)
    if ub == lb:
        return np.zeros_like(scan, dtype=float)
    return (scan - lb) / (ub - lb)
171
+
172
+
173
def Zscore_normalization(scan):
    """Percentile-clip the scan, then z-score it.

    NOTE(review): the mean/std are computed on the *unclipped* data, and the
    lower percentile is 0.05 (not 0.5, the usual counterpart to 99.5) —
    behavior preserved as-is; confirm intent.
    """
    mu = np.mean(scan)
    sigma = np.std(scan)
    clipped = np.clip(scan,
                      np.percentile(scan, 0.05),
                      np.percentile(scan, 99.5))
    return (clipped - mu) / sigma
181
+
182
+
183
def load_data(img_path, seg_path):
    """Load an image/label pair with both the nibabel and ANTs readers.

    Returns:
        (nib_img, nib_seg, ants_img, ants_seg)
    """
    return (
        nib.load(img_path),
        nib.load(seg_path),
        ants.image_read(img_path),
        ants.image_read(seg_path),
    )
189
+
190
+
191
def path_to_id(path):
    """Return scan IDs (basename up to the first '.') of all *nii.gz files in `path`."""
    return [os.path.basename(f).split('.')[0]
            for f in glob.glob(path + '/*nii.gz')]
197
+
198
+
199
def save_file(left_img, left_seg, nib_img, nib_seg, output_img, output_seg, scan_id):
    """Write a cropped image/label pair as <scan_id>.nii.gz, reusing the
    source affine/header for each."""
    fname = scan_id + '.nii.gz'
    nib.Nifti1Image(left_img, affine=nib_img.affine, header=nib_img.header
                    ).to_filename(os.path.join(output_img, fname))
    nib.Nifti1Image(left_seg, affine=nib_seg.affine, header=nib_seg.header
                    ).to_filename(os.path.join(output_seg, fname))
209
+
210
def save_fileV2(left_img, nib_img, output_img, scan_id):
    """Write a cropped image (no label) as <scan_id>.nii.gz with the source
    affine/header."""
    nii = nib.Nifti1Image(left_img, affine=nib_img.affine, header=nib_img.header)
    nii.to_filename(os.path.join(output_img, scan_id + '.nii.gz'))
215
+
216
+
217
def main():
    """CLI entry: crop/pad every image (and its label when present) to the target shape.

    Replaces the previous bare `except:` blocks around os.mkdir — which
    silently swallowed *any* error (e.g. permission denied), not just
    "directory exists" — with os.makedirs(exist_ok=True).  Also avoids
    shadowing the builtin `id`.
    """
    args = parse_command_line()
    base_path = args.bp
    image_path = os.path.join(base_path, args.ip)
    seg_path = os.path.join(base_path, args.sp)
    output_path = os.path.join(base_path, args.op)
    resize_shape = args.rs
    output_img = os.path.join(output_path, 'images')
    output_seg = os.path.join(output_path, 'labels')
    label_list = path_to_id(seg_path)
    geo_info = get_geometry_info(seg_path, image_path)
    for directory in (output_path, output_img, output_seg):
        os.makedirs(directory, exist_ok=True)

    for img_file in sorted(glob.glob(image_path + '/*nii.gz')):
        scan_id = os.path.basename(img_file).split('.')[0]
        if scan_id in label_list:
            # labeled case: crop both image and segmentation
            label_path = os.path.join(seg_path, scan_id + '.nii.gz')
            nib_img, nib_seg, ants_img, ants_seg = load_data(img_file, label_path)
            left_img, left_seg = crop(
                nib_img, nib_seg, ants_img, ants_seg, resize_shape)
            print(
                'Scan ID: ' + scan_id + f', before cropping: {nib_img.get_fdata().shape}, after cropping and padding the image and seg: {left_img.shape}')
            save_file(left_img, left_seg, nib_img,
                      nib_seg, output_img, output_seg, scan_id)
        else:
            # unlabeled case: crop the image using the precomputed geometry
            nib_img = nib.load(img_file)
            ant_img = ants.image_read(img_file)
            outImg = cropV2(nib_img, ant_img, resize_shape, geo_info)
            print(
                'Scan ID: ' + scan_id + f', before cropping: {nib_img.get_fdata().shape}, after cropping and padding the image: {outImg.shape}')
            save_fileV2(outImg, nib_img, output_img, scan_id)
261
+
262
+
263
+ if __name__ == '__main__':
264
+ main()
deepatlas/preprocess/crop_flip_test.py ADDED
@@ -0,0 +1,303 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import glob
3
+ import ants
4
+ import nibabel as nib
5
+ import os
6
+ import argparse
7
+ import sys
8
+ from crop import crop, cropV2, save_fileV2
9
+ from pathlib import Path
10
+
11
def parse_command_line():
    """Parse CLI arguments for test-time crop/flip preprocessing.

    -rs: target (x, y, z) shape after crop/pad (expected powers of two)
    -fp: flag; when set, volumes are split at the midline and mirrored
    -ti: task id/name used to locate the DeepAtlas data folders
    -op: output folder name shared across registration/crop/prediction steps
    """
    parser = argparse.ArgumentParser(
        description='pipeline for data preprocessing')
    parser.add_argument('-rs', metavar='shape after resizing', type=int, nargs='+',
                        help='shape after resizing the image and segmentation. Expected to be 2^N')
    parser.add_argument('-fp', action='store_true',
                        help='check if need to flip the data')
    parser.add_argument('-ti', metavar='task id and name', type=str,
                        help='task name and id')
    parser.add_argument('-op', metavar='output path for both registration & crop step', type=str,
                        help="should be same name in the registration, crop and final prediction steps")
    argv = parser.parse_args()
    return argv
24
+
25
def path_to_id(path):
    """Return scan IDs (basename up to the first '.') of all *nii.gz files in `path`."""
    return [os.path.basename(f).split('.')[0]
            for f in glob.glob(path + '/*nii.gz')]
31
+
32
def pad(raw_image, bound_x, bound_y, bound_z, resize, seg=False):
    """Crop `raw_image` to exactly `resize` voxels around the given ROI.

    The ROI (bound_x/y/z, each a (low, high) voxel-index pair) is grown per
    axis until it reaches the requested size; `split` biases the growth 80/20
    toward the upper side.  If the grown window would leave the volume on one
    side, it is shifted back inside.  NOTE(review): assumes `resize` fits
    within the image on each axis — otherwise the asserts below fire.

    Args:
        raw_image: 3-D numpy array (image intensities or label map).
        bound_x, bound_y, bound_z: ROI bounds as (low, high) indices.
        resize: target (x, y, z) shape; each entry must be >= the ROI extent.
        seg: when True, voxels outside the original ROI are zeroed first so
            labels outside the bounding box do not leak into the crop.

    Returns:
        Array of shape `resize`.
    """
    diff_x = resize[0] - (bound_x[1]-bound_x[0])
    diff_y = resize[1] - (bound_y[1]-bound_y[0])
    diff_z = resize[2] - (bound_z[1]-bound_z[0])
    if diff_x < 0 or diff_y < 0 or diff_z < 0:
        sys.exit(
            'the dimension of ROI is larger than the resizing dimension, please choose a different padding dimension')
    left_y, right_y = split(diff_y)
    left_z, right_z = split(diff_z)
    left_x, right_x = split(diff_x)
    new_bound_x_left = bound_x[0] - left_x
    new_bound_x_right = bound_x[1] + right_x
    new_bound_y_left = bound_y[0] - left_y
    new_bound_y_right = bound_y[1] + right_y
    new_bound_z_left = bound_z[0] - left_z
    new_bound_z_right = bound_z[1] + right_z
    # check if x_dim out of bounds; if so anchor the window at the volume
    # edge (window width stays resize[0])
    if new_bound_x_left < 0:
        new_bound_x_left = 0
        new_bound_x_right = bound_x[1] + diff_x - bound_x[0]

    elif new_bound_x_right > raw_image.shape[0]:
        new_bound_x_right = raw_image.shape[0]
        new_bound_x_left = bound_x[0] - \
            (diff_x - (raw_image.shape[0] - bound_x[1]))
    # check if y_dim out of bounds
    if new_bound_y_left < 0:
        new_bound_y_left = 0
        new_bound_y_right = bound_y[1] + diff_y - bound_y[0]

    elif new_bound_y_right > raw_image.shape[1]:
        new_bound_y_right = raw_image.shape[1]
        new_bound_y_left = bound_y[0] - \
            (diff_y - (raw_image.shape[1] - bound_y[1]))
    # check if z_dim out of bounds
    if new_bound_z_left < 0:
        new_bound_z_left = 0
        new_bound_z_right = bound_z[1] + diff_z - bound_z[0]

    elif new_bound_z_right > raw_image.shape[2]:
        new_bound_z_right = raw_image.shape[2]
        new_bound_z_left = bound_z[0] - \
            (diff_z - (raw_image.shape[2] - bound_z[1]))

    assert new_bound_x_right - new_bound_x_left == resize[0]
    assert new_bound_y_right - new_bound_y_left == resize[1]
    assert new_bound_z_right - new_bound_z_left == resize[2]
    if not seg:
        return raw_image[new_bound_x_left:new_bound_x_right, new_bound_y_left:new_bound_y_right, new_bound_z_left:new_bound_z_right]
    else:
        # keep only labels inside the original ROI, then crop the window
        new_seg = np.zeros_like(raw_image)
        new_seg[bound_x[0]:bound_x[1],
                bound_y[0]:bound_y[1], bound_z[0]:bound_z[1]] = raw_image[bound_x[0]:bound_x[1], bound_y[0]:bound_y[1], bound_z[0]:bound_z[1]]
        return new_seg[new_bound_x_left:new_bound_x_right, new_bound_y_left:new_bound_y_right, new_bound_z_left:new_bound_z_right]
86
+
87
+
88
def split(distance):
    """Split a padding amount into (left, right) shares.

    The left share is deliberately smaller — 80% of half the distance —
    biasing the padding toward the upper/right side of the axis.
    """
    if not distance:
        return 0, 0
    left_part = int(int(distance / 2) * 0.8)
    return left_part, distance - left_part
96
+
97
+
98
def crop_and_flip(nib_img, nib_seg, ants_img, ants_seg, resize):
    """Split a scan at the x-midline of its label bounding box into two halves.

    The image is z-score normalized; both halves of image and label map are
    cropped/padded to `resize`, and the right half is mirrored along x so
    that both halves share one orientation.

    Returns:
        (left_img, left_seg, flipped_right_img, flipped_right_seg)
    """
    img = nib_img.get_fdata()
    seg = nib_seg.get_fdata()
    # union bounding box over all labels (voxel indices)
    gem = ants.label_geometry_measures(ants_seg, ants_img)
    low_x = min(list(gem.loc[:, 'BoundingBoxLower_x']))
    upp_x = max(list(gem.loc[:, 'BoundingBoxUpper_x']))
    low_y = min(list(gem.loc[:, 'BoundingBoxLower_y']))
    upp_y = max(list(gem.loc[:, 'BoundingBoxUpper_y']))
    low_z = min(list(gem.loc[:, 'BoundingBoxLower_z']))
    upp_z = max(list(gem.loc[:, 'BoundingBoxUpper_z']))

    img = Zscore_normalization(img)
    # Compute mid point of the box along x, where the volume is split
    mid_x = int((low_x + upp_x) / 2)

    tuple_x_left = tuple([low_x, mid_x])
    tuple_x_right = tuple([mid_x, upp_x])
    tuple_y = tuple([low_y, upp_y])
    tuple_z = tuple([low_z, upp_z])
    left_img = pad(img, tuple_x_left, tuple_y, tuple_z, resize, seg=False)
    left_seg = pad(seg, tuple_x_left, tuple_y, tuple_z, resize, seg=True)
    right_img = pad(img, tuple_x_right, tuple_y, tuple_z, resize, seg=False)
    right_seg = pad(seg, tuple_x_right, tuple_y, tuple_z, resize, seg=True)
    flipped_right_img = np.flip(right_img, axis=0)
    flipped_right_seg = np.flip(right_seg, axis=0)

    return left_img, left_seg, flipped_right_img, flipped_right_seg
126
+
127
+
128
def crop_and_flip_V2(nib_img, ants_img, resize, geo_info):
    """Like crop_and_flip, but for an image without a label map.

    Uses the precomputed bounding box `geo_info` ([(x_lo, x_hi), (y_lo, y_hi),
    (z_lo, z_hi)]) instead of measuring one.  `ants_img` is currently unused.

    Returns:
        (left_img, flipped_right_img)
    """
    img = nib_img.get_fdata()
    tuple_x = geo_info[0]
    tuple_y = geo_info[1]
    tuple_z = geo_info[2]
    low_x = tuple_x[0]
    upp_x = tuple_x[1]
    img = Zscore_normalization(img)
    # Compute mid point of the box along x, where the volume is split
    mid_x = int((low_x + upp_x) / 2)

    tuple_x_left = tuple([low_x, mid_x])
    tuple_x_right = tuple([mid_x, upp_x])

    left_img = pad(img, tuple_x_left, tuple_y, tuple_z, resize, seg=False)
    right_img = pad(img, tuple_x_right, tuple_y, tuple_z, resize, seg=False)
    flipped_right_img = np.flip(right_img, axis=0)

    return left_img, flipped_right_img
148
+
149
+
150
def MinMax_normalization(scan):
    """Rescale intensities linearly to [0, 1].

    Guards against a constant image (max == min), which previously caused a
    divide-by-zero and produced NaNs; such scans now map to all zeros.
    """
    lb = np.amin(scan)
    ub = np.amax(scan)
    if ub == lb:
        return np.zeros_like(scan, dtype=float)
    return (scan - lb) / (ub - lb)
155
+
156
+
157
def Zscore_normalization(scan):
    """Clip to the [0.05, 99.5] percentiles, then z-score the scan.

    NOTE(review): mean/std are computed on the *unclipped* data, and 0.05
    looks asymmetric against 99.5 (0.5 would be the usual counterpart) —
    preserved as-is; confirm intent.
    """
    mean = np.mean(scan)
    std = np.std(scan)
    lb = np.percentile(scan, 0.05)
    ub = np.percentile(scan, 99.5)
    scan = np.clip(scan, lb, ub)
    scan = (scan - mean) / std
    return scan
165
+
166
+
167
def load_data(img_path, seg_path):
    """Load an image/label pair with both the nibabel and ANTs readers.

    Returns:
        (nib_img, nib_seg, ants_img, ants_seg)
    """
    return (
        nib.load(img_path),
        nib.load(seg_path),
        ants.image_read(img_path),
        ants.image_read(seg_path),
    )
173
+
174
+
175
def crop_flip_save_file(left_img, left_seg, flipped_right_img, flipped_right_seg, nib_img, nib_seg, output_img, output_seg, scan_id):
    """Write both halves (image + label) as NIfTI files.

    NOTE(review): the *left* arrays are written under the 'right_' prefix and
    the flipped right arrays under 'left_' — possibly deliberate
    (radiological vs. anatomical naming); confirm before relying on the
    file names.
    """
    left_img_nii = nib.Nifti1Image(
        left_img, affine=nib_img.affine, header=nib_img.header)
    left_seg_nii = nib.Nifti1Image(
        left_seg, affine=nib_seg.affine, header=nib_seg.header)
    right_img_nii = nib.Nifti1Image(
        flipped_right_img, affine=nib_img.affine, header=nib_img.header)
    right_seg_nii = nib.Nifti1Image(
        flipped_right_seg, affine=nib_seg.affine, header=nib_seg.header)
    left_img_nii.to_filename(os.path.join(
        output_img, 'right_' + scan_id + '.nii.gz'))
    left_seg_nii.to_filename(os.path.join(
        output_seg, 'right_' + scan_id + '.nii.gz'))
    right_img_nii.to_filename(os.path.join(
        output_img, 'left_' + scan_id + '.nii.gz'))
    right_seg_nii.to_filename(os.path.join(
        output_seg, 'left_' + scan_id + '.nii.gz'))
192
+
193
def get_geometry_info(seg_path, img_path):
    """Label bounding box of the *first* labeled case only.

    Unlike the training-side variant (which unions boxes over all cases),
    this picks a single template segmentation — presumably acceptable for
    already-registered test data; confirm.

    Returns:
        [(low_x, upp_x), (low_y, upp_y), (low_z, upp_z)] in voxel indices.
    """
    template = (glob.glob(seg_path + '/*nii.gz'))[0]
    template_id = os.path.basename(template).split('.')[0]
    img = ants.image_read(os.path.join(img_path, template_id + '.nii.gz'))
    seg = ants.image_read(template)
    gem = ants.label_geometry_measures(seg, img)
    low_x = min(list(gem.loc[:, 'BoundingBoxLower_x']))
    upp_x = max(list(gem.loc[:, 'BoundingBoxUpper_x']))
    low_y = min(list(gem.loc[:, 'BoundingBoxLower_y']))
    upp_y = max(list(gem.loc[:, 'BoundingBoxUpper_y']))
    low_z = min(list(gem.loc[:, 'BoundingBoxLower_z']))
    upp_z = max(list(gem.loc[:, 'BoundingBoxUpper_z']))
    tuple_x = tuple([low_x, upp_x])
    tuple_y = tuple([low_y, upp_y])
    tuple_z = tuple([low_z, upp_z])
    return [tuple_x, tuple_y, tuple_z]
209
+
210
+
211
def crop_flip_save_file_V2(left_img, flipped_right_img, nib_img, output_img, scan_id):
    """Write both image halves (no labels) as NIfTI files.

    NOTE(review): same left/right prefix swap as crop_flip_save_file (left
    array saved as 'right_*', flipped right as 'left_*') — confirm
    intentional.
    """
    left_img_nii = nib.Nifti1Image(
        left_img, affine=nib_img.affine, header=nib_img.header)
    right_img_nii = nib.Nifti1Image(
        flipped_right_img, affine=nib_img.affine, header=nib_img.header)
    left_img_nii.to_filename(os.path.join(
        output_img, 'right_' + scan_id + '.nii.gz'))
    right_img_nii.to_filename(os.path.join(
        output_img, 'left_' + scan_id + '.nii.gz'))
220
+
221
def crop_save_file(left_img, left_seg, nib_img, nib_seg, output_img, output_seg, scan_id):
    """Save a cropped image/label pair as <scan_id>.nii.gz in the output dirs,
    reusing the source affine/header for each."""
    fname = scan_id + '.nii.gz'
    nib.Nifti1Image(left_img, affine=nib_img.affine, header=nib_img.header
                    ).to_filename(os.path.join(output_img, fname))
    nib.Nifti1Image(left_seg, affine=nib_seg.affine, header=nib_seg.header
                    ).to_filename(os.path.join(output_seg, fname))
230
+
231
+
232
def main():
    """CLI entry: crop (and optionally midline-split + flip) customized test data.

    Reads images/labels from the DeepAtlas raw-data tree two levels above the
    current working directory and writes preprocessed volumes under
    deepatlas_preprocessed.  Replaces the previous bare `except:` blocks
    around os.mkdir (which hid any error, not just "already exists") with
    os.makedirs(exist_ok=True), and avoids shadowing the builtin `id`.
    """
    ROOT_DIR = str(Path(os.getcwd()).parent.parent.absolute())
    args = parse_command_line()
    resize_shape = args.rs
    flipped = args.fp
    deepatlas_path = ROOT_DIR
    task_id = args.ti
    base_path = os.path.join(deepatlas_path, 'deepatlas_raw_data_base', task_id, 'customize_test_data', args.op)
    image_path = os.path.join(base_path, 'images')
    seg_path = os.path.join(base_path, 'labels')
    task_path = os.path.join(deepatlas_path, 'deepatlas_preprocessed', task_id)
    output_data_path = os.path.join(task_path, 'customize_test_data')
    out_data_path = os.path.join(output_data_path, args.op)
    output_img = os.path.join(out_data_path, 'images')
    output_seg = os.path.join(out_data_path, 'labels')
    label_list = path_to_id(seg_path)
    geo_info = get_geometry_info(seg_path, image_path)
    for directory in (output_data_path, out_data_path, output_img, output_seg):
        os.makedirs(directory, exist_ok=True)

    for img_file in sorted(glob.glob(image_path + '/*nii.gz')):
        scan_id = os.path.basename(img_file).split('.')[0]
        if scan_id in label_list:
            # labeled case: crop (and optionally flip) image + segmentation
            label_path = os.path.join(seg_path, scan_id + '.nii.gz')
            nib_img, nib_seg, ants_img, ants_seg = load_data(img_file, label_path)
            if flipped:
                left_img, left_seg, flipped_right_img, flipped_right_seg = crop_and_flip(
                    nib_img, nib_seg, ants_img, ants_seg, resize_shape)
                print(
                    'Scan ID: ' + scan_id + f', img & seg before cropping: {nib_img.get_fdata().shape}, after cropping, flipping and padding: {left_img.shape} and {flipped_right_img.shape}')
                crop_flip_save_file(left_img, left_seg, flipped_right_img, flipped_right_seg,
                                    nib_img, nib_seg, output_img, output_seg, scan_id)
            else:
                left_img, left_seg = crop(
                    nib_img, nib_seg, ants_img, ants_seg, resize_shape)
                print(
                    'Scan ID: ' + scan_id + f', img & seg before cropping: {nib_img.get_fdata().shape}, after cropping and padding the image and seg: {left_img.shape}')
                crop_save_file(left_img, left_seg, nib_img,
                               nib_seg, output_img, output_seg, scan_id)
        else:
            # unlabeled case: use the template-derived geometry info
            nib_img = nib.load(img_file)
            ant_img = ants.image_read(img_file)
            if flipped:
                left_img, flipped_right_img = crop_and_flip_V2(nib_img, ant_img, resize_shape, geo_info)
                print(
                    'Scan ID: ' + scan_id + f', img before cropping: {nib_img.get_fdata().shape}, after cropping, flipping and padding: {left_img.shape} and {flipped_right_img.shape}')
                crop_flip_save_file_V2(left_img, flipped_right_img, nib_img, output_img, scan_id)
            else:
                outImg = cropV2(nib_img, ant_img, resize_shape, geo_info)
                print(
                    'Scan ID: ' + scan_id + f', img before cropping: {nib_img.get_fdata().shape}, after cropping and padding the image: {outImg.shape}')
                save_fileV2(outImg, nib_img, output_img, scan_id)
301
+
302
+ if __name__ == '__main__':
303
+ main()
deepatlas/preprocess/crop_flip_training.py ADDED
@@ -0,0 +1,332 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import glob
3
+ import ants
4
+ import nibabel as nib
5
+ import os
6
+ import argparse
7
+ import sys
8
+ from crop import crop, cropV2, save_fileV2
9
+ from pathlib import Path
10
+
11
def parse_command_line():
    """Parse CLI arguments for training-time crop/flip preprocessing.

    -rs: target (x, y, z) shape after crop/pad (expected powers of two)
    -fp: flag; when set, volumes are split at the midline and mirrored
    -ti: task id/name used to locate the DeepAtlas data folders
    """
    parser = argparse.ArgumentParser(
        description='pipeline for data preprocessing')
    parser.add_argument('-rs', metavar='shape after resizing', type=int, nargs='+',
                        help='shape after resizing the image and segmentation. Expected to be 2^N')
    parser.add_argument('-fp', action='store_true',
                        help='check if need to flip the data')
    parser.add_argument('-ti', metavar='task id and name', type=str,
                        help='task name and id')
    argv = parser.parse_args()
    return argv
22
+
23
def path_to_id(path):
    """Return scan IDs (basename up to the first '.') of all *nii.gz files in `path`."""
    return [os.path.basename(f).split('.')[0]
            for f in glob.glob(path + '/*nii.gz')]
29
+
30
def pad(raw_image, bound_x, bound_y, bound_z, resize, seg=False):
    """Crop `raw_image` to exactly `resize` voxels around the given ROI.

    The ROI (bound_x/y/z, each a (low, high) voxel-index pair) is grown per
    axis until it reaches the requested size; `split` biases the growth 80/20
    toward the upper side.  If the grown window would leave the volume on one
    side, it is shifted back inside.  NOTE(review): assumes `resize` fits
    within the image on each axis — otherwise the asserts below fire.

    Args:
        raw_image: 3-D numpy array (image intensities or label map).
        bound_x, bound_y, bound_z: ROI bounds as (low, high) indices.
        resize: target (x, y, z) shape; each entry must be >= the ROI extent.
        seg: when True, voxels outside the original ROI are zeroed first so
            labels outside the bounding box do not leak into the crop.

    Returns:
        Array of shape `resize`.
    """
    diff_x = resize[0] - (bound_x[1]-bound_x[0])
    diff_y = resize[1] - (bound_y[1]-bound_y[0])
    diff_z = resize[2] - (bound_z[1]-bound_z[0])
    if diff_x < 0 or diff_y < 0 or diff_z < 0:
        sys.exit(
            'the dimension of ROI is larger than the resizing dimension, please choose a different padding dimension')
    left_y, right_y = split(diff_y)
    left_z, right_z = split(diff_z)
    left_x, right_x = split(diff_x)
    new_bound_x_left = bound_x[0] - left_x
    new_bound_x_right = bound_x[1] + right_x
    new_bound_y_left = bound_y[0] - left_y
    new_bound_y_right = bound_y[1] + right_y
    new_bound_z_left = bound_z[0] - left_z
    new_bound_z_right = bound_z[1] + right_z
    # check if x_dim out of bounds; if so anchor the window at the volume
    # edge (window width stays resize[0])
    if new_bound_x_left < 0:
        new_bound_x_left = 0
        new_bound_x_right = bound_x[1] + diff_x - bound_x[0]

    elif new_bound_x_right > raw_image.shape[0]:
        new_bound_x_right = raw_image.shape[0]
        new_bound_x_left = bound_x[0] - \
            (diff_x - (raw_image.shape[0] - bound_x[1]))
    # check if y_dim out of bounds
    if new_bound_y_left < 0:
        new_bound_y_left = 0
        new_bound_y_right = bound_y[1] + diff_y - bound_y[0]

    elif new_bound_y_right > raw_image.shape[1]:
        new_bound_y_right = raw_image.shape[1]
        new_bound_y_left = bound_y[0] - \
            (diff_y - (raw_image.shape[1] - bound_y[1]))
    # check if z_dim out of bounds
    if new_bound_z_left < 0:
        new_bound_z_left = 0
        new_bound_z_right = bound_z[1] + diff_z - bound_z[0]

    elif new_bound_z_right > raw_image.shape[2]:
        new_bound_z_right = raw_image.shape[2]
        new_bound_z_left = bound_z[0] - \
            (diff_z - (raw_image.shape[2] - bound_z[1]))

    assert new_bound_x_right - new_bound_x_left == resize[0]
    assert new_bound_y_right - new_bound_y_left == resize[1]
    assert new_bound_z_right - new_bound_z_left == resize[2]
    if not seg:
        return raw_image[new_bound_x_left:new_bound_x_right, new_bound_y_left:new_bound_y_right, new_bound_z_left:new_bound_z_right]
    else:
        # keep only labels inside the original ROI, then crop the window
        new_seg = np.zeros_like(raw_image)
        new_seg[bound_x[0]:bound_x[1],
                bound_y[0]:bound_y[1], bound_z[0]:bound_z[1]] = raw_image[bound_x[0]:bound_x[1], bound_y[0]:bound_y[1], bound_z[0]:bound_z[1]]
        return new_seg[new_bound_x_left:new_bound_x_right, new_bound_y_left:new_bound_y_right, new_bound_z_left:new_bound_z_right]
84
+
85
+
86
def split(distance):
    """Split a padding amount into (left, right) shares.

    The left share is deliberately smaller — 80% of half the distance —
    biasing the padding toward the upper/right side of the axis.
    """
    if not distance:
        return 0, 0
    left_part = int(int(distance / 2) * 0.8)
    return left_part, distance - left_part
94
+
95
+
96
def crop_and_flip(nib_img, nib_seg, ants_img, ants_seg, resize):
    """Split a scan at the x-midline of its label bounding box into two halves.

    The image is z-score normalized; both halves of image and label map are
    cropped/padded to `resize`, and the right half is mirrored along x so
    that both halves share one orientation (left/right data augmentation).

    Returns:
        (left_img, left_seg, flipped_right_img, flipped_right_seg)
    """
    img = nib_img.get_fdata()
    seg = nib_seg.get_fdata()
    # union bounding box over all labels (voxel indices)
    gem = ants.label_geometry_measures(ants_seg, ants_img)
    low_x = min(list(gem.loc[:, 'BoundingBoxLower_x']))
    upp_x = max(list(gem.loc[:, 'BoundingBoxUpper_x']))
    low_y = min(list(gem.loc[:, 'BoundingBoxLower_y']))
    upp_y = max(list(gem.loc[:, 'BoundingBoxUpper_y']))
    low_z = min(list(gem.loc[:, 'BoundingBoxLower_z']))
    upp_z = max(list(gem.loc[:, 'BoundingBoxUpper_z']))

    img = Zscore_normalization(img)
    # Compute mid point of the box along x, where the volume is split
    mid_x = int((low_x + upp_x) / 2)

    tuple_x_left = tuple([low_x, mid_x])
    tuple_x_right = tuple([mid_x, upp_x])
    tuple_y = tuple([low_y, upp_y])
    tuple_z = tuple([low_z, upp_z])
    left_img = pad(img, tuple_x_left, tuple_y, tuple_z, resize, seg=False)
    left_seg = pad(seg, tuple_x_left, tuple_y, tuple_z, resize, seg=True)
    right_img = pad(img, tuple_x_right, tuple_y, tuple_z, resize, seg=False)
    right_seg = pad(seg, tuple_x_right, tuple_y, tuple_z, resize, seg=True)
    flipped_right_img = np.flip(right_img, axis=0)
    flipped_right_seg = np.flip(right_seg, axis=0)

    return left_img, left_seg, flipped_right_img, flipped_right_seg
124
+
125
+
126
def crop_and_flip_V2(nib_img, ants_img, resize, geo_info):
    """Like crop_and_flip, but for an image without a label map.

    Uses the precomputed bounding box `geo_info` ([(x_lo, x_hi), (y_lo, y_hi),
    (z_lo, z_hi)]) instead of measuring one.  `ants_img` is currently unused.

    Returns:
        (left_img, flipped_right_img)
    """
    img = nib_img.get_fdata()
    tuple_x = geo_info[0]
    tuple_y = geo_info[1]
    tuple_z = geo_info[2]
    low_x = tuple_x[0]
    upp_x = tuple_x[1]
    img = Zscore_normalization(img)
    # Compute mid point of the box along x, where the volume is split
    mid_x = int((low_x + upp_x) / 2)

    tuple_x_left = tuple([low_x, mid_x])
    tuple_x_right = tuple([mid_x, upp_x])

    left_img = pad(img, tuple_x_left, tuple_y, tuple_z, resize, seg=False)
    right_img = pad(img, tuple_x_right, tuple_y, tuple_z, resize, seg=False)
    flipped_right_img = np.flip(right_img, axis=0)

    return left_img, flipped_right_img
146
+
147
+
148
def MinMax_normalization(scan):
    """Rescale intensities linearly to [0, 1].

    Guards against a constant image (max == min), which previously caused a
    divide-by-zero and produced NaNs; such scans now map to all zeros.
    """
    lb = np.amin(scan)
    ub = np.amax(scan)
    if ub == lb:
        return np.zeros_like(scan, dtype=float)
    return (scan - lb) / (ub - lb)
153
+
154
+
155
def Zscore_normalization(scan):
    """Clip to the [0.05, 99.5] percentiles, then z-score the scan.

    NOTE(review): mean/std are computed on the *unclipped* data, and 0.05
    looks asymmetric against 99.5 (0.5 would be the usual counterpart) —
    preserved as-is; confirm intent.
    """
    mean = np.mean(scan)
    std = np.std(scan)
    lb = np.percentile(scan, 0.05)
    ub = np.percentile(scan, 99.5)
    scan = np.clip(scan, lb, ub)
    scan = (scan - mean) / std
    return scan
163
+
164
+
165
def load_data(img_path, seg_path):
    """Load an image/label pair with both the nibabel and ANTs readers.

    Returns (nib_img, nib_seg, ants_img, ants_seg).
    """
    nib_img = nib.load(img_path)
    nib_seg = nib.load(seg_path)
    ants_img = ants.image_read(img_path)
    ants_seg = ants.image_read(seg_path)
    return nib_img, nib_seg, ants_img, ants_seg
171
+
172
+
173
def crop_flip_save_file(left_img, left_seg, flipped_right_img, flipped_right_seg, nib_img, nib_seg, output_img, output_seg, scan_id):
    """Write both cropped halves (image + segmentation) as NIfTI files.

    NOTE(review): the anatomical-left half is saved with a 'right_' prefix and
    the mirrored right half with 'left_' — presumably a deliberate
    radiological-orientation convention; confirm against downstream naming.
    """
    volumes = [
        (left_img, nib_img, output_img, 'right_'),
        (left_seg, nib_seg, output_seg, 'right_'),
        (flipped_right_img, nib_img, output_img, 'left_'),
        (flipped_right_seg, nib_seg, output_seg, 'left_'),
    ]
    for data, reference, out_dir, prefix in volumes:
        # Reuse the source scan's affine/header so geometry is preserved.
        nii = nib.Nifti1Image(data, affine=reference.affine, header=reference.header)
        nii.to_filename(os.path.join(out_dir, prefix + scan_id + '.nii.gz'))
190
+
191
def get_geometry_info(seg_path, img_path):
    """Union bounding box, per axis, over all labels of all labeled scans.

    Iterates *.nii.gz images in img_path that have a same-named label file in
    seg_path, and accumulates the smallest lower / largest upper bounding-box
    coordinate reported by ants.label_geometry_measures.
    Returns [(low_x, upp_x), (low_y, upp_y), (low_z, upp_z)].
    """
    lows = {'x': np.Inf, 'y': np.Inf, 'z': np.Inf}
    upps = {'x': -np.Inf, 'y': -np.Inf, 'z': -np.Inf}
    for img_file in sorted(glob.glob(os.path.join(img_path, '*.nii.gz'))):
        seg_file = os.path.join(seg_path, os.path.basename(img_file))
        if not os.path.exists(seg_file):
            continue  # unlabeled scan: contributes nothing to the box
        seg = ants.image_read(seg_file)
        img = ants.image_read(img_file)
        gem = ants.label_geometry_measures(seg, img)
        for axis in ('x', 'y', 'z'):
            lows[axis] = min(lows[axis], min(list(gem.loc[:, f'BoundingBoxLower_{axis}'])))
            upps[axis] = max(upps[axis], max(list(gem.loc[:, f'BoundingBoxUpper_{axis}'])))
    return [tuple([lows[a], upps[a]]) for a in ('x', 'y', 'z')]
227
+
228
+
229
def crop_flip_save_file_V2(left_img, flipped_right_img, nib_img, output_img, scan_id):
    """Write the two cropped image halves (no labels) as NIfTI files.

    Same prefix convention as the labeled variant: left half -> 'right_',
    mirrored right half -> 'left_'.
    """
    for data, prefix in ((left_img, 'right_'), (flipped_right_img, 'left_')):
        nii = nib.Nifti1Image(data, affine=nib_img.affine, header=nib_img.header)
        nii.to_filename(os.path.join(output_img, prefix + scan_id + '.nii.gz'))
238
+
239
def crop_save_file(left_img, left_seg, nib_img, nib_seg, output_img, output_seg, scan_id):
    """Write one cropped image/segmentation pair as NIfTI, without prefixes."""
    pairs = (
        (left_img, nib_img, output_img),
        (left_seg, nib_seg, output_seg),
    )
    for data, reference, out_dir in pairs:
        nii = nib.Nifti1Image(data, affine=reference.affine, header=reference.header)
        nii.to_filename(os.path.join(out_dir, scan_id + '.nii.gz'))
248
+
249
+
250
def main():
    """Crop (and optionally mirror-split) every training scan of a task.

    Reads images/labels from deepatlas_raw_data_base/<task>/Training_dataset
    and writes preprocessed volumes under deepatlas_preprocessed/<task>/.
    Labeled scans go through crop/crop_and_flip; unlabeled scans use the V2
    variants driven by the dataset-wide bounding box from get_geometry_info.

    Fixes over the original: the duplicated try/mkdir block for output_path is
    gone, the bare `except:` directory-creation blocks are replaced with
    os.makedirs(..., exist_ok=True), and the loop variable no longer shadows
    the builtin `id`.
    """
    ROOT_DIR = str(Path(os.getcwd()).parent.parent.absolute())
    args = parse_command_line()
    resize_shape = args.rs
    flipped = args.fp
    deepatlas_path = ROOT_DIR
    task_id = args.ti
    base_path = os.path.join(deepatlas_path, 'deepatlas_raw_data_base', task_id, 'Training_dataset')
    image_path = os.path.join(base_path, 'images')
    seg_path = os.path.join(base_path, 'labels')
    output_path = os.path.join(deepatlas_path, 'deepatlas_preprocessed')
    task_path = os.path.join(output_path, task_id)
    training_data_path = os.path.join(task_path, 'Training_dataset')
    output_img = os.path.join(training_data_path, 'images')
    output_seg = os.path.join(training_data_path, 'labels')
    label_list = path_to_id(seg_path)
    geo_info = get_geometry_info(seg_path, image_path)
    print(geo_info)

    # makedirs with exist_ok replaces five near-identical try/except blocks
    # (one of which created output_path twice).
    for directory in (output_path, task_path, training_data_path, output_img, output_seg):
        os.makedirs(directory, exist_ok=True)

    for img_file in sorted(glob.glob(image_path + '/*nii.gz')):
        scan_id = os.path.basename(img_file).split('.')[0]
        if scan_id in label_list:
            # Labeled scan: process image and segmentation together.
            label_path = os.path.join(seg_path, scan_id + '.nii.gz')
            nib_img, nib_seg, ants_img, ants_seg = load_data(img_file, label_path)
            if flipped:
                left_img, left_seg, flipped_right_img, flipped_right_seg = crop_and_flip(
                    nib_img, nib_seg, ants_img, ants_seg, resize_shape)
                print(
                    'Scan ID: ' + scan_id + f', img & seg before cropping: {nib_img.get_fdata().shape}, after cropping, flipping and padding: {left_img.shape} and {flipped_right_img.shape}')
                crop_flip_save_file(left_img, left_seg, flipped_right_img, flipped_right_seg,
                                    nib_img, nib_seg, output_img, output_seg, scan_id)
            else:
                left_img, left_seg = crop(
                    nib_img, nib_seg, ants_img, ants_seg, resize_shape)
                print(
                    'Scan ID: ' + scan_id + f', img & seg before cropping: {nib_img.get_fdata().shape}, after cropping and padding the image and seg: {left_img.shape}')
                crop_save_file(left_img, left_seg, nib_img,
                               nib_seg, output_img, output_seg, scan_id)
        else:
            # Unlabeled scan: crop with the dataset-wide bounding box.
            nib_img = nib.load(img_file)
            ant_img = ants.image_read(img_file)
            if flipped:
                left_img, flipped_right_img = crop_and_flip_V2(nib_img, ant_img, resize_shape, geo_info)
                print(
                    'Scan ID: ' + scan_id + f', img before cropping: {nib_img.get_fdata().shape}, after cropping, flipping and padding: {left_img.shape} and {flipped_right_img.shape}')
                crop_flip_save_file_V2(left_img, flipped_right_img, nib_img, output_img, scan_id)
            else:
                outImg = cropV2(nib_img, ant_img, resize_shape, geo_info)
                print(
                    'Scan ID: ' + scan_id + f', img before cropping: {nib_img.get_fdata().shape}, after cropping and padding the image: {outImg.shape}')
                save_fileV2(outImg, nib_img, output_img, scan_id)
330
+
331
# Run the crop/flip preprocessing pipeline when executed as a script.
if __name__ == '__main__':
    main()
deepatlas/preprocess/generate_info.py ADDED
@@ -0,0 +1,133 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
'''
python3 generate_info.py --config <config.json> [--train_only]
Reads the preprocessed task folder named in the configuration file and writes
an info JSON under Training_dataset/data_info/<folder_name>: either a k-fold
split of the training scans (labeled cases spread evenly across the folds,
unlabeled cases topping each fold up to size) or, with --train_only, a single
list containing every scan.
'''
7
+ import os
8
+ import glob
9
+ import random
10
+ import shutil
11
+ from pathlib import Path
12
+ from typing import Tuple
13
+ import numpy as np
14
+ from collections import OrderedDict
15
+ import json
16
+ import argparse
17
+ import sys
18
+ from collections import namedtuple
19
+
20
+ ROOT_DIR = str(Path(os.getcwd()).parent.parent.absolute())
21
+ sys.path.insert(0, os.path.join(ROOT_DIR, 'deepatlas/utils'))
22
+
23
+ from utils import (
24
+ make_if_dont_exist, load_json
25
+ )
26
+ """
27
+ creates a folder at a specified folder path if it does not exists
28
+ folder_path : relative path of the folder (from cur_dir) which needs to be created
29
+ over_write :(default: False) if True overwrite the existing folder
30
+ """
31
def parse_command_line():
    """Parse the dataset-split CLI: --config <json path>, --train_only flag."""
    print('Parsing Command Line Arguments')
    parser = argparse.ArgumentParser(
        description='pipeline for dataset split')
    parser.add_argument('--config', metavar='path to the configuration file', type=str,
                        help='absolute path to the configuration file')
    parser.add_argument('--train_only', action='store_true',
                        help='only training or training plus test')
    return parser.parse_args()
41
+
42
+
43
def split(img, seg, seg_path):
    """Partition image paths by label availability.

    An image whose basename also exists under seg_path (i.e. appears in *seg*)
    is labeled. Returns (labeled, unlabeled, everything) where labeled items
    are {'img', 'seg'} dicts, unlabeled items are {'img'} dicts, and
    everything preserves the input order with the same dict objects.
    """
    labeled, unlabeled, everything = [], [], []
    for img_file in img:
        candidate_seg = os.path.join(seg_path, os.path.basename(img_file))
        if candidate_seg in seg:
            entry = {"img": img_file,
                     "seg": candidate_seg}
            labeled.append(entry)
        else:
            entry = {"img": img_file}
            unlabeled.append(entry)
        everything.append(entry)
    return labeled, unlabeled, everything
60
+
61
def main():
    """Write the train/test fold-assignment JSON for one preprocessed task.

    Reads task_name / num_fold / folder_name from the --config JSON, lists the
    preprocessed images and labels, and either distributes all scans across
    num_fold folds (labeled scans first, unlabeled scans filling each fold up
    to its target size) or, with --train_only, dumps a flat list of all scans.
    """
    random.seed(2938649572)  # fixed seed -> reproducible shuffles across runs
    ROOT_DIR = str(Path(os.getcwd()).parent.parent.absolute())
    args = parse_command_line()
    config = args.config
    config = load_json(config)
    # Expose the JSON dict's entries as attributes (config.task_name, ...).
    config = namedtuple("config", config.keys())(*config.values())
    task_id = config.task_name
    k_fold = config.num_fold
    folder_name = config.folder_name
    train_only = args.train_only
    deepatlas_path = ROOT_DIR
    base_path = os.path.join(deepatlas_path, "deepatlas_preprocessed")
    task_path = os.path.join(base_path, task_id)
    img_path = os.path.join(task_path, 'Training_dataset', 'images')
    seg_path = os.path.join(task_path, 'Training_dataset', 'labels')
    image_list = glob.glob(img_path + "/*.nii.gz")
    label_list = glob.glob(seg_path + "/*.nii.gz")
    # label: {'img','seg'} items, unlabel: {'img'} items, total: both in order.
    label, unlabel, total = split(image_list, label_list, seg_path)
    piece_data = {}
    info_path = os.path.join(task_path, 'Training_dataset', 'data_info')
    folder_path = os.path.join(info_path, folder_name)
    make_if_dont_exist(info_path)
    make_if_dont_exist(folder_path)

    if not train_only:
        # compute number of scans for each fold
        num_images = len(image_list)
        num_each_fold_scan = divmod(num_images, k_fold)[0]
        fold_num_scan = np.repeat(num_each_fold_scan, k_fold)
        num_remain_scan = divmod(num_images, k_fold)[1]
        count = 0
        # Distribute the remainder one scan at a time, round-robin.
        while num_remain_scan > 0:
            fold_num_scan[count] += 1
            count = (count+1) % k_fold
            num_remain_scan -= 1

        # compute number of labels for each fold
        num_seg = len(label_list)
        num_each_fold_seg = divmod(num_seg, k_fold)[0]
        fold_num_seg = np.repeat(num_each_fold_seg, k_fold)
        num_remain_seg = divmod(num_seg, k_fold)[1]
        count = 0
        while num_remain_seg > 0:
            fold_num_seg[count] += 1
            count = (count+1) % k_fold
            num_remain_seg -= 1

        random.shuffle(unlabel)
        random.shuffle(label)
        start_point = 0
        start_point1 = 0
        # select scans for each fold: labeled quota first, then top each fold
        # up with unlabeled scans so fold sizes match fold_num_scan.
        for m in range(k_fold):
            piece_data[f'fold_{m+1}'] = label[start_point:start_point+fold_num_seg[m]]
            fold_num_unlabel = fold_num_scan[m] - fold_num_seg[m]
            piece_data[f'fold_{m+1}'].extend(unlabel[start_point1:start_point1+fold_num_unlabel])
            start_point += fold_num_seg[m]
            start_point1 += fold_num_unlabel

        info_json_path = os.path.join(folder_path, f'info.json')
    else:
        # --train_only: no folds, just every scan in one list.
        piece_data = total
        info_json_path = os.path.join(folder_path, f'info_train_only.json')

    with open(info_json_path, 'w') as f:
        json.dump(piece_data, f, indent=4, sort_keys=True)

    if os.path.exists(info_json_path):
        print("new info json file created!")
131
+
132
# Generate the fold-assignment JSON when executed as a script.
if __name__ == '__main__':
    main()
deepatlas/preprocess/process_data.py ADDED
@@ -0,0 +1,164 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import monai
2
+ import torch
3
+ import itk
4
+ import numpy as np
5
+ import glob
6
+ import os
7
+
8
+
9
def path_to_id(path):
    """Return the file stem before the first dot, e.g. '/d/x.nii.gz' -> 'x'."""
    filename = os.path.basename(path)
    return filename.split('.')[0]
11
+
12
+
13
def split_data(img_path, seg_path, num_seg):
    """Split images/labels into train and test sets.

    ~20% of the labeled cases (by sorted order of the label files) become the
    test set, paired with their images; all remaining images form the train
    set, with at most *num_seg* randomly chosen labels attached.

    Fix: the original created "copies" via plain assignment
    (`total_img_paths1 = total_img_paths`), so both names aliased one list;
    the code only worked because both aliases were mutated in lockstep. Real
    copies are now made, which yields identical results. The unused
    `img_test` accumulator was dropped.

    Returns (train, test, num_train, num_test) where train/test are lists of
    {'img'[, 'seg']} dicts. Uses np.random.shuffle; seed externally for
    reproducibility.
    """
    total_img_paths = sorted(glob.glob(img_path + '/*.nii.gz'))
    total_seg_paths = sorted(glob.glob(seg_path + '/*.nii.gz'))

    np.random.shuffle(total_img_paths)
    num_train = int(round(len(total_seg_paths) * 0.8))
    num_test = len(total_seg_paths) - num_train
    seg_train = total_seg_paths[:num_train]
    seg_test = total_seg_paths[num_train:]

    img_ids = list(map(path_to_id, total_img_paths))
    # Genuine copies: popping from these must not disturb the originals.
    remaining_imgs = list(total_img_paths)
    remaining_ids = list(img_ids)

    test = []
    for seg_index, seg_id in enumerate(map(path_to_id, seg_test)):
        assert seg_id in img_ids  # every test label must have a matching image
        idx = remaining_ids.index(seg_id)
        test.append({'img': remaining_imgs[idx], 'seg': seg_test[seg_index]})
        remaining_imgs.pop(idx)
        remaining_ids.pop(idx)

    # Whatever was not claimed by the test set trains the networks.
    img_train = remaining_imgs
    np.random.shuffle(seg_train)
    if num_seg < len(seg_train):
        seg_train_available = seg_train[:num_seg]
    else:
        seg_train_available = seg_train
    seg_ids = list(map(path_to_id, seg_train_available))

    train = []
    for img_index, img_id in enumerate(map(path_to_id, img_train)):
        data_item = {'img': img_train[img_index]}
        if img_id in seg_ids:
            data_item['seg'] = seg_train_available[seg_ids.index(img_id)]
        train.append(data_item)

    num_train = len(img_train)
    return train, test, num_train, num_test
62
+
63
+
64
def load_seg_dataset(train, valid):
    """Build cached MONAI datasets for the labeled (img + seg) splits.

    Each sample is loaded, channel-expanded, resampled to 1 mm isotropic
    spacing (trilinear for images, nearest for labels) and converted to
    tensors. Returns (train_dataset, valid_dataset).
    """
    transform_seg_available = monai.transforms.Compose(
        transforms=[
            monai.transforms.LoadImageD(keys=['img', 'seg'], image_only=True),
            monai.transforms.AddChannelD(keys=['img', 'seg']),
            monai.transforms.SpacingD(keys=['img', 'seg'], pixdim=(1., 1., 1.), mode=('trilinear', 'nearest')),
            monai.transforms.ToTensorD(keys=['img', 'seg'])
        ]
    )
    # Silence ITK warnings emitted while reading images.
    itk.ProcessObject.SetGlobalWarningDisplay(False)

    def _cached(data):
        # Identical caching policy for both splits.
        return monai.data.CacheDataset(
            data=data,
            transform=transform_seg_available,
            cache_num=16,
            hash_as_key=True
        )

    return _cached(train), _cached(valid)
88
+
89
+
90
def load_reg_dataset(train, valid):
    """Build cached MONAI datasets for registration pairs.

    train/valid are dicts keyed by segmentation availability ('00'/'01'/'10'/
    '11') mapping to lists of pair dicts. The two input images are resampled
    to 1 mm isotropic spacing and concatenated into one 2-channel 'img12'
    entry. Returns (train_datasets, valid_datasets) with the same keys.
    """
    transform_pair = monai.transforms.Compose(
        transforms=[
            monai.transforms.LoadImageD(
                keys=['img1', 'seg1', 'img2', 'seg2'], image_only=True, allow_missing_keys=True),
            monai.transforms.ToTensorD(
                keys=['img1', 'seg1', 'img2', 'seg2'], allow_missing_keys=True),
            monai.transforms.AddChannelD(
                keys=['img1', 'seg1', 'img2', 'seg2'], allow_missing_keys=True),
            monai.transforms.SpacingD(keys=['img1', 'seg1', 'img2', 'seg2'], pixdim=(1., 1., 1.), mode=(
                'trilinear', 'nearest', 'trilinear', 'nearest'), allow_missing_keys=True),
            monai.transforms.ConcatItemsD(
                keys=['img1', 'img2'], name='img12', dim=0),
            monai.transforms.DeleteItemsD(keys=['img1', 'img2'])
        ]
    )

    def _cached_by_availability(subdivided):
        # One cache per availability bucket, same policy for train and valid.
        return {
            availability: monai.data.CacheDataset(
                data=pair_list,
                transform=transform_pair,
                cache_num=32,
                hash_as_key=True
            )
            for availability, pair_list in subdivided.items()
        }

    return _cached_by_availability(train), _cached_by_availability(valid)
126
+
127
+
128
def take_data_pairs(data, symmetric=True):
    """Return all ordered (img1, img2) pairs from a list of sample dicts.

    Pairs of a sample with itself are excluded. Segmentations, when present
    on either member, are carried along as 'seg1'/'seg2'. With symmetric=True
    every pair appears in both orders; otherwise only (i, j) with j < i.
    """
    pairs = []
    for i, first in enumerate(data):
        j_limit = len(data) if symmetric else i
        for j, second in enumerate(data[:j_limit]):
            if i == j:
                continue  # never pair a scan with itself
            pair = {'img1': first['img'], 'img2': second['img']}
            if 'seg' in first:
                pair['seg1'] = first['seg']
            if 'seg' in second:
                pair['seg2'] = second['seg']
            pairs.append(pair)
    return pairs
151
+
152
+
153
def subdivide_list_of_data_pairs(data_pairs_list):
    """Bucket registration pairs by segmentation availability.

    Key is '<has seg1><has seg2>' with '1' for present, '0' for absent,
    e.g. a pair with only 'seg1' lands in bucket '10'.
    """
    buckets = {'00': [], '01': [], '10': [], '11': []}
    for pair in data_pairs_list:
        key = ('1' if 'seg1' in pair else '0') + ('1' if 'seg2' in pair else '0')
        buckets[key].append(pair)
    return buckets
deepatlas/preprocess/registration_test.py ADDED
@@ -0,0 +1,364 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import ants
3
+ import nrrd
4
+ import numpy as np
5
+ import glob
6
+ import slicerio
7
+ import shutil
8
+ import argparse
9
+ from pathlib import Path
10
+
11
def parse_command_line():
    """Parse CLI arguments for test-set co-registration.

    -bp absolute base dir; -op output dir name shared across pipeline steps;
    -template/-target_scan/-target_seg relative data dirs; -sl optional
    (value, name) label pairs; -ti task id/name.
    """
    print('---'*10)
    print('Parsing Command Line Arguments')
    parser = argparse.ArgumentParser(
        description='pipeline for dataset co-alignment')
    option_table = (
        (('-bp',), {'metavar': 'base path', 'type': str,
                    'help': "absolute path of the base directory"}),
        (('-op',), {'metavar': 'output path for both registration & crop steps', 'type': str,
                    'help': "relative path of the output directory, should be same name in the registration, crop and final prediction steps"}),
        (('-template',), {'metavar': 'template scan path', 'type': str,
                          'help': "relative path of the template scan directory"}),
        (('-target_scan',), {'metavar': 'target scan path', 'type': str,
                             'help': "relative path of the target image directory"}),
        (('-target_seg',), {'metavar': 'target segmentation path', 'type': str,
                            'help': "relative path of the target segmentation directory"}),
        (('-sl',), {'metavar': 'segmentation information list', 'type': str, 'nargs': '+',
                    'help': 'a list of label name and corresponding value'}),
        (('-ti',), {'metavar': 'task id and name', 'type': str,
                    'help': 'task name and id'}),
    )
    for flags, options in option_table:
        parser.add_argument(*flags, **options)
    return parser.parse_args()
32
+
33
+
34
def split_and_registration(template, target, base, template_images_path, target_images_path, seg_path, img_out_path, seg_out_path, template_fomat, target_fomat, has_label=False):
    """Register one target scan to the template and resample it (and, when
    has_label is True, its label map) into template space.

    Uses an ANTs "Similarity" transform (rigid + isotropic scale). Outputs are
    always written as <target>.nii.gz under img_out_path / seg_out_path,
    regardless of the input formats.
    """
    print('---'*10)
    print('Creating file paths')
    # Define the path for template, target, and segmentations (from template)
    fixed_path = os.path.join(base, template_images_path, template + '.' + template_fomat)
    moving_path = os.path.join(base, target_images_path, target + '.' + target_fomat)
    images_output = os.path.join(img_out_path, target + '.nii.gz')
    print('---'*10)
    print('Reading in the template and target image')
    # Read the template and target image
    template_image = ants.image_read(fixed_path)
    target_image = ants.image_read(moving_path)
    print('---'*10)
    print('Performing the template and target image registration')
    # Template is the fixed image; the computed transform maps target -> template.
    transform_forward = ants.registration(fixed=template_image, moving=target_image,
                                          type_of_transform="Similarity", verbose=False)
    if has_label:
        # seg_path is assumed to contain <target>.nii.gz (normalized labels).
        segmentation_path = os.path.join(
            base, seg_path, target + '.nii.gz')
        segmentation_output = os.path.join(
            seg_out_path, target + '.nii.gz')
        print('---'*10)
        print('Reading in the segmentation')
        # Split segmentations into individual components
        segment_target = ants.image_read(segmentation_path)
        print('---'*10)
        print('Applying the transformation for label propagation and image registration')
        # genericLabel interpolation keeps label values discrete.
        predicted_targets_image = ants.apply_transforms(
            fixed=template_image,
            moving=segment_target,
            transformlist=transform_forward["fwdtransforms"],
            interpolator="genericLabel",
            verbose=False)
        predicted_targets_image.to_file(segmentation_output)

    # Resample the intensity image with linear interpolation.
    reg_img = ants.apply_transforms(
        fixed=template_image,
        moving=target_image,
        transformlist=transform_forward["fwdtransforms"],
        interpolator="linear",
        verbose=False)
    print('---'*10)
    print("writing out transformed template segmentation")
    reg_img.to_file(images_output)
    print('Label Propagation & Image Registration complete')
79
+
80
+
81
def convert_to_one_hot(data, header, segment_indices=None):
    """Convert a Slicer NRRD segmentation array into a one-hot stack.

    Three layouts are handled, chosen in this order:
      1. newer Slicer NRRDs whose header carries Segment*_Layer and
         Segment*_LabelValue keys (3-D single-layer or 4-D multi-layer data);
      2. a plain 3-D binary/integer labelmap (segments inferred from max value);
      3. anything else is assumed to already be one-hot and returned as-is.
    segment_indices may be None (all segments), an int, or a list; any other
    type prints an error and returns None.
    """
    print('---'*10)
    print("converting to one hot")

    layer_values = get_layer_values(header)
    label_values = get_label_values(header)

    # Newer Slicer NRRD (compressed layers)
    if layer_values and label_values:

        assert len(layer_values) == len(label_values)
        if len(data.shape) == 3:
            x_dim, y_dim, z_dim = data.shape
        elif len(data.shape) == 4:
            # 4-D: axis 0 indexes layers; spatial dims follow.
            x_dim, y_dim, z_dim = data.shape[1:]

        num_segments = len(layer_values)
        one_hot = np.zeros((num_segments, x_dim, y_dim, z_dim))

        if segment_indices is None:
            segment_indices = list(range(num_segments))

        elif isinstance(segment_indices, int):
            segment_indices = [segment_indices]

        elif not isinstance(segment_indices, list):
            print("incorrectly specified segment indices")
            return

        # Check if NRRD is composed of one layer 0
        if np.max(layer_values) == 0:
            for i, seg_idx in enumerate(segment_indices):
                layer = layer_values[seg_idx]
                label = label_values[seg_idx]
                # All segments live in the single 3-D volume.
                one_hot[i] = 1*(data == label).astype(np.uint8)

        else:
            for i, seg_idx in enumerate(segment_indices):
                layer = layer_values[seg_idx]
                label = label_values[seg_idx]
                # Select the segment's layer before comparing label values.
                one_hot[i] = 1*(data[layer] == label).astype(np.uint8)

    # Binary labelmap
    elif len(data.shape) == 3:
        x_dim, y_dim, z_dim = data.shape
        # NOTE(review): assumes labels are 1..max with no gaps — confirm.
        num_segments = np.max(data)
        one_hot = np.zeros((num_segments, x_dim, y_dim, z_dim))

        if segment_indices is None:
            segment_indices = list(range(1, num_segments + 1))

        elif isinstance(segment_indices, int):
            segment_indices = [segment_indices]

        elif not isinstance(segment_indices, list):
            print("incorrectly specified segment indices")
            return

        for i, seg_idx in enumerate(segment_indices):
            one_hot[i] = 1*(data == seg_idx).astype(np.uint8)

    # Older Slicer NRRD (already one-hot)
    else:
        return data

    return one_hot
147
+
148
+
149
def get_layer_values(header, indices=None):
    """Return each segment's integer layer index from a Slicer NRRD header.

    Segment count is inferred from the number of header keys containing
    'Layer'. The *indices* parameter is accepted but unused (kept for
    interface compatibility).
    """
    num_segments = sum(1 for key in header.keys() if "Layer" in key)
    return [int(header['Segment{}_Layer'.format(i)]) for i in range(num_segments)]
155
+
156
+
157
def get_label_values(header, indices=None):
    """Return each segment's integer LabelValue from a Slicer NRRD header.

    Segment count is inferred from the number of header keys containing
    'LabelValue'. The *indices* parameter is accepted but unused.
    """
    num_segments = sum(1 for key in header.keys() if "LabelValue" in key)
    return [int(header['Segment{}_LabelValue'.format(i)]) for i in range(num_segments)]
163
+
164
+
165
def get_num_segments(header, indices=None):
    """Count segments via the '*LabelValue' keys in a Slicer NRRD header."""
    return sum(1 for key in header.keys() if "LabelValue" in key)
168
+
169
+
170
def checkCorrespondence(segmentation, base, paired_list, filename):
    """Re-map one .seg.nrrd so its segments follow paired_list's (name, value)
    ordering, writing the result to <base>/MatchedSegs/<filename>.

    paired_list: list of (segment_name, value) tuples as built by main().
    Returns the path of the re-mapped file. Assumes <base>/MatchedSegs exists.
    """
    print(filename)
    assert type(paired_list) == list
    data, tempSeg = nrrd.read(os.path.join(base, segmentation, filename))
    seg_info = slicerio.read_segmentation_info(
        os.path.join(base, segmentation, filename))
    # extract_segments re-orders/re-labels voxels to match paired_list.
    output_voxels, output_header = slicerio.extract_segments(
        data, tempSeg, seg_info, paired_list)
    output = os.path.join(base, 'MatchedSegs/' +
                          filename)
    nrrd.write(output, output_voxels, output_header)
    print('---'*10)
    print('Check the label names and values')
    print(slicerio.read_segmentation_info(output))
    return output
185
+
186
+
187
def checkSegFormat(base, segmentation, paired_list, check=False):
    """Normalize every segmentation under <base>/<segmentation> to .nii.gz.

    NRRD files are converted via nrrd2nifti (optionally re-mapped first with
    checkCorrespondence when *check* is True); .nii files are re-saved as
    .nii.gz; .nii.gz files are copied unchanged. Results land in
    <base>/re-format_labels, whose path is returned.
    """
    path = os.path.join(base, segmentation)
    save_dir = os.path.join(base, 're-format_labels')
    try:
        os.mkdir(save_dir)
    except:
        # Directory already present from a previous run.
        print(f'{save_dir} already exists')

    for file in os.listdir(path):
        # Stem before the first dot becomes the output basename.
        name = file.split('.')[0]
        if file.endswith('seg.nrrd') or file.endswith('nrrd'):
            if check:
                # Re-map segments to the requested (name, value) pairs first.
                output_path = checkCorrespondence(
                    segmentation, base, paired_list, file)
                ants_img = ants.image_read(output_path)
                header = nrrd.read_header(output_path)
            else:
                ants_img = ants.image_read(os.path.join(path, file))
                header = nrrd.read_header(os.path.join(path, file))
            segmentations = True
            filename = os.path.join(save_dir, name + '.nii.gz')
            nrrd2nifti(ants_img, header, filename, segmentations)
        elif file.endswith('nii'):
            image = ants.image_read(os.path.join(path, file))
            image.to_file(os.path.join(save_dir, name + '.nii.gz'))
        elif file.endswith('nii.gz'):
            shutil.copy(os.path.join(path, file), save_dir)

    return save_dir
216
+
217
+
218
def nrrd2nifti(img, header, filename, segmentations=True):
    """Save an ANTs image as NIfTI; for segmentations, collapse the NRRD
    layers into a single integer labelmap first.

    The one-hot stack from convert_to_one_hot is reduced with argmax+1 and
    masked by the foreground so background stays 0.
    """
    img_as_np = img.view(single_components=segmentations)
    if segmentations:
        data = convert_to_one_hot(img_as_np, header)
        # Foreground mask: any segment present at this voxel.
        foreground = np.max(data, axis=0)
        # argmax picks the segment index; +1 shifts labels to 1..N, and the
        # multiply zeroes out background voxels.
        labelmap = np.multiply(np.argmax(data, axis=0) + 1,
                               foreground).astype('uint8')
        segmentation_img = ants.from_numpy(
            labelmap, origin=img.origin, spacing=img.spacing, direction=img.direction)
        print('-- Saving NII Segmentations')
        segmentation_img.to_file(filename)
    else:
        print('-- Saving NII Volume')
        img.to_file(filename)
232
+
233
+
234
def find_template(base, image_path, fomat):
    """Pick the alphabetically first scan in the directory as the template,
    returning its file stem (basename before the first dot)."""
    pattern = os.path.join(base, image_path) + '/*' + fomat
    first_scan = sorted(glob.glob(pattern))[0]
    return os.path.basename(first_scan).split('.')[0]
238
+
239
+
240
def find_template_V2(base, image_path, fomat):
    """Choose as template the scan with the largest third dimension (most
    slices), returning its file stem.

    NOTE(review): if the directory holds no matching files the function hits
    a NameError on return — behavior preserved from the original.
    """
    best_depth = -np.inf
    for scan_path in glob.glob(os.path.join(base, image_path) + '/*' + fomat):
        depth = ants.image_read(scan_path).shape[2]
        if depth > best_depth:
            best_depth = depth
            template = os.path.basename(scan_path).split('.')[0]
    return template
251
+
252
+
253
def path_to_id(path, fomat):
    """List the file stems of every file under *path* matching *fomat*."""
    return [
        os.path.basename(match).split('.')[0]
        for match in glob.glob(path + '/*' + fomat)
    ]
259
+
260
+
261
def checkFormat(base, images_path):
    """Detect the image file format used in <base>/<images_path>.

    Returns 'nii', 'nii.gz', 'seg.nrrd' or 'nrrd' based on the first
    recognized file found.

    Fixes: the '.seg.nrrd' test now precedes '.nrrd' — every '.seg.nrrd' name
    also ends with '.nrrd', so the original's 'seg.nrrd' branch was
    unreachable and such files yielded wrong '<id>.nrrd' paths downstream.
    Also raises a clear FileNotFoundError when no recognized file exists
    (the original crashed with UnboundLocalError).
    """
    path = os.path.join(base, images_path)
    for file in os.listdir(path):
        if file.endswith('.nii'):
            return 'nii'
        if file.endswith('.nii.gz'):
            return 'nii.gz'
        # Must be checked before the generic '.nrrd' suffix.
        if file.endswith('.seg.nrrd'):
            return 'seg.nrrd'
        if file.endswith('.nrrd'):
            return 'nrrd'
    raise FileNotFoundError(f'no recognized image format found in {path}')
277
+
278
+
279
def main():
    """Co-register every customized test scan of a task to a template.

    Builds the output tree under
    deepatlas_raw_data_base/<task>/customize_test_data/<op>/{images,labels},
    optionally re-maps segment (value, name) pairs given via -sl, normalizes
    all segmentations to .nii.gz, then registers each target scan (with label
    propagation when a matching label exists) via split_and_registration.
    """
    ROOT_DIR = str(Path(os.getcwd()).parent.parent.absolute())
    args = parse_command_line()
    base = args.bp
    template_path = args.template
    target_seg = args.target_seg
    target_scan = args.target_scan
    label_list = args.sl
    task_id = args.ti
    deepatlas_path = ROOT_DIR
    task_path = os.path.join(deepatlas_path, 'deepatlas_raw_data_base', task_id)
    output_data_path = os.path.join(task_path, 'customize_test_data')
    out_data_path = os.path.join(output_data_path, args.op)
    images_output = os.path.join(out_data_path, 'images')
    labels_output = os.path.join(out_data_path, 'labels')
    # Formats are detected per directory from the first recognized file.
    template_fomat = checkFormat(base, template_path)
    target_fomat = checkFormat(base, target_scan)
    fomat_seg = checkFormat(base, target_seg)
    # Template id = stem of the first scan in the template directory.
    template = os.path.basename(glob.glob(os.path.join(base, template_path) + '/*' + template_fomat)[0]).split('.')[0]
    label_lists = path_to_id(os.path.join(base, target_seg), fomat_seg)
    if label_list is not None:
        # checkCorrespondence writes into MatchedSegs, so create it up front.
        matched_output = os.path.join(base, 'MatchedSegs')
        try:
            os.mkdir(matched_output)
        except:
            print(f"{matched_output} already exists")

    try:
        os.mkdir(output_data_path)
    except:
        print(f"{output_data_path} already exists")

    try:
        os.mkdir(out_data_path)
    except:
        print(f"{out_data_path} already exists")

    try:
        os.mkdir(images_output)
    except:
        print(f"{images_output} already exists")

    try:
        os.mkdir(labels_output)
    except:
        print(f"{labels_output} already exists")

    paired_list = []
    if label_list is not None:
        # -sl alternates numeric value then name: v1 n1 v2 n2 ...
        for i in range(0, len(label_list), 2):
            if not label_list[i].isdigit():
                print(
                    "Wrong order of input argument for pair-wising label value and its name !!!")
                return
            else:
                value = label_list[i]
                if not label_list[i+1].isdigit():
                    key = label_list[i+1]
                    ele = tuple((key, value))
                    paired_list.append(ele)
                else:
                    print(
                        "Wrong input argument for pair-wising label value and its name !!!")
                    return

        # print(new_segmentation)
        # With pairs given, re-map segments while normalizing formats.
        seg_output_path = checkSegFormat(
            base, target_seg, paired_list, check=True)

    else:
        seg_output_path = checkSegFormat(
            base, target_seg, paired_list, check=False)

    for i in sorted(glob.glob(os.path.join(base, target_scan) + '/*' + target_fomat)):
        id = os.path.basename(i).split('.')[0]
        target = id
        # Propagate labels only for scans that actually have one.
        if id in label_lists:
            split_and_registration(
                template, target, base, template_path, target_scan, seg_output_path, images_output, labels_output, template_fomat, target_fomat, has_label=True)
        else:
            split_and_registration(
                template, target, base, template_path, target_scan, seg_output_path, images_output, labels_output, template_fomat, target_fomat, has_label=False)
361
+
362
+
363
# Run test-set co-registration when executed as a script.
if __name__ == '__main__':
    main()
deepatlas/preprocess/registration_training.py ADDED
@@ -0,0 +1,379 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import ants
3
+ import nrrd
4
+ import numpy as np
5
+ import glob
6
+ import slicerio
7
+ import shutil
8
+ import argparse
9
+ from pathlib import Path
10
+
11
+ def parse_command_line():
12
+ print('---'*10)
13
+ print('Parsing Command Line Arguments')
14
+ parser = argparse.ArgumentParser(
15
+ description='pipeline for dataset co-alignment')
16
+ parser.add_argument('-bp', metavar='base path', type=str,
17
+ help="Absolute path of the base directory")
18
+ parser.add_argument('-ip', metavar='image path', type=str,
19
+ help="Relative path of the image directory")
20
+ parser.add_argument('-sp', metavar='segmentation path', type=str,
21
+ help="Relative path of the image directory")
22
+ parser.add_argument('-sl', metavar='segmentation information list', type=str, nargs='+',
23
+ help='a list of label name and corresponding value')
24
+ parser.add_argument('-ti', metavar='task id and name', type=str,
25
+ help='task name and id')
26
+ argv = parser.parse_args()
27
+ return argv
28
+
29
+
30
+ def split_and_registration(template, target, base, template_images_path, target_images_path, seg_path, img_out_path, seg_out_path, template_fomat, target_fomat, checked=False, has_label=False):
31
+ print('---'*10)
32
+ print('Creating file paths')
33
+ # Define the path for template, target, and segmentations (from template)
34
+ fixed_path = os.path.join(base, template_images_path, template + '.' + template_fomat)
35
+ moving_path = os.path.join(base, target_images_path, target + '.' + target_fomat)
36
+ images_output = os.path.join(img_out_path, target + '.nii.gz')
37
+ print('---'*10)
38
+ print('Reading in the template and target image')
39
+ # Read the template and target image
40
+ template_image = ants.image_read(fixed_path)
41
+ target_image = ants.image_read(moving_path)
42
+ print('---'*10)
43
+ print('Performing the template and target image registration')
44
+ transform_forward = ants.registration(fixed=template_image, moving=target_image,
45
+ type_of_transform="Similarity", verbose=False)
46
+ if has_label:
47
+ segmentation_path = os.path.join(
48
+ base, seg_path, target + '.nii.gz')
49
+ segmentation_output = os.path.join(
50
+ seg_out_path, target + '.nii.gz')
51
+ print('---'*10)
52
+ print('Reading in the segmentation')
53
+ # Split segmentations into individual components
54
+ segment_target = ants.image_read(segmentation_path)
55
+ print('---'*10)
56
+ print('Applying the transformation for label propagation and image registration')
57
+ predicted_targets_image = ants.apply_transforms(
58
+ fixed=template_image,
59
+ moving=segment_target,
60
+ transformlist=transform_forward["fwdtransforms"],
61
+ interpolator="genericLabel",
62
+ verbose=False)
63
+ predicted_targets_image.to_file(segmentation_output)
64
+
65
+ reg_img = ants.apply_transforms(
66
+ fixed=template_image,
67
+ moving=target_image,
68
+ transformlist=transform_forward["fwdtransforms"],
69
+ interpolator="linear",
70
+ verbose=False)
71
+ print('---'*10)
72
+ print("writing out transformed template segmentation")
73
+ reg_img.to_file(images_output)
74
+ print('Label Propagation & Image Registration complete')
75
+
76
+
77
+ def convert_to_one_hot(data, header, segment_indices=None):
78
+ print('---'*10)
79
+ print("converting to one hot")
80
+
81
+ layer_values = get_layer_values(header)
82
+ label_values = get_label_values(header)
83
+
84
+ # Newer Slicer NRRD (compressed layers)
85
+ if layer_values and label_values:
86
+
87
+ assert len(layer_values) == len(label_values)
88
+ if len(data.shape) == 3:
89
+ x_dim, y_dim, z_dim = data.shape
90
+ elif len(data.shape) == 4:
91
+ x_dim, y_dim, z_dim = data.shape[1:]
92
+
93
+ num_segments = len(layer_values)
94
+ one_hot = np.zeros((num_segments, x_dim, y_dim, z_dim))
95
+
96
+ if segment_indices is None:
97
+ segment_indices = list(range(num_segments))
98
+
99
+ elif isinstance(segment_indices, int):
100
+ segment_indices = [segment_indices]
101
+
102
+ elif not isinstance(segment_indices, list):
103
+ print("incorrectly specified segment indices")
104
+ return
105
+
106
+ # Check if NRRD is composed of one layer 0
107
+ if np.max(layer_values) == 0:
108
+ for i, seg_idx in enumerate(segment_indices):
109
+ layer = layer_values[seg_idx]
110
+ label = label_values[seg_idx]
111
+ one_hot[i] = 1*(data == label).astype(np.uint8)
112
+
113
+ else:
114
+ for i, seg_idx in enumerate(segment_indices):
115
+ layer = layer_values[seg_idx]
116
+ label = label_values[seg_idx]
117
+ one_hot[i] = 1*(data[layer] == label).astype(np.uint8)
118
+
119
+ # Binary labelmap
120
+ elif len(data.shape) == 3:
121
+ x_dim, y_dim, z_dim = data.shape
122
+ num_segments = np.max(data)
123
+ one_hot = np.zeros((num_segments, x_dim, y_dim, z_dim))
124
+
125
+ if segment_indices is None:
126
+ segment_indices = list(range(1, num_segments + 1))
127
+
128
+ elif isinstance(segment_indices, int):
129
+ segment_indices = [segment_indices]
130
+
131
+ elif not isinstance(segment_indices, list):
132
+ print("incorrectly specified segment indices")
133
+ return
134
+
135
+ for i, seg_idx in enumerate(segment_indices):
136
+ one_hot[i] = 1*(data == seg_idx).astype(np.uint8)
137
+
138
+ # Older Slicer NRRD (already one-hot)
139
+ else:
140
+ return data
141
+
142
+ return one_hot
143
+
144
+
145
+ def get_layer_values(header, indices=None):
146
+ layer_values = []
147
+ num_segments = len([key for key in header.keys() if "Layer" in key])
148
+ for i in range(num_segments):
149
+ layer_values.append(int(header['Segment{}_Layer'.format(i)]))
150
+ return layer_values
151
+
152
+
153
+ def get_label_values(header, indices=None):
154
+ label_values = []
155
+ num_segments = len([key for key in header.keys() if "LabelValue" in key])
156
+ for i in range(num_segments):
157
+ label_values.append(int(header['Segment{}_LabelValue'.format(i)]))
158
+ return label_values
159
+
160
+
161
+ def get_num_segments(header, indices=None):
162
+ num_segments = len([key for key in header.keys() if "LabelValue" in key])
163
+ return num_segments
164
+
165
+
166
+ def checkCorrespondence(segmentation, base, paired_list, filename):
167
+ print(filename)
168
+ assert type(paired_list) == list
169
+ data, tempSeg = nrrd.read(os.path.join(base, segmentation, filename))
170
+ seg_info = slicerio.read_segmentation_info(
171
+ os.path.join(base, segmentation, filename))
172
+ output_voxels, output_header = slicerio.extract_segments(
173
+ data, tempSeg, seg_info, paired_list)
174
+ output = os.path.join(base, 'MatchedSegs/' +
175
+ filename)
176
+ nrrd.write(output, output_voxels, output_header)
177
+ print('---'*10)
178
+ print('Check the label names and values')
179
+ print(slicerio.read_segmentation_info(output))
180
+ return output
181
+
182
+
183
+ def checkSegFormat(base, segmentation, paired_list, check=False):
184
+ path = os.path.join(base, segmentation)
185
+ save_dir = os.path.join(base, 're-format_labels')
186
+ try:
187
+ os.mkdir(save_dir)
188
+ except:
189
+ print(f'{save_dir} already exists')
190
+
191
+ for file in os.listdir(path):
192
+ name = file.split('.')[0]
193
+ if file.endswith('seg.nrrd') or file.endswith('nrrd'):
194
+ if check:
195
+ output_path = checkCorrespondence(
196
+ segmentation, base, paired_list, file)
197
+ ants_img = ants.image_read(output_path)
198
+ header = nrrd.read_header(output_path)
199
+ else:
200
+ ants_img = ants.image_read(os.path.join(path, file))
201
+ header = nrrd.read_header(os.path.join(path, file))
202
+ segmentations = True
203
+ filename = os.path.join(save_dir, name + '.nii.gz')
204
+ nrrd2nifti(ants_img, header, filename, segmentations)
205
+ elif file.endswith('nii'):
206
+ image = ants.image_read(os.path.join(path, file))
207
+ image.to_file(os.path.join(save_dir, name + '.nii.gz'))
208
+ elif file.endswith('nii.gz'):
209
+ shutil.copy(os.path.join(path, file), save_dir)
210
+
211
+ return save_dir
212
+
213
+
214
+ def nrrd2nifti(img, header, filename, segmentations=True):
215
+ img_as_np = img.view(single_components=segmentations)
216
+ if segmentations:
217
+ data = convert_to_one_hot(img_as_np, header)
218
+ foreground = np.max(data, axis=0)
219
+ labelmap = np.multiply(np.argmax(data, axis=0) + 1,
220
+ foreground).astype('uint8')
221
+ segmentation_img = ants.from_numpy(
222
+ labelmap, origin=img.origin, spacing=img.spacing, direction=img.direction)
223
+ print('-- Saving NII Segmentations')
224
+ segmentation_img.to_file(filename)
225
+ else:
226
+ print('-- Saving NII Volume')
227
+ img.to_file(filename)
228
+
229
+
230
+ def find_template(base, image_path, fomat):
231
+ scans = sorted(glob.glob(os.path.join(base, image_path) + '/*' + fomat))
232
+ template = os.path.basename(scans[0]).split('.')[0]
233
+ return template
234
+
235
+
236
+ def find_template_V2(base, image_path, fomat):
237
+ maxD = -np.inf
238
+ for i in glob.glob(os.path.join(base, image_path) + '/*' + fomat):
239
+ id = os.path.basename(i).split('.')[0]
240
+ img = ants.image_read(i)
241
+ thirdD = img.shape[2]
242
+ if thirdD > maxD:
243
+ template = id
244
+ maxD = thirdD
245
+
246
+ return template
247
+
248
+
249
+ def path_to_id(path, fomat):
250
+ ids = []
251
+ for i in glob.glob(path + '/*' + fomat):
252
+ id = os.path.basename(i).split('.')[0]
253
+ ids.append(id)
254
+ return ids
255
+
256
+
257
+ def checkFormat(base, images_path):
258
+ path = os.path.join(base, images_path)
259
+ for file in os.listdir(path):
260
+ if file.endswith('.nii'):
261
+ ret = 'nii'
262
+ break
263
+ elif file.endswith('.nii.gz'):
264
+ ret = 'nii.gz'
265
+ break
266
+ elif file.endswith('.nrrd'):
267
+ ret = 'nrrd'
268
+ break
269
+ elif file.endswith('.seg.nrrd'):
270
+ ret = 'seg.nrrd'
271
+ break
272
+ return ret
273
+
274
+
275
+ def main():
276
+ ROOT_DIR = str(Path(os.getcwd()).parent.parent.absolute())
277
+ args = parse_command_line()
278
+ base = args.bp
279
+ images_path = args.ip
280
+ segmentation = args.sp
281
+ label_list = args.sl
282
+ task_id = args.ti
283
+ deepatlas_path = ROOT_DIR
284
+ raw_data_path = os.path.join(deepatlas_path, 'deepatlas_raw_data_base')
285
+ task_path = os.path.join(deepatlas_path, 'deepatlas_raw_data_base', task_id)
286
+ training_data_path = os.path.join(deepatlas_path, 'deepatlas_raw_data_base', task_id, 'Training_dataset')
287
+ images_output = os.path.join(deepatlas_path, 'deepatlas_raw_data_base', task_id, 'Training_dataset', 'images')
288
+ labels_output = os.path.join(deepatlas_path, 'deepatlas_raw_data_base', task_id, 'Training_dataset', 'labels')
289
+ fomat = checkFormat(base, images_path)
290
+ fomat_seg = checkFormat(base, segmentation)
291
+ template = find_template(base, images_path, fomat)
292
+ label_lists = path_to_id(os.path.join(base, segmentation), fomat_seg)
293
+ if label_list is not None:
294
+ matched_output = os.path.join(base, 'MatchedSegs')
295
+ try:
296
+ os.mkdir(matched_output)
297
+ except:
298
+ print(f"{matched_output} already exists")
299
+
300
+ try:
301
+ os.mkdir(raw_data_path)
302
+ except:
303
+ print(f"{raw_data_path} already exists")
304
+
305
+ try:
306
+ os.mkdir(task_path)
307
+ except:
308
+ print(f"{task_path} already exists")
309
+
310
+ try:
311
+ os.mkdir(training_data_path)
312
+ except:
313
+ print(f"{training_data_path} already exists")
314
+
315
+ try:
316
+ os.mkdir(images_output)
317
+ except:
318
+ print(f"{images_output} already exists")
319
+
320
+ try:
321
+ os.mkdir(labels_output)
322
+ except:
323
+ print(f"{labels_output} already exists")
324
+
325
+ paired_list = []
326
+ if label_list is not None:
327
+ for i in range(0, len(label_list), 2):
328
+ if not label_list[i].isdigit():
329
+ print(
330
+ "Wrong order of input argument for pair-wising label value and its name !!!")
331
+ return
332
+ else:
333
+ value = label_list[i]
334
+ if not label_list[i+1].isdigit():
335
+ key = label_list[i+1]
336
+ ele = tuple((key, value))
337
+ paired_list.append(ele)
338
+ else:
339
+ print(
340
+ "Wrong input argument for pair-wising label value and its name !!!")
341
+ return
342
+
343
+ # print(new_segmentation)
344
+ seg_output_path = checkSegFormat(
345
+ base, segmentation, paired_list, check=True)
346
+
347
+ else:
348
+ seg_output_path = checkSegFormat(
349
+ base, segmentation, paired_list, check=False)
350
+
351
+ for i in sorted(glob.glob(os.path.join(base, images_path) + '/*' + fomat)):
352
+ id = os.path.basename(i).split('.')[0]
353
+ if id == template:
354
+ pass
355
+ else:
356
+ target = id
357
+ if id in label_lists:
358
+ split_and_registration(
359
+ template, target, base, images_path, images_path, seg_output_path, images_output, labels_output, fomat, fomat, checked=False, has_label=True)
360
+ else:
361
+ split_and_registration(
362
+ template, target, base, images_path, images_path, seg_output_path, images_output, labels_output, fomat, fomat, checked=False, has_label=False)
363
+
364
+ image = ants.image_read(os.path.join(
365
+ base, images_path, template + '.' + fomat))
366
+ image.to_file(os.path.join(images_output, template + '.nii.gz'))
367
+
368
+ images_path = images_output
369
+ fomat = 'nii.gz'
370
+ if template in label_lists:
371
+ split_and_registration(
372
+ target, template, base, images_path, images_path, seg_output_path, images_output, labels_output, fomat, fomat, checked=True, has_label=True)
373
+ else:
374
+ split_and_registration(
375
+ target, template, base, images_path, images_path, seg_output_path, images_output, labels_output, fomat, fomat, checked=True, has_label=False)
376
+
377
+
378
+ if __name__ == '__main__':
379
+ main()
deepatlas/scripts/deep_atlas.sh ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ source ~/proj_MONAI/bin/activate
3
+ cd ~/DeepAtlasV2/DeepAtlas/deepatlas/preprocess
4
+ #python3 registration_training.py -bp ~/HN_data/modified/data -ip images -sp labels -ti Task003_HN33_numres2_25lab
5
+ #python3 crop_flip_training.py -rs 240 240 144 -ti Task003_HN33_numres2_25lab
6
+ #python3 generate_info.py -ti Task003_test -kf 5
7
+ cd ~/DeepAtlasV2/DeepAtlas/deepatlas/scripts
8
+ CUDA_VISIBLE_DEVICES=1 python3 deep_atlas_train.py --config ~/DeepAtlasV2/DeepAtlas/deepatlas_config/config_test.json --continue_training
9
+ CUDA_VISIBLE_DEVICES=1 python3 deep_atlas_test.py --config ~/DeepAtlasV2/DeepAtlas/deepatlas_config/config_test.json
10
+ #CUDA_VISIBLE_DEVICES=1 python3 deep_atlas_train.py --config ~/DeepAtlas/deepatlas_config/config_NC_2gt.json
11
+ #CUDA_VISIBLE_DEVICES=1 python3 deep_atlas_test.py --config ~/DeepAtlas/deepatlas_config/config_NC_2gt.json
12
+ #CUDA_VISIBLE_DEVICES=1 python3 deep_atlas_train.py --config ~/DeepAtlas/deepatlas_config/config_NC_4gt.json
13
+ #CUDA_VISIBLE_DEVICES=1 python3 deep_atlas_test.py --config ~/DeepAtlas/deepatlas_config/config_NC_4gt.json
deepatlas/scripts/deep_atlas_test.py ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from pkg_resources import add_activation_listener
2
+ import monai
3
+ import torch
4
+ import itk
5
+ import numpy as np
6
+ import os.path
7
+ import argparse
8
+ import sys
9
+ from pathlib import Path
10
+ import deep_atlas_train
11
+ from collections import namedtuple
12
+
13
+ ROOT_DIR = str(Path(os.getcwd()).parent.parent.absolute())
14
+ sys.path.insert(0, os.path.join(ROOT_DIR, 'deepatlas/test'))
15
+ sys.path.insert(0, os.path.join(ROOT_DIR, 'deepatlas/utils'))
16
+ from test import (
17
+ seg_training_inference, reg_training_inference
18
+ )
19
+ from utils import (
20
+ make_if_dont_exist, load_json
21
+ )
22
+
23
+ def parse_command_line():
24
+ parser = argparse.ArgumentParser(
25
+ description='pipeline for deep atlas test')
26
+ parser.add_argument('--config', metavar='path to the configuration file', type=str,
27
+ help='absolute path to the configuration file')
28
+ parser.add_argument('--train_only', action='store_true',
29
+ help='only training or training plus test')
30
+ argv = parser.parse_args()
31
+ return argv
32
+
33
+
34
+ def main():
35
+ ROOT_DIR = str(Path(os.getcwd()).parent.parent.absolute())
36
+ args = parse_command_line()
37
+ monai.utils.set_determinism(seed=2938649572)
38
+ config = args.config
39
+ train_only = args.train_only
40
+ config = load_json(config)
41
+ config = namedtuple("config", config.keys())(*config.values())
42
+ if len(config.info_name.split('_')) <= 1 or not train_only:
43
+ task = config.task_name
44
+ if torch.cuda.is_available():
45
+ device = torch.device("cuda:" + str(torch.cuda.current_device()))
46
+ output_path = os.path.join(ROOT_DIR, 'deepatlas_results', task, f'set_{config.exp_set}',f'{config.num_seg_used}gt', config.folder_name, 'training_predicted_results')
47
+ make_if_dont_exist(output_path)
48
+
49
+ for i in range(1, config.num_fold+1):
50
+ num_fold = f'fold_{i}'
51
+ json_path = os.path.join(
52
+ ROOT_DIR, 'deepatlas_results', task, f'set_{config.exp_set}',f'{config.num_seg_used}gt', config.folder_name, 'training_results', num_fold, 'dataset.json')
53
+ #num_fold = json_file['num_fold']
54
+ output_fold_path = os.path.join(output_path, num_fold)
55
+ seg_model_path = os.path.join(Path(json_path).parent.absolute(), 'SegNet', 'model', 'seg_net_best.pth')
56
+ reg_model_path = os.path.join(Path(json_path).parent.absolute(), 'RegNet', 'model', 'reg_net_best.pth')
57
+ labels = config.labels
58
+ num_label = len(labels.keys())
59
+ network_info = config.network
60
+ spatial_dim = network_info['spatial_dim']
61
+ dropout = network_info['dropout']
62
+ activation_type = network_info['activation_type']
63
+ normalization_type = network_info['normalization_type']
64
+ num_res = network_info['num_res']
65
+ seg_path = os.path.join(output_fold_path, 'SegNet')
66
+ reg_path = os.path.join(output_fold_path, 'RegNet')
67
+ make_if_dont_exist(output_fold_path)
68
+ make_if_dont_exist(seg_path)
69
+ make_if_dont_exist(reg_path)
70
+ seg_net = deep_atlas_train.get_seg_net(
71
+ spatial_dim, num_label, dropout, activation_type, normalization_type, num_res)
72
+ reg_net = deep_atlas_train.get_reg_net(
73
+ spatial_dim, spatial_dim, dropout, activation_type, normalization_type, num_res)
74
+ seg_training_inference(seg_net, device, seg_model_path, seg_path, num_label, json_path=json_path, data=None)
75
+ reg_training_inference(reg_net, device, reg_model_path, reg_path, num_label, json_path=json_path, data=None)
76
+ else:
77
+ print('train only, test will be done in the future !!!')
78
+
79
+ if __name__ == '__main__':
80
+ main()
deepatlas/scripts/deep_atlas_test_customized.py ADDED
@@ -0,0 +1,120 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from pkg_resources import add_activation_listener
2
+ import monai
3
+ import torch
4
+ import itk
5
+ import numpy as np
6
+ import os.path
7
+ import argparse
8
+ import sys
9
+ from pathlib import Path
10
+ import deep_atlas_train
11
+ import glob
12
+
13
+ ROOT_DIR = str(Path(os.getcwd()).parent.parent.absolute())
14
+ sys.path.insert(0, os.path.join(ROOT_DIR, 'deepatlas/test'))
15
+
16
+ from test import (
17
+ seg_training_inference, load_json, reg_training_inference
18
+ )
19
+
20
+ def parse_command_line():
21
+ parser = argparse.ArgumentParser(
22
+ description='pipeline for deep atlas test')
23
+ parser.add_argument('-gpu', metavar='id of gpu', type=str, default='0',
24
+ help='id of gpu device to use')
25
+ parser.add_argument('-ti', metavar='task id and name', type=str,
26
+ help='task name and id')
27
+ parser.add_argument('-nf', metavar='number of fold', type=int,
28
+ help='number of fold for testing')
29
+ parser.add_argument('-op', metavar='output path for prediction step', type=str,
30
+ help="relative path of the output directory, should be same name in the registration, crop and final prediction steps")
31
+ argv = parser.parse_args()
32
+ return argv
33
+
34
+
35
+ def path_to_id(path):
36
+ return os.path.basename(path).split('.')[0]
37
+
38
+
39
+ def main():
40
+ ROOT_DIR = str(Path(os.getcwd()).parent.parent.absolute())
41
+ args = parse_command_line()
42
+ gpu = args.gpu
43
+ task = args.ti
44
+ num_fold = f'fold_{args.nf}'
45
+ output_path = os.path.join(ROOT_DIR, 'deepatlas_results', task, 'customize_predicted_results')
46
+ fold_path = os.path.join(output_path, num_fold)
47
+ out_path = os.path.join(fold_path, args.op)
48
+ json_path = os.path.join(
49
+ ROOT_DIR, 'deepatlas_results', task, "training_results", num_fold, 'dataset.json')
50
+ seg_model_path = os.path.join(
51
+ ROOT_DIR, 'deepatlas_results', task, 'training_results', num_fold, 'SegNet', 'seg_net_best.pth')
52
+ reg_model_path = os.path.join(
53
+ ROOT_DIR, 'deepatlas_results', task, 'training_results', num_fold, 'RegNet', 'reg_net_best.pth')
54
+ json_file = load_json(json_path)
55
+ labels = json_file['labels']
56
+ num_label = len(labels.keys())
57
+ network_info = json_file['network']
58
+ spatial_dim = network_info['spatial_dim']
59
+ dropout = network_info['dropout']
60
+ activation_type = network_info['activation_type']
61
+ normalization_type = network_info['normalization_type']
62
+ num_res = network_info['num_res']
63
+ device = torch.device("cuda:" + gpu)
64
+ output_seg_path = os.path.join(out_path, 'SegNet')
65
+ output_reg_path = os.path.join(out_path, 'RegNet')
66
+ try:
67
+ os.mkdir(output_path)
68
+ except:
69
+ print(f'{output_path} is already existed !!!')
70
+
71
+ try:
72
+ os.mkdir(fold_path)
73
+ except:
74
+ print(f'{fold_path} is already existed !!!')
75
+
76
+ try:
77
+ os.mkdir(out_path)
78
+ except:
79
+ print(f'{out_path} is already existed !!!')
80
+
81
+ try:
82
+ os.mkdir(output_seg_path)
83
+ except:
84
+ print(f'{output_seg_path} is already existed !!!')
85
+
86
+ try:
87
+ os.mkdir(output_reg_path)
88
+ except:
89
+ print(f'{output_reg_path} is already existed !!!')
90
+
91
+ seg_net = deep_atlas_train.get_seg_net(
92
+ spatial_dim, num_label, dropout, activation_type, normalization_type, num_res)
93
+ reg_net = deep_atlas_train.get_reg_net(
94
+ spatial_dim, spatial_dim, dropout, activation_type, normalization_type, num_res)
95
+
96
+ img_path = os.path.join(ROOT_DIR, 'deepatlas_preprocessed', task, 'customize_test_data', args.op, 'images')
97
+ seg_path = os.path.join(ROOT_DIR, 'deepatlas_preprocessed', task, 'customize_test_data', args.op, 'labels')
98
+ total_img_paths = []
99
+ total_seg_paths = []
100
+ for i in sorted(glob.glob(img_path + '/*.nii.gz')):
101
+ total_img_paths.append(i)
102
+
103
+ for j in sorted(glob.glob(seg_path + '/*.nii.gz')):
104
+ total_seg_paths.append(j)
105
+
106
+ seg_ids = list(map(path_to_id, total_seg_paths))
107
+ img_ids = map(path_to_id, total_img_paths)
108
+ data = []
109
+ for img_index, img_id in enumerate(img_ids):
110
+ data_item = {'img': total_img_paths[img_index]}
111
+ if img_id in seg_ids:
112
+ data_item['seg'] = total_seg_paths[seg_ids.index(img_id)]
113
+ data.append(data_item)
114
+
115
+ seg_training_inference(seg_net, device, seg_model_path, output_seg_path, num_label, json_path=None, data=data)
116
+ reg_training_inference(reg_net, device, reg_model_path, output_reg_path, num_label, json_path=None, data=data)
117
+
118
+
119
+ if __name__ == '__main__':
120
+ main()
deepatlas/scripts/deep_atlas_test_customized.sh ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ source ~/proj_MONAI/bin/activate
3
+ cd ~/DeepAtlas/deepatlas/preprocess
4
+ python3 registration_test.py -op firstTest -bp ~/Test_data -template template -target_scan target -target_seg target_seg -sl 1 Ear 2 Mid 3 Nasal -ti Task001_SepET
5
+ python3 crop_flip_test.py -rs 128 128 128 -fp -ti Task001_SepET -op firstTest
6
+ cd ~/DeepAtlas/deepatlas/scripts
7
+ python3 deep_atlas_test_customized.py -gpu 0 -ti Task001_SepET -op firstTest -nf 5
deepatlas/scripts/deep_atlas_train.py ADDED
@@ -0,0 +1,482 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import seg_train
2
+ from pathlib import Path
3
+ from collections import OrderedDict
4
+ import json
5
+ import sys
6
+ import argparse
7
+ import os.path
8
+ import glob
9
+ import random
10
+ import matplotlib.pyplot as plt
11
+ import torch
12
+ import monai
13
+ import logging
14
+ import shutil
15
+ from collections import namedtuple
16
+ import numpy as np
17
+ import datetime
18
+
19
+ ROOT_DIR = str(Path(os.getcwd()).parent.parent.absolute())
20
+ sys.path.insert(0, os.path.join(ROOT_DIR, 'deepatlas/preprocess'))
21
+ sys.path.insert(0, os.path.join(ROOT_DIR, 'deepatlas/network'))
22
+ sys.path.insert(0, os.path.join(ROOT_DIR, 'deepatlas/train'))
23
+ sys.path.insert(0, os.path.join(ROOT_DIR, 'deepatlas/utils'))
24
+ from train import (
25
+ train_network
26
+ )
27
+ from network import (
28
+ regNet, segNet
29
+ )
30
+ from process_data import (
31
+ split_data, load_seg_dataset, load_reg_dataset, take_data_pairs, subdivide_list_of_data_pairs
32
+ )
33
+ from utils import (
34
+ load_json, make_if_dont_exist
35
+ )
36
+
37
+ def parse_command_line():
38
+ parser = argparse.ArgumentParser(
39
+ description='pipeline for deep atlas train')
40
+ parser.add_argument('--config', metavar='path to the configuration file', type=str,
41
+ help='absolute path to the configuration file')
42
+ parser.add_argument('--continue_training', action='store_true',
43
+ help='use this if you want to continue a training')
44
+ parser.add_argument('--train_only', action='store_true',
45
+ help='only training or training plus test')
46
+ parser.add_argument('--plot_network', action='store_true',
47
+ help='whether to plot the network')
48
+ argv = parser.parse_args()
49
+ return argv
50
+
51
+
52
+ def get_seg_net(spatial_dims, num_label, dropout, activation_type, normalization_type, num_res):
53
+ seg_net = segNet(
54
+ spatial_dim=spatial_dims, # spatial dims
55
+ in_channel=1, # input channels
56
+ out_channel=num_label, # output channels
57
+ channel=(8, 16, 16, 32, 32, 64, 64), # channel sequence
58
+ stride=(1, 2, 1, 2, 1, 2), # convolutional strides
59
+ dropouts=dropout,
60
+ acts=activation_type,
61
+ norms=normalization_type,
62
+ num_res_unit=num_res
63
+ )
64
+ return seg_net
65
+
66
+
67
+ def get_reg_net(spatial_dims, num_label, dropout, activation_type, normalization_type, num_res):
68
+ reg_net = regNet(
69
+ spatial_dim=spatial_dims, # spatial dims
70
+ in_channel=2, # input channels
71
+ out_channel=num_label, # output channels
72
+ channel=(16, 32, 32, 32, 32), # channel sequence
73
+ stride=(1, 2, 2, 2), # convolutional strides
74
+ dropouts=dropout,
75
+ acts=activation_type,
76
+ norms=normalization_type,
77
+ num_res_unit=num_res
78
+ )
79
+ return reg_net
80
+
81
+
82
+ def setup_logger(logger_name, log_file, level=logging.INFO):
83
+ log_setup = logging.getLogger(logger_name)
84
+ formatter = logging.Formatter('%(asctime)s %(message)s', datefmt="%Y-%m-%d %H:%M:%S")
85
+ fileHandler = logging.FileHandler(log_file, mode='w')
86
+ fileHandler.setFormatter(formatter)
87
+ streamHandler = logging.StreamHandler()
88
+ streamHandler.setFormatter(formatter)
89
+ log_setup.setLevel(level)
90
+ log_setup.addHandler(fileHandler)
91
+ log_setup.addHandler(streamHandler)
92
+
93
+ def classify_data(data_info, fold):
94
+ lab_each_fold = {}
95
+ lab = []
96
+ unlab = []
97
+ total_seg = 0
98
+ total_seg_each_fold = {}
99
+ for key, value in data_info.items():
100
+ if key != f'fold_{fold}':
101
+ lab_each_fold[key] = []
102
+ total_seg_each_fold[key] = 0
103
+ for val in value:
104
+ if 'seg' not in val.keys():
105
+ unlab.append(val)
106
+ else:
107
+ lab_each_fold[key].append(val)
108
+ lab.append(val)
109
+ total_seg += 1
110
+ total_seg_each_fold[key] += 1
111
+
112
+ return lab_each_fold, lab, unlab, total_seg, total_seg_each_fold
113
+
114
+ def select_n_seg(lab, fold, num, total_seg_each_fold):
115
+ seg_items = lab[f'fold_{fold}']
116
+ num_seg = len(seg_items)
117
+ rand_num = random.sample(range(num_seg), num)
118
+ seg_item = np.array(seg_items)[np.array(rand_num)]
119
+ seg_items.pop(rand_num[0])
120
+ total_seg_each_fold[f'fold_{fold}'] -= 1
121
+ lab[f'fold_{fold}'] = seg_items
122
+ return list(seg_item), lab, total_seg_each_fold
123
+
124
+ def combine_data(data_info, fold, exp, num_seg):
125
+ all_fold = np.arange(len(data_info.keys())) + 1
126
+ num_train_fold = len(data_info.keys()) - 1
127
+ fake_train_fold = np.delete(all_fold, fold-1)
128
+ fake_train_fold = np.tile(fake_train_fold, 2)
129
+ real_train_fold = fake_train_fold[fold-1:fold+num_train_fold-1]
130
+ train = []
131
+ test = []
132
+ for j in data_info[f'fold_{fold}']:
133
+ if 'seg' in j.keys():
134
+ test.append(j)
135
+
136
+ lab_each_fold, lab, unlab, total_seg, total_seg_each_fold = classify_data(data_info, fold)
137
+ if total_seg < num_seg:
138
+ num_seg = total_seg
139
+
140
+ num_each_fold_seg = divmod(num_seg, num_train_fold)[0]
141
+ fold_num_seg = np.repeat(num_each_fold_seg, num_train_fold)
142
+ num_remain_seg = divmod(num_seg, num_train_fold)[1]
143
+ count = 0
144
+ while num_remain_seg > 0:
145
+ fold_num_seg[count] += 1
146
+ count = (count+1) % num_train_fold
147
+ num_remain_seg -= 1
148
+
149
+ train = unlab
150
+ k = 0
151
+ while num_seg > 0:
152
+ next_fold = real_train_fold[k]
153
+ if total_seg_each_fold[f'fold_{next_fold}'] > 0:
154
+ seg_items, lab_each_fold, total_seg_each_fold = select_n_seg(lab_each_fold, next_fold, 1, total_seg_each_fold)
155
+ train.extend(seg_items)
156
+ num_seg -= 1
157
+ k = (k+1) % 4
158
+
159
+ num_segs = 0
160
+ if exp != 1:
161
+ for key, value in total_seg_each_fold.items():
162
+ if value != 0:
163
+ for j in lab_each_fold[key]:
164
+ item = {'img': j['img']}
165
+ train.append(item)
166
+ total_seg_each_fold[key] -= 1
167
+ for key, value in total_seg_each_fold.items():
168
+ num_segs += value
169
+
170
+ assert num_segs == 0
171
+
172
+ return train, test
173
+
174
+
175
def main():
    """Entry point: build per-fold result folders, create (or reload) the
    train/test dataset split, construct the segmentation and registration
    networks plus their dataloaders, and launch joint DeepAtlas training.
    """
    args = parse_command_line()
    config = args.config
    continue_training = args.continue_training
    train_only = args.train_only
    config = load_json(config)
    # Expose the JSON config keys as attributes (config.task_name, ...).
    config = namedtuple("config", config.keys())(*config.values())
    folder_name = config.folder_name
    num_seg_used = config.num_seg_used
    experiment_set = config.exp_set
    # Fixed seed for reproducible splits/initialization across runs.
    monai.utils.set_determinism(seed=2938649572)
    data_path = os.path.join(ROOT_DIR, 'deepatlas_results')
    base_path = os.path.join(ROOT_DIR, 'deepatlas_preprocessed')
    task = os.path.join(data_path, config.task_name)
    exp_path = os.path.join(task, f'set_{experiment_set}')
    gt_path = os.path.join(exp_path, f'{num_seg_used}gt')
    folder_path = os.path.join(gt_path, folder_name)
    result_path = os.path.join(folder_path, 'training_results')
    if train_only:
        info_name = 'info_train_only'
    else:
        info_name = 'info'
    info_path = os.path.join(base_path, config.task_name, 'Training_dataset', 'data_info', folder_name, info_name+'.json')
    info = load_json(info_path)
    # NOTE(review): `device` is only bound when CUDA is available; on a
    # CPU-only machine the train_network call below would raise NameError —
    # confirm a GPU is required.
    if torch.cuda.is_available():
        device = torch.device("cuda:" + str(torch.cuda.current_device()))

    # Network hyper-parameters, all read from the config file.
    spatial_dim = config.network['spatial_dim']
    dropout = config.network['dropout']
    activation_type = config.network['activation_type']
    normalization_type = config.network['normalization_type']
    num_res = config.network['num_res']
    lr_reg = config.network["registration_network_learning_rate"]
    lr_seg = config.network["segmentation_network_learning_rate"]
    lam_a = config.network["anatomy_loss_weight"]
    lam_sp = config.network["supervised_segmentation_loss_weight"]
    lam_re = config.network["regularization_loss_weight"]
    max_epoch = config.network["number_epoch"]
    val_step = config.network["validation_step"]
    # Create the whole result directory hierarchy if missing.
    make_if_dont_exist(data_path)
    make_if_dont_exist(task)
    make_if_dont_exist(exp_path)
    make_if_dont_exist(gt_path)
    make_if_dont_exist(folder_path)
    make_if_dont_exist(result_path)

    if not continue_training:
        start_fold = 1
    else:
        # Resume from the last fold directory present under the results path.
        # NOTE(review): lexicographic sort puts 'fold_10' before 'fold_2' —
        # confirm num_fold stays below 10 or switch to a numeric sort.
        folds = sorted(os.listdir(result_path))
        if len(folds) == 0:
            continue_training = False
            start_fold = 1
        else:
            last_fold_num = folds[-1].split('_')[-1]
            start_fold = int(last_fold_num)

    if train_only:
        # Single pass over all data, no cross-validation.
        num_fold = 1
    else:
        num_fold = config.num_fold

    for i in range (start_fold, num_fold+1):
        # Per-fold (or 'all') output directories for the two networks.
        if not train_only:
            fold_path = os.path.join(result_path, f'fold_{i}')
            result_seg_path = os.path.join(fold_path, 'SegNet')
            result_reg_path = os.path.join(fold_path, 'RegNet')
        else:
            fold_path = os.path.join(result_path, f'all')
            result_seg_path = os.path.join(fold_path, 'SegNet')
            result_reg_path = os.path.join(fold_path, 'RegNet')

        make_if_dont_exist(fold_path)
        make_if_dont_exist(result_reg_path)
        make_if_dont_exist(result_seg_path)
        # Timestamped log file, one per run of this fold.
        datetime_object = 'training_log_' + datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S") + '.log'
        log_path = os.path.join(fold_path, datetime_object)

        if not train_only:
            if not continue_training:
                setup_logger(f'log_{i}', log_path)
                logger = logging.getLogger(f'log_{i}')
                logger.info(f"Start Pipeline with fold_{i}")
            else:
                # NOTE(review): a resumed run uses logger name log_{i+1} so it
                # does not collide with a logger already registered for fold i.
                setup_logger(f'log_{i+1}', log_path)
                logger = logging.getLogger(f'log_{i+1}')
                logger.info(f"Resume Pipeline with fold_{i}")
        else:
            setup_logger(f'all', log_path)
            logger = logging.getLogger(f'all')
            logger.info(f"Start Pipeline with all data")

        # First run for this fold: build the split and persist dataset.json.
        if not os.path.exists(os.path.join(fold_path, 'dataset.json')):
            logger.info('prepare dataset into train and test')
            json_dict = OrderedDict()
            json_dict['name'] = os.path.basename(task).split('_')[0]
            json_dict['description'] = '_'.join(os.path.basename(task).split('_')[1:])
            json_dict['tensorImageSize'] = "4D"
            json_dict['reference'] = "MODIFY"
            json_dict['licence'] = "MODIFY"
            json_dict['release'] = "0.0"
            json_dict['modality'] = {
                "0": "CT"
            }
            json_dict['labels'] = config.labels
            json_dict['network'] = config.network
            json_dict['experiment_set'] = experiment_set
            if not train_only:
                json_dict['num_fold'] = f'fold_{i}'
                train, test = combine_data(info, i, experiment_set, num_seg_used)
            else:
                json_dict['num_fold'] = 'all'
                train = info
                test = []
                # With no held-out fold, count every labelled item as used.
                num_seg_used = len(list(filter(lambda d: 'seg' in d.keys(), train)))
            #num_seg = 15
            #train, test, num_train, num_test = split_data(img_path, seg_path, num_seg)
            #print(type(train))

            num_seg = num_seg_used
            num_train = len(train)
            num_test = len(test)
            #print(train.keys())
            json_dict['total_numScanTraining'] = num_train
            json_dict['total_numLabelTraining'] = num_seg
            json_dict['total_numTest'] = num_test
            json_dict['total_train'] = train
            json_dict['total_test'] = test
            # prepare segmentation dataset
            logger.info('prepare segmentation dataset')
            data_seg_available = list(filter(lambda d: 'seg' in d.keys(), train))
            data_seg_unavailable = list(filter(lambda d: 'seg' not in d.keys(), train))
            # 80/20 train/valid split of the labelled scans.
            data_seg_available_train, data_seg_available_valid = \
                monai.data.utils.partition_dataset(data_seg_available, ratios=(8, 2))
            json_dict['seg_numTrain'] = len(data_seg_available_train)
            json_dict['seg_train'] = data_seg_available_train
            json_dict['seg_numValid'] = len(data_seg_available_valid)
            json_dict['seg_valid'] = data_seg_available_valid
            dataset_seg_available_train, dataset_seg_available_valid = load_seg_dataset(
                data_seg_available_train, data_seg_available_valid)
            # Probe one sample to discover the image shape and label count.
            data_item = random.choice(dataset_seg_available_train)
            img_shape = data_item['seg'].unsqueeze(0).shape[2:]
            num_label = len(torch.unique(data_item['seg']))
            logger.info('prepare segmentation network')
            seg_net = get_seg_net(spatial_dim, num_label, dropout,
                                  activation_type, normalization_type, num_res)
            # prepare registration dataset
            logger.info('prepare registration dataset')
            # Keep the seg-validation scans out of registration training.
            data_without_seg_valid = data_seg_unavailable + data_seg_available_train
            data_valid, data_train = monai.data.utils.partition_dataset(
                data_without_seg_valid,  # Note the order
                ratios=(2, 8),  # Note the order
                shuffle=False
            )
            data_paires_without_seg_valid = take_data_pairs(data_without_seg_valid)
            data_pairs_valid = take_data_pairs(data_valid)
            data_pairs_train = take_data_pairs(data_train)
            # Split pairs by segmentation availability: '00', '01', '10', '11'.
            data_pairs_valid_subdivided = subdivide_list_of_data_pairs(
                data_pairs_valid)
            data_pairs_train_subdivided = subdivide_list_of_data_pairs(
                data_pairs_train)
            num_train_reg_net = len(data_pairs_train)
            num_valid_reg_net = len(data_pairs_valid)
            # Pairs with at least one segmentation train both networks jointly.
            num_train_both = len(data_pairs_train_subdivided['01']) +\
                len(data_pairs_train_subdivided['10']) +\
                len(data_pairs_train_subdivided['11'])
            json_dict['reg_seg_numTrain'] = num_train_reg_net
            json_dict['reg_seg_numTrain_00'] = len(data_pairs_train_subdivided['00'])
            json_dict['reg_seg_train_00'] = data_pairs_train_subdivided['00']
            json_dict['reg_seg_numTrain_01'] = len(data_pairs_train_subdivided['01'])
            json_dict['reg_seg_train_01'] = data_pairs_train_subdivided['01']
            json_dict['reg_seg_numTrain_10'] = len(data_pairs_train_subdivided['10'])
            json_dict['reg_seg_train_10'] = data_pairs_train_subdivided['10']
            json_dict['reg_seg_numTrain_11'] = len(data_pairs_train_subdivided['11'])
            json_dict['reg_seg_train_11'] = data_pairs_train_subdivided['11']
            json_dict['reg_numValid'] = num_valid_reg_net
            json_dict['reg_numValid_00'] = len(data_pairs_valid_subdivided['00'])
            json_dict['reg_valid_00'] = data_pairs_valid_subdivided['00']
            json_dict['reg_numValid_01'] = len(data_pairs_valid_subdivided['01'])
            json_dict['reg_valid_01'] = data_pairs_valid_subdivided['01']
            json_dict['reg_numValid_10'] = len(data_pairs_valid_subdivided['10'])
            json_dict['reg_valid_10'] = data_pairs_valid_subdivided['10']
            json_dict['reg_numValid_11'] = len(data_pairs_valid_subdivided['11'])
            json_dict['reg_valid_11'] = data_pairs_valid_subdivided['11']
            print(f"""We have {num_train_both} pairs to train reg_net and seg_net together, and an additional {num_train_reg_net - num_train_both} to train reg_net alone.""")
            print(f"We have {num_valid_reg_net} pairs for reg_net validation.")

            dataset_pairs_train_subdivided, dataset_pairs_valid_subdivided = load_reg_dataset(
                data_pairs_train_subdivided, data_pairs_valid_subdivided)
            logger.info('prepare registration network')
            reg_net = get_reg_net(spatial_dim, spatial_dim, dropout,
                                  activation_type, normalization_type, num_res)
            logger.info('generate dataset json file')
            with open(os.path.join(fold_path, 'dataset.json'), 'w') as f:
                json.dump(json_dict, f, indent=4, sort_keys=False)

        else:
            # dataset.json already exists (resumed run): reload the stored
            # split instead of re-partitioning.
            dataset_json = load_json(os.path.join(fold_path, 'dataset.json'))

            data_seg_available_train = dataset_json['seg_train']
            data_seg_available_valid = dataset_json['seg_valid']
            dataset_seg_available_train, dataset_seg_available_valid = load_seg_dataset(data_seg_available_train, data_seg_available_valid)
            data_item = random.choice(dataset_seg_available_train)
            img_shape = data_item['seg'].unsqueeze(0).shape[2:]
            num_label = len(torch.unique(data_item['seg']))
            logger.info('prepare segmentation network')
            seg_net = get_seg_net(spatial_dim, num_label, dropout, activation_type, normalization_type, num_res)

            data_pairs_train_subdivided = {
                '00': dataset_json['reg_seg_train_00'],
                '01': dataset_json['reg_seg_train_01'],
                '10': dataset_json['reg_seg_train_10'],
                '11': dataset_json['reg_seg_train_11']
            }
            data_pairs_valid_subdivided = {
                '00': dataset_json['reg_valid_00'],
                '01': dataset_json['reg_valid_01'],
                '10': dataset_json['reg_valid_10'],
                '11': dataset_json['reg_valid_11']
            }
            num_train_reg_net = dataset_json['reg_seg_numTrain']
            num_valid_reg_net = dataset_json['reg_numValid']
            num_train_both = len(data_pairs_train_subdivided['01']) +\
                len(data_pairs_train_subdivided['10']) +\
                len(data_pairs_train_subdivided['11'])
            print(f"""We have {num_train_both} pairs to train reg_net and seg_net together,
            and an additional {num_train_reg_net - num_train_both} to train reg_net alone.""")
            print(f"We have {num_valid_reg_net} pairs for reg_net validation.")

            dataset_pairs_train_subdivided, dataset_pairs_valid_subdivided = load_reg_dataset(
                data_pairs_train_subdivided, data_pairs_valid_subdivided)
            logger.info('prepare registration network')
            reg_net = get_reg_net(spatial_dim, spatial_dim, dropout,
                                  activation_type, normalization_type, num_res)

        # Dataloaders are built the same way for both branches above.
        dataloader_train_seg = monai.data.DataLoader(
            dataset_seg_available_train,
            batch_size=2,
            num_workers=4,
            shuffle=True
        )
        dataloader_valid_seg = monai.data.DataLoader(
            dataset_seg_available_valid,
            batch_size=4,
            num_workers=4,
            shuffle=False
        )
        dataloader_train_reg = {
            seg_availability: monai.data.DataLoader(
                dataset,
                batch_size=1,
                num_workers=4,
                shuffle=True
            )
            # empty dataloaders are not a thing-- put an empty list if needed
            if len(dataset) > 0 else []
            for seg_availability, dataset in dataset_pairs_train_subdivided.items()
        }
        dataloader_valid_reg = {
            seg_availability: monai.data.DataLoader(
                dataset,
                batch_size=2,
                num_workers=4,
                shuffle=True  # Shuffle validation data because we will only take a sample for validation each time
            )
            # empty dataloaders are not a thing-- put an empty list if needed
            if len(dataset) > 0 else []
            for seg_availability, dataset in dataset_pairs_valid_subdivided.items()
        }
        # Launch the joint (alternating) seg/reg training loop for this fold.
        train_network(dataloader_train_reg,
                      dataloader_valid_reg,
                      dataloader_train_seg,
                      dataloader_valid_seg,
                      device,
                      seg_net,
                      reg_net,
                      num_label,
                      lr_reg,
                      lr_seg,
                      lam_a,
                      lam_sp,
                      lam_re,
                      max_epoch,
                      val_step,
                      result_seg_path,
                      result_reg_path,
                      logger,
                      img_shape,
                      plot_network=args.plot_network,
                      continue_training=continue_training
                      )
        '''
        seg_train.train_seg(
            dataloader_train_seg,
            dataloader_valid_seg,
            device,
            seg_net,
            lr_seg,
            max_epoch,
            val_step,
            result_seg_path
        )
        '''
479
+
480
if __name__ == '__main__':
    # Release any cached GPU memory left over from a previous process before
    # starting a fresh training run.
    torch.cuda.empty_cache()
    main()
deepatlas/scripts/seg_train.py ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #import generators
2
+ import monai
3
+ import torch
4
+ import numpy as np
5
+ import matplotlib.pyplot as plt
6
+ import os
7
+ import sys
8
+
9
+ from pathlib import Path
10
+
11
+ ROOT_DIR = str(Path(os.getcwd()).parent.parent.absolute())
12
+ sys.path.insert(0, os.path.join(ROOT_DIR, 'deepatlas/utils'))
13
+ sys.path.insert(0, os.path.join(ROOT_DIR, 'deepatlas/loss_function'))
14
+ from utils import (
15
+ preview_image, preview_3D_vector_field, preview_3D_deformation,
16
+ jacobian_determinant
17
+ )
18
+ from losses import (
19
+ warp_func, warp_nearest_func, lncc_loss_func, dice_loss_func, reg_losses, dice_loss_func2
20
+ )
21
+
22
def train_seg(dataloader_train_seg,
              dataloader_valid_seg,
              device,
              seg_net,
              lr_seg,
              max_epoch,
              val_step,
              result_seg_path
              ):
    """Train the segmentation network alone with a Dice loss.

    Args:
        dataloader_train_seg: loader yielding {'img', 'seg'} training batches.
        dataloader_valid_seg: loader yielding {'img', 'seg'} validation batches.
        device: torch device to train on.
        seg_net: segmentation network; trained in place.
        lr_seg: Adam learning rate.
        max_epoch: total number of training epochs.
        val_step: run validation every `val_step` epochs (epoch 0 included).
        result_seg_path: directory receiving 'seg_net_best.pth'.
    """
    # (if already done then you may skip to and uncomment the checkpoint loading cell below)

    seg_net.to(device)

    # BUG FIX: the learning rate, epoch count and validation interval were
    # hard-coded (1e-3 / 300 / 5), silently ignoring the lr_seg / max_epoch /
    # val_step arguments. They are now honoured.
    optimizer = torch.optim.Adam(seg_net.parameters(), lr_seg)

    training_losses = []
    validation_losses = []
    best_validation_loss = float('inf')
    dice_loss = dice_loss_func2()
    for epoch_number in range(max_epoch):

        print(f"Epoch {epoch_number+1}/{max_epoch}:")

        seg_net.train()
        losses = []
        for batch in dataloader_train_seg:
            imgs = batch['img'].to(device)
            true_segs = batch['seg'].to(device)

            optimizer.zero_grad()
            predicted_segs = seg_net(imgs)
            loss = dice_loss(predicted_segs, true_segs)
            loss.backward()
            optimizer.step()

            losses.append(loss.item())

        training_loss = np.mean(losses)
        print(f"\ttraining loss: {training_loss}")
        training_losses.append([epoch_number, training_loss])

        if epoch_number % val_step == 0:
            seg_net.eval()
            losses = []
            with torch.no_grad():
                for batch in dataloader_valid_seg:
                    imgs = batch['img'].to(device)
                    true_segs = batch['seg'].to(device)
                    predicted_segs = seg_net(imgs)
                    loss = dice_loss(predicted_segs, true_segs)
                    losses.append(loss.item())

            validation_loss = np.mean(losses)
            print(f"\tvalidation loss: {validation_loss}")
            validation_losses.append([epoch_number, validation_loss])

            # BUG FIX: the checkpoint is named 'seg_net_best.pth' but used to
            # hold the *final*-epoch weights; now it tracks the best
            # validation loss seen so far.
            if validation_loss < best_validation_loss:
                best_validation_loss = validation_loss
                torch.save(seg_net.state_dict(),
                           os.path.join(result_seg_path, 'seg_net_best.pth'))

        # Free up some memory
        del loss, predicted_segs, true_segs, imgs
        torch.cuda.empty_cache()
deepatlas/test/test.py ADDED
@@ -0,0 +1,536 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import monai
2
+ import torch
3
+ import itk
4
+ import numpy as np
5
+ import matplotlib.pyplot as plt
6
+ import matplotlib as mpl
7
+ import os
8
+ import nibabel as nib
9
+ import sys
10
+ import json
11
+ from pathlib import Path
12
+ mpl.rc('figure', max_open_warning = 0)
13
+ ROOT_DIR = str(Path(os.getcwd()).parent.parent.absolute())
14
+ sys.path.insert(0, os.path.join(ROOT_DIR, 'deepatlas/utils'))
15
+ sys.path.insert(0, os.path.join(ROOT_DIR, 'deepatlas/loss_function'))
16
+ sys.path.insert(0, os.path.join(ROOT_DIR, 'deepatlas/preprocess'))
17
+
18
+ from process_data import (
19
+ take_data_pairs, subdivide_list_of_data_pairs
20
+ )
21
+ from utils import (
22
+ plot_2D_vector_field, jacobian_determinant, plot_2D_deformation, load_json
23
+ )
24
+ from losses import (
25
+ warp_func, warp_nearest_func, lncc_loss_func, dice_loss_func2, dice_loss_func
26
+ )
27
+
28
def load_seg_dataset(data_list):
    """Build a cached MONAI dataset for {'img', 'seg'} items.

    Loads each NIfTI, adds a channel axis, resamples to 1 mm isotropic
    spacing (trilinear for images, nearest for labels), and converts to
    tensors. The 'seg' key may be absent from individual items.
    """
    seg_keys = ['img', 'seg']
    pipeline = monai.transforms.Compose(
        transforms=[
            monai.transforms.LoadImageD(keys=seg_keys, image_only=True, allow_missing_keys=True),
            monai.transforms.AddChannelD(keys=seg_keys, allow_missing_keys=True),
            monai.transforms.SpacingD(keys=seg_keys, pixdim=(1., 1., 1.), mode=('trilinear', 'nearest'), allow_missing_keys=True),
            monai.transforms.ToTensorD(keys=seg_keys, allow_missing_keys=True),
        ]
    )
    # Silence ITK's noisy reader warnings.
    itk.ProcessObject.SetGlobalWarningDisplay(False)
    return monai.data.CacheDataset(
        data=data_list,
        transform=pipeline,
        cache_num=16,
        hash_as_key=True,
    )
+ return dataset_seg_available_train
48
+
49
+
50
def load_reg_dataset(data_list):
    """Build cached MONAI datasets for registration pairs.

    `data_list` maps a segmentation-availability code ('00', '01', '10',
    '11') to a list of pair dicts with keys among {'img1', 'seg1', 'img2',
    'seg2'}. Each pair is loaded, channel-expanded, resampled to 1 mm
    isotropic spacing, and the two images are concatenated channel-wise into
    a single 'img12' tensor (the separate image keys are removed).
    """
    pair_keys = ['img1', 'seg1', 'img2', 'seg2']
    pipeline = monai.transforms.Compose(
        transforms=[
            monai.transforms.LoadImageD(
                keys=pair_keys, image_only=True, allow_missing_keys=True),
            monai.transforms.ToTensorD(
                keys=pair_keys, allow_missing_keys=True),
            monai.transforms.AddChannelD(
                keys=pair_keys, allow_missing_keys=True),
            monai.transforms.SpacingD(
                keys=pair_keys, pixdim=(1., 1., 1.),
                mode=('trilinear', 'nearest', 'trilinear', 'nearest'),
                allow_missing_keys=True),
            # Stack target (img1) and moving (img2) images into one input.
            monai.transforms.ConcatItemsD(
                keys=['img1', 'img2'], name='img12', dim=0),
            monai.transforms.DeleteItemsD(keys=['img1', 'img2']),
        ]
    )
    return {
        availability: monai.data.CacheDataset(
            data=pairs,
            transform=pipeline,
            cache_num=32,
            hash_as_key=True,
        )
        for availability, pairs in data_list.items()
    }
81
+
82
def get_nii_info(data, reg=False):
    """Collect NIfTI headers, affines and scan ids for a list of data items.

    Args:
        data: for reg=False, a list of dicts with an 'img' path and optionally
            a 'seg' path; for reg=True, a list of pair dicts with keys among
            {'img1', 'img2', 'seg1', 'seg2'} (all file paths).
        reg: whether `data` holds registration pairs.

    Returns:
        (headers, affines, ids). For reg=False these are parallel lists, one
        entry per item (geometry taken from the segmentation file when
        present, otherwise the image). For reg=True these are dicts keyed by
        segmentation availability ('00', '01', '10', '11'), each holding one
        per-pair {key: header/affine/id} dict.
    """
    if not reg:
        headers = []
        affines = []
        ids = []
        for item in data:
            # Prefer the segmentation volume for geometry when it exists.
            # (The original code also counted unique labels here via a full
            # get_fdata() read, but never used the result -- dropped.)
            key = 'seg' if 'seg' in item.keys() else 'img'
            nii = nib.load(item[key])
            headers.append(nii.header)
            affines.append(nii.affine)
            ids.append(os.path.basename(item[key]).split('.')[0])
        return headers, affines, ids

    headers = {'00': [], '01': [], '10': [], '11': []}
    affines = {'00': [], '01': [], '10': [], '11': []}
    ids = {'00': [], '01': [], '10': [], '11': []}
    for item in data:
        header = {}
        affine = {}
        scan_id = {}
        for key in item.keys():
            nii = nib.load(item[key])
            header[key] = nii.header
            affine[key] = nii.affine
            scan_id[key] = os.path.basename(item[key]).split('.')[0]
        # Availability code: first char = seg1 present, second = seg2 present.
        # This replaces four copy-pasted branches in the original.
        keys = item.keys()
        group = ('1' if 'seg1' in keys else '0') + ('1' if 'seg2' in keys else '0')
        headers[group].append(header)
        affines[group].append(affine)
        ids[group].append(scan_id)

    return headers, affines, ids
+ return headers, affines, ids
158
+
159
+
160
def seg_training_inference(seg_net, device, model_path, output_path, num_label, json_path=None, data=None):
    """Run the trained seg_net on test scans, save predicted label maps as
    NIfTI files, and write per-scan/average Dice scores where ground truth
    segmentations exist.

    Exactly one of `json_path` (dataset.json with a 'total_test' list) or
    `data` (a ready list of {'img'[, 'seg']} path dicts) must be given.
    """
    if json_path is not None:
        assert data is None
        json_file = load_json(json_path)
        raw_data = json_file['total_test']
    else:
        assert data is not None
        raw_data = data
    # Headers/affines/ids are parallel to raw_data; the CacheDataset below is
    # iterated in the same order, indexed by k.
    headers, affines, ids = get_nii_info(raw_data, reg=False)
    seg_net.to(device)
    seg_net.load_state_dict(torch.load(model_path, map_location=device))
    seg_net.eval()
    dice_metric = monai.metrics.DiceMetric(include_background=False, reduction='none')
    data_seg = load_seg_dataset(raw_data)
    k = 0
    eval_losses = []  # human-readable per-scan report lines
    eval_los = []     # raw per-scan Dice arrays, for the average
    for i in data_seg:
        has_seg = False
        header1 = headers[k]
        affine1 = affines[k]
        id = ids[k]
        data_item = i
        test_input = data_item['img']
        if 'seg' in data_item.keys():
            test_gt = data_item['seg']
            has_seg = True
        with torch.no_grad():
            test_seg_predicted = seg_net(test_input.unsqueeze(0).to(device)).cpu()

        # prediction: 3D label volume; prediction1: same with batch/channel
        # axes kept for one-hot encoding.
        prediction = torch.argmax(torch.softmax(
            test_seg_predicted, dim=1), dim=1, keepdim=True)[0, 0]
        prediction1 = torch.argmax(torch.softmax(
            test_seg_predicted, dim=1), dim=1, keepdim=True)

        onehot_pred = monai.networks.one_hot(prediction1, num_label)
        if has_seg:
            # Per-label Dice against the ground-truth segmentation.
            onehot_gt = monai.networks.one_hot(test_gt.unsqueeze(0), num_label)
            dsc = dice_metric(onehot_pred, onehot_gt).numpy()
            eval_los.append(dsc)
            eval_loss = f"Scan ID: {id}, dice score: {dsc}"
            eval_losses.append(eval_loss)

        pred_np = prediction.detach().cpu().numpy()
        print(f'{id}: {np.unique(pred_np)}')

        # Save the predicted labels with the source scan's geometry.
        pred_np = pred_np.astype('int16')
        nii = nib.Nifti1Image(pred_np, affine=affine1, header=header1)
        nii.header.get_xyzt_units()
        nib.save(nii, (os.path.join(output_path, id + '.nii.gz')))
        k += 1

        del test_seg_predicted

    # NOTE(review): if no test item has a segmentation, eval_los is empty and
    # np.mean returns nan with a warning -- confirm test sets always include
    # at least one labelled scan.
    average = np.mean(eval_los, 0)

    with open(os.path.join(output_path, 'seg_dsc.txt'), 'w') as f:
        for s in eval_losses:
            f.write(s + '\n')
        f.write('\n\nAverage Dice Score: ' + str(average))
    torch.cuda.empty_cache()
+
222
+
223
def reg_training_inference(reg_net, device, model_path, output_path, num_label, json_path=None, data=None):
    """Run the trained reg_net on test pairs: warp the moving image and its
    segmentation onto the target, save both as NIfTI, render QA figures, and
    (for doubly-labelled pairs) report Dice and similarity losses.

    Exactly one of `json_path` (dataset.json with 'total_test') or `data`
    (list of {'img'[, 'seg']} path dicts) must be given. Only the '01'
    (moving has seg) and '11' (both have segs) pair groups are processed.
    """
    if json_path is not None:
        assert data is None
        json_file = load_json(json_path)
        raw_data = json_file['total_test']
    else:
        assert data is not None
        raw_data = data
    # Run this cell to try out reg net on a random validation pair
    reg_net.to(device)
    reg_net.load_state_dict(torch.load(model_path, map_location=device))
    reg_net.eval()
    data_list = take_data_pairs(raw_data)
    # headers/affines/ids are grouped by seg availability, parallel to the
    # subdivided datasets below and indexed by k.
    headers, affines, ids = get_nii_info(data_list, reg=True)
    subvided_data_list = subdivide_list_of_data_pairs(data_list)
    subvided_dataset = load_reg_dataset(subvided_data_list)
    warp = warp_func()                  # trilinear warp for images
    warp_nearest = warp_nearest_func()  # nearest-neighbour warp for labels
    lncc_loss = lncc_loss_func()
    k = 0
    # --- '01' pairs: only the moving image (img2) has a segmentation ---
    if len(subvided_data_list['01']) != 0:
        dataset01 = subvided_dataset['01']
        #test_len = int(len(dataset01) / 4)
        for j in range(len(dataset01)):
            data_item = dataset01[j]
            # img12 channel 0 = target image, channel 1 = moving image.
            img12 = data_item['img12'].unsqueeze(0).to(device)
            moving_raw_seg = data_item['seg2'].unsqueeze(0).to(device)
            moving_seg = monai.networks.one_hot(moving_raw_seg, num_label)
            id = ids['01'][k]
            affine = affines['01'][k]
            header = headers['01'][k]
            with torch.no_grad():
                reg_net_example_output = reg_net(img12)

            example_warped_image = warp(
                img12[:, [1], :, :, :],  # moving image
                reg_net_example_output  # warping
            )
            example_warped_seg = warp_nearest(
                moving_seg,
                reg_net_example_output
            )
            moving_img = img12[0, 1, :, :, :]
            target_img = img12[0, 0, :, :, :]
            id_target_img = id['img1']
            id_moving_img = id['img2']
            # Warped outputs live in the target's space, so save with the
            # target image geometry (no seg1 exists for '01' pairs).
            head_target_img = header['img1']
            head_target_seg = header['img1']
            aff_target_img = affine['img1']
            aff_target_seg = affine['img1']
            prediction = torch.argmax(torch.softmax(
                example_warped_seg, dim=1), dim=1, keepdim=True)[0, 0]
            prediction1 = torch.argmax(torch.softmax(
                example_warped_seg, dim=1), dim=1, keepdim=True)
            warped_img_np = example_warped_image[0, 0].detach().cpu().numpy()
            #warped_img_np = np.transpose(warped_img_np, (2, 1, 0))
            warped_seg_np = prediction.detach().cpu().numpy()
            #warped_seg_np = np.transpose(warped_seg_np, (2, 1, 0))
            nii_seg = nib.Nifti1Image(
                warped_seg_np, affine=aff_target_seg, header=head_target_seg)
            nii = nib.Nifti1Image(
                warped_img_np, affine=aff_target_img, header=head_target_img)
            nii.to_filename(os.path.join(
                output_path, id_moving_img + '_to_' + id_target_img + '.nii.gz'))
            nii_seg.to_filename(os.path.join(
                output_path, id_moving_img + '_to_' + id_target_img + '_seg.nii.gz'))
            grid_spacing = 5
            # Jacobian determinant of the displacement field (<=0 marks folds).
            det = jacobian_determinant(reg_net_example_output.cpu().detach()[0])
            visualize(target_img.cpu(),
                      id_target_img,
                      moving_img.cpu(),
                      id_moving_img,
                      example_warped_image[0, 0].cpu(),
                      reg_net_example_output.cpu().detach()[0],
                      det,
                      grid_spacing,
                      normalize_by='slice',
                      cmap='gray',
                      threshold=None,
                      linewidth=1,
                      color='darkblue',
                      downsampling=None,
                      threshold_det=0,
                      output=output_path
                      )
            k += 1
            del reg_net_example_output, img12, example_warped_image, example_warped_seg

    # --- '11' pairs: both images have segmentations, so Dice is computable ---
    if len(subvided_data_list['11']) != 0:
        dataset11 = subvided_dataset['11']
        k = 0
        eval_losses_img = []  # per-pair similarity report lines
        eval_losses_seg = []  # per-pair Dice report lines
        eval_los = []         # raw Dice arrays, for the average
        #test_len = int(len(dataset11) / 4)
        for i in range(len(dataset11)):
            data_item = dataset11[i]
            img12 = data_item['img12'].unsqueeze(0).to(device)
            gt_raw_seg = data_item['seg1'].unsqueeze(0).to(device)
            moving_raw_seg = data_item['seg2'].unsqueeze(0).to(device)
            moving_seg = monai.networks.one_hot(moving_raw_seg, num_label)
            gt_seg = monai.networks.one_hot(gt_raw_seg, num_label)
            id = ids['11'][k]
            affine = affines['11'][k]
            header = headers['11'][k]
            with torch.no_grad():
                reg_net_example_output = reg_net(img12)

            example_warped_image = warp(
                img12[:, [1], :, :, :],  # moving image
                reg_net_example_output  # warping
            )
            example_warped_seg = warp_nearest(
                moving_seg,
                reg_net_example_output
            )
            moving_img = img12[0, 1, :, :, :]
            target_img = img12[0, 0, :, :, :]
            id_target_img = id['img1']
            id_moving_img = id['img2']
            head_target_img = header['img1']
            head_target_seg = header['seg1']
            aff_target_img = affine['img1']
            aff_target_seg = affine['seg1']
            dice_metric = monai.metrics.DiceMetric(include_background=False, reduction='none')
            prediction = torch.argmax(torch.softmax(
                example_warped_seg, dim=1), dim=1, keepdim=True)[0, 0]
            prediction1 = torch.argmax(torch.softmax(
                example_warped_seg, dim=1), dim=1, keepdim=True)
            # Dice of the warped moving segmentation vs the target's ground truth.
            onehot_pred = monai.networks.one_hot(prediction1, num_label)
            dsc = dice_metric(onehot_pred, gt_seg).detach().cpu().numpy()
            eval_los.append(dsc)
            eval_loss_seg = f"Scan {id_moving_img} to {id_target_img}, dice score: {dsc}"
            eval_losses_seg.append(eval_loss_seg)
            warped_img_np = example_warped_image[0, 0].detach().cpu().numpy()
            #warped_img_np = np.transpose(warped_img_np, (2, 1, 0))
            warped_seg_np = prediction.detach().cpu().numpy()
            #warped_seg_np = np.transpose(warped_seg_np, (2, 1, 0))
            nii_seg = nib.Nifti1Image(
                warped_seg_np, affine=aff_target_seg, header=head_target_seg)
            nii = nib.Nifti1Image(
                warped_img_np, affine=aff_target_img, header=head_target_img)
            nii.to_filename(os.path.join(
                output_path, id_moving_img + '_to_' + id_target_img + '.nii.gz'))
            nii_seg.to_filename(os.path.join(
                output_path, id_moving_img + '_to_' + id_target_img + '_seg.nii.gz'))
            grid_spacing = 5
            det = jacobian_determinant(reg_net_example_output.cpu().detach()[0])
            visualize(target_img.cpu(),
                      id_target_img,
                      moving_img.cpu(),
                      id_moving_img,
                      example_warped_image[0, 0].cpu(),
                      reg_net_example_output.cpu().detach()[0],
                      det,
                      grid_spacing,
                      normalize_by='slice',
                      cmap='gray',
                      threshold=None,
                      linewidth=1,
                      color='darkblue',
                      downsampling=None,
                      threshold_det=0,
                      output=output_path
                      )
            # Similarity of warped moving image vs target (channel 0); count
            # of non-positive Jacobian determinants = number of folds.
            loss = lncc_loss(example_warped_image, img12[:, [0], :, :, :]).item()
            eval_loss_img = f"Warped {id_moving_img} to {id_target_img}, similarity loss: {loss}, number of folds: {(det<=0).sum()}"
            eval_losses_img.append(eval_loss_img)
            k += 1
            del reg_net_example_output, img12, example_warped_image, example_warped_seg

        # NOTE(review): the report files are only written when '11' pairs
        # exist, since the loss lists are defined in this branch -- confirm
        # this matches the intended behaviour for '01'-only test sets.
        with open(os.path.join(output_path, "reg_img_losses.txt"), 'w') as f:
            for s in eval_losses_img:
                f.write(s + '\n')

        average = np.mean(eval_los, 0)
        with open(os.path.join(output_path, "reg_seg_dsc.txt"), 'w') as f:
            for s in eval_losses_seg:
                f.write(s + '\n')
            f.write('\n\nAverage Dice Score: ' + str(average))
    torch.cuda.empty_cache()
+
405
def visualize(target,
              target_id,
              moving,
              moving_id,
              warped,
              vector_field,
              det,
              grid_spacing,
              normalize_by='volume',
              cmap=None,
              threshold=None,
              linewidth=1,
              color='red',
              downsampling=None,
              threshold_det=None,
              output=None
              ):
    """
    Render a 6x3 summary figure for one registration result and save it as a PNG.

    Rows (three orthogonal half-way slices each):
      1. moving image, 2. target image, 3. warped moving image,
      4. deformation vector field, 5. deformation field drawn on a grid,
      6. jacobian determinant of the deformation.

    normalize_by: 'slice' uses per-slice intensity scaling, 'volume' scales each
    panel by its volume-wide max; any other value raises ValueError.
    threshold / threshold_det: pixels at or below the value are overlaid in red
    (mainly useful to highlight non-positive jacobian determinants).
    downsampling: arrow subsampling for the vector-field plots; if None a value
    is guessed from the field size.
    output: directory where the figure file is written.
    """
    if normalize_by == "slice":
        # Let imshow pick its own range per slice.
        vmin = vmax_moving = vmax_target = vmax_warped = vmax_det = None
    elif normalize_by == "volume":
        vmin = 0
        vmax_moving = moving.max().item()
        vmax_target = target.max().item()
        vmax_warped = warped.max().item()
        vmax_det = det.max().item()
    else:
        raise(ValueError(
            f"Invalid value '{normalize_by}' given for normalize_by"))

    def draw_row(slices, first_position, title, vmax, row_cmap, thr):
        # Draw three orthogonal slices starting at the given subplot position,
        # optionally overlaying in red every pixel at or below thr.
        for offset, im in enumerate(slices):
            plt.subplot(6, 3, first_position + offset)
            plt.axis('off')
            plt.title(title)
            plt.imshow(im, origin='lower', vmin=vmin, vmax=vmax, cmap=row_cmap)
            if thr is not None:
                overlay = np.zeros(im.shape+(4,))  # RGBA array
                overlay[im <= thr] = [1, 0, 0, 1]
                plt.imshow(overlay, origin='lower')

    plt.figure(figsize=(24, 24))
    # Half-way slice indices of the image volumes.
    x, y, z = np.array(moving.shape)//2
    moving_slices = (moving[x, :, :], moving[:, y, :], moving[:, :, z])
    target_slices = (target[x, :, :], target[:, y, :], target[:, :, z])
    warped_slices = (warped[x, :, :], warped[:, y, :], warped[:, :, z])
    det_slices = (det[x, :, :], det[:, y, :], det[:, :, z])

    draw_row(moving_slices, 1, f'moving image: {moving_id}',
             vmax_moving, cmap, threshold)
    draw_row(target_slices, 4, f'target image: {target_id}',
             vmax_target, cmap, threshold)
    draw_row(warped_slices, 7, f'warped image: {moving_id} to {target_id}',
             vmax_warped, cmap, threshold)

    if downsampling is None:
        # Guess a reasonable arrow subsampling from the field size.
        downsampling = max(1, int(max(vector_field.shape[1:])) >> 5)

    # Half-way slices through the (3, H, W, D) displacement field; each plot
    # keeps only the two in-plane displacement components.
    vx, vy, vz = np.array(vector_field.shape[1:])//2
    field_slices = (vector_field[[1, 2], vx, :, :],
                    vector_field[[0, 2], :, vy, :],
                    vector_field[[0, 1], :, :, vz])

    for offset, field_slice in enumerate(field_slices):
        plt.subplot(6, 3, 10 + offset)
        plt.axis('off')
        plt.title(f'deformation vector field: {moving_id} to {target_id}')
        plot_2D_vector_field(field_slice, downsampling)

    for offset, field_slice in enumerate(field_slices):
        plt.subplot(6, 3, 13 + offset)
        plt.axis('off')
        plt.title(
            f'deformation vector field on grid: {moving_id} to {target_id}')
        plot_2D_deformation(
            field_slice, grid_spacing, linewidth=linewidth, color=color)

    # Jacobian determinant row: default colormap, folds highlighted in red.
    draw_row(det_slices, 16, f'jacobian determinant: {moving_id} to {target_id}',
             vmax_det, None, threshold_det)

    plt.savefig(os.path.join(
        output, f'reg_net_infer_{moving_id}_to_{target_id}.png'))
deepatlas/train/generators.py ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import monai
2
+ import torch
3
+ import itk
4
+ import numpy as np
5
+ import matplotlib.pyplot as plt
6
+ import random
7
+ import glob
8
+ import os.path
9
+ import argparse
10
+ import sys
11
+
12
+
13
def create_batch_generator(dataloader_subdivided, weights=None):
    """
    Create a batch generator that samples data pairs with various segmentation availabilities.

    Arguments:
        dataloader_subdivided : a mapping from the labels in seg_availabilities to dataloaders
        weights : a list of probabilities, one for each label in seg_availabilities;
            if not provided then we weight by the number of data items of each type,
            effectively sampling uniformly over the union of the datasets

    Returns: batch_generator
        A function that accepts a number of batches to sample and that returns a generator.
        The generator will weighted-randomly pick one of the seg_availabilities and
        yield the next batch from the corresponding dataloader.
    """
    seg_availabilities = ['00', '01', '10', '11']
    if weights is None:
        # Default: weight each availability class by its dataloader length.
        weights = [len(dataloader_subdivided[key]) for key in seg_availabilities]
    probabilities = np.array(weights)
    probabilities = probabilities / probabilities.sum()
    # Keep a live iterator per availability class so sampling can interleave them.
    iterators = {key: iter(loader)
                 for key, loader in dataloader_subdivided.items()}

    def batch_generator(num_batches_to_sample):
        for _ in range(num_batches_to_sample):
            chosen = np.random.choice(seg_availabilities, p=probabilities)
            try:
                yield next(iterators[chosen])
            except StopIteration:
                # This dataloader is exhausted; restart it and take its first batch.
                iterators[chosen] = iter(dataloader_subdivided[chosen])
                yield next(iterators[chosen])

    return batch_generator
45
+
deepatlas/train/train.py ADDED
@@ -0,0 +1,444 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import generators
2
+ import monai
3
+ import torch
4
+ import numpy as np
5
+ import matplotlib.pyplot as plt
6
+ import matplotlib
7
+ import os
8
+ import sys
9
+ from pathlib import Path
10
+ import pickle
11
+
12
+ ROOT_DIR = str(Path(os.getcwd()).parent.parent.absolute())
13
+ sys.path.insert(0, os.path.join(ROOT_DIR, 'deepatlas/utils'))
14
+ sys.path.insert(0, os.path.join(ROOT_DIR, 'deepatlas/loss_function'))
15
+ from utils import (
16
+ preview_image, preview_3D_vector_field, preview_3D_deformation,
17
+ jacobian_determinant, plot_progress, make_if_dont_exist, save_seg_checkpoint, save_reg_checkpoint, load_latest_checkpoint,
18
+ load_best_checkpoint, load_valid_checkpoint, plot_architecture
19
+ )
20
+ from losses import (
21
+ warp_func, warp_nearest_func, lncc_loss_func, dice_loss_func, reg_losses, dice_loss_func2
22
+ )
23
+
24
+
25
def swap_training(network_to_train, network_to_not_train):
    """
    Switch out of training one network and into training another.

    Freezes every parameter of `network_to_not_train` and puts it in eval mode,
    while unfreezing every parameter of `network_to_train` and putting it in
    train mode (so e.g. dropout/batch-norm behave accordingly).
    """
    for frozen_param in network_to_not_train.parameters():
        frozen_param.requires_grad = False

    for trainable_param in network_to_train.parameters():
        trainable_param.requires_grad = True

    network_to_not_train.eval()
    network_to_train.train()
38
+
39
def train_network(dataloader_train_reg,
                  dataloader_valid_reg,
                  dataloader_train_seg,
                  dataloader_valid_seg,
                  device,
                  seg_net,
                  reg_net,
                  num_segmentation_classes,
                  lr_reg,
                  lr_seg,
                  lam_a,
                  lam_sp,
                  lam_re,
                  max_epoch,
                  val_step,
                  result_seg_path,
                  result_reg_path,
                  logger,
                  img_shape,
                  plot_network=False,
                  continue_training=False
                  ):
    """
    Jointly train a registration network and a segmentation network
    (DeepAtlas-style alternating training).

    Each epoch has two phases: first reg_net is trained with seg_net frozen,
    then seg_net is trained with reg_net frozen. Checkpoints, best models and
    loss plots are written under result_reg_path / result_seg_path.

    Arguments:
        dataloader_train_reg / dataloader_valid_reg : mappings from segmentation
            availability codes ('00','01','10','11') to dataloaders of image pairs
        dataloader_train_seg / dataloader_valid_seg : dataloaders of single
            images with ground-truth segmentations
        device : torch device to train on
        seg_net, reg_net : the two networks
        num_segmentation_classes : number of classes for one-hot encoding
        lr_reg, lr_seg : learning rates for the two Adam optimizers
        lam_a, lam_sp, lam_re : anatomy, supervised-segmentation and
            regularization loss weights
        max_epoch : total number of epochs
        val_step : validate every val_step epochs
        result_seg_path, result_reg_path : output directories
        logger : logger used for progress messages
        img_shape : (H, D, W) of the input volumes, used for architecture plots
        plot_network : if True, render both architectures with torchview
        continue_training : if True, resume from the latest/best checkpoints

    Bug fixed vs. previous revision: when resuming, best_seg_validation_loss
    was loaded from the *reg* checkpoint directory and best_reg_validation_loss
    from the *seg* directory (paths were cross-wired).
    """
    # Output sub-directories (created only if missing).
    make_if_dont_exist(os.path.join(result_seg_path, 'training_plot'))
    make_if_dont_exist(os.path.join(result_reg_path, 'training_plot'))
    make_if_dont_exist(os.path.join(result_seg_path, 'model'))
    make_if_dont_exist(os.path.join(result_reg_path, 'model'))
    make_if_dont_exist(os.path.join(result_seg_path, 'checkpoints'))
    make_if_dont_exist(os.path.join(result_reg_path, 'checkpoints'))

    seg_availabilities = ['00', '01', '10', '11']
    batch_generator_train_reg = generators.create_batch_generator(
        dataloader_train_reg)
    batch_generator_valid_reg = generators.create_batch_generator(
        dataloader_valid_reg)
    # When training seg_net alone, never sample pairs where neither image has
    # a segmentation ('00'), since they contribute no supervised signal.
    seg_train_sampling_weights = [
        0] + [len(dataloader_train_reg[s]) for s in seg_availabilities[1:]]
    print('----------'*10)
    print(f"""When training seg_net alone, segmentation availabilities {seg_availabilities}
    will be sampled with respective weights {seg_train_sampling_weights}""")
    batch_generator_train_seg = generators.create_batch_generator(
        dataloader_train_reg, seg_train_sampling_weights)
    seg_net = seg_net.to(device)
    reg_net = reg_net.to(device)

    learning_rate_reg = lr_reg
    optimizer_reg = torch.optim.Adam(reg_net.parameters(), learning_rate_reg)
    scheduler_reg = torch.optim.lr_scheduler.StepLR(
        optimizer_reg, step_size=70, gamma=0.2, verbose=True)
    learning_rate_seg = lr_seg
    optimizer_seg = torch.optim.Adam(seg_net.parameters(), learning_rate_seg)
    scheduler_seg = torch.optim.lr_scheduler.StepLR(
        optimizer_seg, step_size=50, gamma=0.2, verbose=True)
    last_epoch = 0

    # Loss histories; entries are [epoch_number, value] pairs.
    training_losses_reg = []
    validation_losses_reg = []
    training_losses_seg = []
    validation_losses_seg = []
    regularization_loss_reg = []
    anatomy_loss_reg = []
    similarity_loss_reg = []
    supervised_loss_seg = []
    anatomy_loss_seg = []
    best_seg_validation_loss = float('inf')
    best_reg_validation_loss = float('inf')

    last_epoch_valid = 0
    if continue_training:
        # Resume only when BOTH networks have a validation checkpoint, so the
        # two training histories stay in sync.
        if os.path.exists(os.path.join(result_seg_path, 'checkpoints', 'valid_checkpoint.pth')) and os.path.exists(os.path.join(result_reg_path, 'checkpoints', 'valid_checkpoint.pth')):
            if os.path.exists(os.path.join(result_seg_path, 'checkpoints', 'best_checkpoint.pth')) and os.path.exists(os.path.join(result_reg_path, 'checkpoints', 'best_checkpoint.pth')):
                # FIX: load each best loss from its OWN checkpoint directory
                # (previously the seg/reg paths were swapped here).
                best_seg_validation_loss = load_best_checkpoint(
                    os.path.join(result_seg_path, 'checkpoints'), device)
                best_reg_validation_loss = load_best_checkpoint(
                    os.path.join(result_reg_path, 'checkpoints'), device)

            all_validation_losses_reg = load_valid_checkpoint(
                os.path.join(result_reg_path, 'checkpoints'), device)
            all_validation_losses_seg = load_valid_checkpoint(
                os.path.join(result_seg_path, 'checkpoints'), device)
            validation_losses_reg = all_validation_losses_reg['total_loss']
            validation_losses_seg = all_validation_losses_seg['total_loss']
            # Truncate both histories to their common length.
            last_epoch_valid = np.minimum(
                len(validation_losses_reg), len(validation_losses_seg))
            validation_losses_reg = validation_losses_reg[:last_epoch_valid]
            validation_losses_seg = validation_losses_seg[:last_epoch_valid]
            np_validation_losses_reg = np.array(validation_losses_reg)
            np_validation_losses_seg = np.array(validation_losses_seg)
            # If the recorded best loss no longer appears in the (truncated)
            # history, fall back to the best value present and restore the
            # previously saved best model file.
            if best_reg_validation_loss not in np_validation_losses_reg[:, 1]:
                best_reg_validation_loss = np.min(np_validation_losses_reg[:, 1])
                if os.path.exists(os.path.join(result_reg_path, 'model', 'reg_net_last_best.pth')):
                    assert os.path.exists(os.path.join(result_reg_path, 'model', 'reg_net_best.pth'))
                    os.remove(os.path.join(result_reg_path, 'model', 'reg_net_best.pth'))
                    os.rename(os.path.join(result_reg_path, 'model', 'reg_net_last_best.pth'),
                              os.path.join(result_reg_path, 'model', 'reg_net_best.pth'))
            if best_seg_validation_loss not in np_validation_losses_seg[:, 1]:
                best_seg_validation_loss = np.min(np_validation_losses_seg[:, 1])
                if os.path.exists(os.path.join(result_seg_path, 'model', 'seg_net_last_best.pth')):
                    assert os.path.exists(os.path.join(result_seg_path, 'model', 'seg_net_best.pth'))
                    os.remove(os.path.join(result_seg_path, 'model', 'seg_net_best.pth'))
                    os.rename(os.path.join(result_seg_path, 'model', 'seg_net_last_best.pth'),
                              os.path.join(result_seg_path, 'model', 'seg_net_best.pth'))
        else:
            # Only one network has a validation checkpoint: drop it, since an
            # unpaired history cannot be resumed consistently.
            if os.path.exists(os.path.join(result_seg_path, 'checkpoints', 'valid_checkpoint.pth')):
                os.remove(os.path.join(result_seg_path, 'checkpoints', 'valid_checkpoint.pth'))
            elif os.path.exists(os.path.join(result_reg_path, 'checkpoints', 'valid_checkpoint.pth')):
                os.remove(os.path.join(result_reg_path, 'checkpoints', 'valid_checkpoint.pth'))

        if last_epoch_valid != 0 and os.path.exists(os.path.join(result_seg_path, 'checkpoints', 'latest_checkpoint.pth')) and os.path.exists(os.path.join(result_reg_path, 'checkpoints', 'latest_checkpoint.pth')):
            # Restore both networks, optimizers and training-loss histories.
            reg_net, optimizer_reg, all_training_losses_reg = load_latest_checkpoint(
                os.path.join(result_reg_path, 'checkpoints'), reg_net, optimizer_reg, device)
            seg_net, optimizer_seg, all_training_losses_seg = load_latest_checkpoint(
                os.path.join(result_seg_path, 'checkpoints'), seg_net, optimizer_seg, device)
            regularization_loss_reg = all_training_losses_reg['regular_loss']
            anatomy_loss_reg = all_training_losses_reg['ana_loss']
            similarity_loss_reg = all_training_losses_reg['sim_loss']
            supervised_loss_seg = all_training_losses_seg['super_loss']
            anatomy_loss_seg = all_training_losses_seg['ana_loss']
            training_losses_reg = all_training_losses_reg['total_loss']
            training_losses_seg = all_training_losses_seg['total_loss']
            # Resume from the last epoch covered by BOTH the validated history
            # and both training histories.
            last_epoch_train = np.min(np.array(
                [last_epoch_valid * val_step, len(training_losses_reg), len(training_losses_seg)]))
            regularization_loss_reg = regularization_loss_reg[:last_epoch_train]
            anatomy_loss_reg = anatomy_loss_reg[:last_epoch_train]
            similarity_loss_reg = similarity_loss_reg[:last_epoch_train]
            supervised_loss_seg = supervised_loss_seg[:last_epoch_train]
            anatomy_loss_seg = anatomy_loss_seg[:last_epoch_train]
            training_losses_reg = training_losses_reg[:last_epoch_train]
            training_losses_seg = training_losses_seg[:last_epoch_train]

            last_epoch = last_epoch_train
        else:
            # Unpaired latest checkpoints cannot be resumed; discard them.
            if os.path.exists(os.path.join(result_seg_path, 'checkpoints', 'latest_checkpoint.pth')):
                os.remove(os.path.join(result_seg_path, 'checkpoints', 'latest_checkpoint.pth'))
            elif os.path.exists(os.path.join(result_reg_path, 'checkpoints', 'latest_checkpoint.pth')):
                os.remove(os.path.join(result_reg_path, 'checkpoints', 'latest_checkpoint.pth'))

    # With no validation data the restored histories are meaningless.
    if len(dataloader_valid_reg) == 0:
        validation_losses_reg = []

    if len(dataloader_valid_seg) == 0:
        validation_losses_seg = []

    lambda_a = lam_a  # anatomy loss weight
    lambda_sp = lam_sp  # supervised segmentation loss weight

    # regularization loss weight
    # monai has provided normalized bending energy loss
    # no need to modify the weight according to the image size
    lambda_r = lam_re

    max_epochs = max_epoch
    reg_phase_training_batches_per_epoch = 10
    # Fewer batches needed, because seg_net converges more quickly
    seg_phase_training_batches_per_epoch = 5
    reg_phase_num_validation_batches_to_use = 10
    val_interval = val_step
    if plot_network:
        plot_architecture(seg_net, img_shape,
                          seg_phase_training_batches_per_epoch, 'SegNet', result_seg_path)
        plot_architecture(reg_net, img_shape,
                          reg_phase_training_batches_per_epoch, 'RegNet', result_reg_path)

    logger.info('Start Training')

    for epoch_number in range(last_epoch, max_epochs):

        logger.info(f"Epoch {epoch_number+1}/{max_epochs}:")
        # ------------------------------------------------
        # reg_net training, with seg_net frozen
        # ------------------------------------------------

        # Keep computational graph in memory for reg_net, but not for seg_net,
        # and do reg_net.train()
        swap_training(reg_net, seg_net)

        losses = []
        regularization_loss = []
        similarity_loss = []
        anatomy_loss = []
        for batch in batch_generator_train_reg(reg_phase_training_batches_per_epoch):
            optimizer_reg.zero_grad()
            loss_sim, loss_reg, loss_ana, df = reg_losses(
                batch, device, reg_net, seg_net, num_segmentation_classes)
            loss = loss_sim + lambda_r * loss_reg + lambda_a * loss_ana
            loss.backward()
            optimizer_reg.step()
            losses.append(loss.item())
            regularization_loss.append(loss_reg.item())
            similarity_loss.append(loss_sim.item())
            anatomy_loss.append(loss_ana.item())

        # preview_3D_vector_field(df.cpu().detach()[0], ep=epoch_number, path=result_reg_path)

        training_loss_reg = np.mean(losses)
        regularization_loss_reg.append(
            [epoch_number+1, np.mean(regularization_loss)])
        similarity_loss_reg.append([epoch_number+1, np.mean(similarity_loss)])
        anatomy_loss_reg.append([epoch_number+1, np.mean(anatomy_loss)])
        logger.info(f"\treg training loss: {training_loss_reg}")
        training_losses_reg.append([epoch_number+1, training_loss_reg])
        logger.info("\tsave latest reg_net checkpoint")
        save_reg_checkpoint(reg_net, optimizer_reg, epoch_number, training_loss_reg,
                            sim_loss=similarity_loss_reg, regular_loss=regularization_loss_reg,
                            ana_loss=anatomy_loss_reg, total_loss=training_losses_reg,
                            save_dir=os.path.join(result_reg_path, 'checkpoints'), name='latest')
        # validation process
        if len(dataloader_valid_reg) == 0:
            # No validation set: treat the latest training state as both the
            # 'best' and 'valid' checkpoints, rotating the best-model files.
            logger.info("\tno enough dataset for validation")
            save_reg_checkpoint(reg_net, optimizer_reg, epoch_number, training_loss_reg,
                                sim_loss=similarity_loss_reg, regular_loss=regularization_loss_reg,
                                ana_loss=anatomy_loss_reg, total_loss=training_losses_reg,
                                save_dir=os.path.join(result_reg_path, 'checkpoints'), name='best')
            save_reg_checkpoint(reg_net, optimizer_reg, epoch_number, training_loss_reg,
                                sim_loss=similarity_loss_reg, regular_loss=regularization_loss_reg,
                                ana_loss=anatomy_loss_reg, total_loss=training_losses_reg,
                                save_dir=os.path.join(result_reg_path, 'checkpoints'), name='valid')
            if os.path.exists(os.path.join(result_reg_path, 'model', 'reg_net_best.pth')):
                if os.path.exists(os.path.join(result_reg_path, 'model', 'reg_net_last_best.pth')):
                    os.remove(os.path.join(result_reg_path, 'model', 'reg_net_last_best.pth'))
                os.rename(os.path.join(result_reg_path, 'model', 'reg_net_best.pth'),
                          os.path.join(result_reg_path, 'model', 'reg_net_last_best.pth'))
            torch.save(reg_net.state_dict(), os.path.join(
                result_reg_path, 'model', 'reg_net_best.pth'))
        else:
            if epoch_number % val_interval == 0:
                reg_net.eval()
                losses = []
                with torch.no_grad():
                    for batch in batch_generator_valid_reg(reg_phase_num_validation_batches_to_use):
                        loss_sim, loss_reg, loss_ana, dv = reg_losses(
                            batch, device, reg_net, seg_net, num_segmentation_classes)
                        loss = loss_sim + lambda_r * loss_reg + lambda_a * loss_ana
                        losses.append(loss.item())

                validation_loss_reg = np.mean(losses)
                logger.info(f"\treg validation loss: {validation_loss_reg}")
                validation_losses_reg.append([epoch_number+1, validation_loss_reg])

                if validation_loss_reg < best_reg_validation_loss:
                    best_reg_validation_loss = validation_loss_reg
                    logger.info("\tsave best reg_net checkpoint and model")
                    save_reg_checkpoint(reg_net, optimizer_reg, epoch_number, best_reg_validation_loss,
                                        total_loss=validation_losses_reg,
                                        save_dir=os.path.join(result_reg_path, 'checkpoints'), name='best')
                    # Keep the previous best model around as *_last_best.pth.
                    if os.path.exists(os.path.join(result_reg_path, 'model', 'reg_net_best.pth')):
                        if os.path.exists(os.path.join(result_reg_path, 'model', 'reg_net_last_best.pth')):
                            os.remove(os.path.join(result_reg_path, 'model', 'reg_net_last_best.pth'))
                        os.rename(os.path.join(result_reg_path, 'model', 'reg_net_best.pth'),
                                  os.path.join(result_reg_path, 'model', 'reg_net_last_best.pth'))
                    torch.save(reg_net.state_dict(), os.path.join(
                        result_reg_path, 'model', 'reg_net_best.pth'))
                save_reg_checkpoint(reg_net, optimizer_reg, epoch_number, validation_loss_reg,
                                    total_loss=validation_losses_reg,
                                    save_dir=os.path.join(result_reg_path, 'checkpoints'), name='valid')

        plot_progress(logger, os.path.join(result_reg_path, 'training_plot'),
                      training_losses_reg, validation_losses_reg, 'reg_net_training_loss')
        plot_progress(logger, os.path.join(result_reg_path, 'training_plot'),
                      regularization_loss_reg, [], 'regularization_reg_net_loss')
        plot_progress(logger, os.path.join(result_reg_path, 'training_plot'),
                      anatomy_loss_reg, [], 'anatomy_reg_net_loss')
        plot_progress(logger, os.path.join(result_reg_path, 'training_plot'),
                      similarity_loss_reg, [], 'similarity_reg_net_loss')
        # scheduler_reg.step()
        # Free up memory
        del loss, loss_sim, loss_reg, loss_ana
        torch.cuda.empty_cache()

        # ------------------------------------------------
        # seg_net training, with reg_net frozen
        # ------------------------------------------------

        # Keep computational graph in memory for seg_net, but not for reg_net,
        # and do seg_net.train()
        logger.info('\t'+'----'*10)
        swap_training(seg_net, reg_net)
        losses = []
        supervised_loss = []
        anatomy_loss = []
        dice_loss = dice_loss_func()
        warp = warp_func()
        warp_nearest = warp_nearest_func()
        dice_loss2 = dice_loss_func2()
        for batch in batch_generator_train_seg(seg_phase_training_batches_per_epoch):
            optimizer_seg.zero_grad()

            img12 = batch['img12'].to(device)

            displacement_fields = reg_net(img12)
            seg1_predicted = seg_net(img12[:, [0], :, :, :]).softmax(dim=1)
            seg2_predicted = seg_net(img12[:, [1], :, :, :]).softmax(dim=1)

            # Below we compute the following:
            # loss_supervised: supervised segmentation loss; compares ground truth seg with predicted seg
            # loss_anatomy: anatomy loss; compares warped seg of moving image to seg of target image
            # loss_metric: a single supervised seg loss, as a metric to track the progress of training

            if 'seg1' in batch.keys() and 'seg2' in batch.keys():
                seg1 = monai.networks.one_hot(
                    batch['seg1'].to(device), num_segmentation_classes)
                seg2 = monai.networks.one_hot(
                    batch['seg2'].to(device), num_segmentation_classes)
                loss_metric = dice_loss(seg2_predicted, seg2)
                loss_supervised = loss_metric + dice_loss(seg1_predicted, seg1)
                # The above supervised loss looks a bit different from the one in the paper
                # in that it includes predictions for both images in the current image pair;
                # we might as well do this, since we have gone to the trouble of loading
                # both segmentations into memory.

            elif 'seg1' in batch.keys():  # seg1 available, but no seg2
                seg1 = monai.networks.one_hot(
                    batch['seg1'].to(device), num_segmentation_classes)
                loss_metric = dice_loss(seg1_predicted, seg1)
                loss_supervised = loss_metric
                seg2 = seg2_predicted  # Use this in anatomy loss

            else:  # seg2 available, but no seg1
                assert('seg2' in batch.keys())
                seg2 = monai.networks.one_hot(
                    batch['seg2'].to(device), num_segmentation_classes)
                loss_metric = dice_loss(seg2_predicted, seg2)
                loss_supervised = loss_metric
                seg1 = seg1_predicted  # Use this in anatomy loss

            # seg1 and seg2 should now be in the form of one-hot class probabilities

            loss_anatomy = dice_loss(warp_nearest(seg2, displacement_fields), seg1)\
                if 'seg1' in batch.keys() or 'seg2' in batch.keys()\
                else 0.  # It wouldn't really be 0, but it would not contribute to training seg_net

            # (If you want to refactor this code for *joint* training of reg_net and seg_net,
            # then use the definition of anatomy loss given in the function anatomy_loss above,
            # where differentiable warping is used and reg net can be trained with it.)

            loss = lambda_a * loss_anatomy + lambda_sp * loss_supervised
            loss.backward()
            optimizer_seg.step()

            # Track the single-image dice metric (not the weighted total) as the
            # headline seg training loss.
            losses.append(loss_metric.item())
            supervised_loss.append(loss_supervised.item())
            anatomy_loss.append(loss_anatomy.item())

        training_loss_seg = np.mean(losses)
        supervised_loss_seg.append([epoch_number+1, np.mean(supervised_loss)])
        anatomy_loss_seg.append([epoch_number+1, np.mean(anatomy_loss)])
        logger.info(f"\tseg training loss: {training_loss_seg}")
        training_losses_seg.append([epoch_number+1, training_loss_seg])
        logger.info("\tsave latest seg_net checkpoint")
        save_seg_checkpoint(seg_net, optimizer_seg, epoch_number, training_loss_seg,
                            super_loss=supervised_loss_seg, ana_loss=anatomy_loss_seg,
                            total_loss=training_losses_seg,
                            save_dir=os.path.join(result_seg_path, 'checkpoints'), name='latest')

        if len(dataloader_valid_seg) == 0:
            logger.info("\tno enough dataset for validation")
            save_seg_checkpoint(seg_net, optimizer_seg, epoch_number, training_loss_seg,
                                super_loss=supervised_loss_seg, ana_loss=anatomy_loss_seg,
                                total_loss=training_losses_seg,
                                save_dir=os.path.join(result_seg_path, 'checkpoints'), name='valid')
            save_seg_checkpoint(seg_net, optimizer_seg, epoch_number, training_loss_seg,
                                super_loss=supervised_loss_seg, ana_loss=anatomy_loss_seg,
                                total_loss=training_losses_seg,
                                save_dir=os.path.join(result_seg_path, 'checkpoints'), name='best')
            if os.path.exists(os.path.join(result_seg_path, 'model', 'seg_net_best.pth')):
                if os.path.exists(os.path.join(result_seg_path, 'model', 'seg_net_last_best.pth')):
                    os.remove(os.path.join(result_seg_path, 'model', 'seg_net_last_best.pth'))
                os.rename(os.path.join(result_seg_path, 'model', 'seg_net_best.pth'),
                          os.path.join(result_seg_path, 'model', 'seg_net_last_best.pth'))
            torch.save(seg_net.state_dict(), os.path.join(
                result_seg_path, 'model', 'seg_net_best.pth'))
        else:
            if epoch_number % val_interval == 0:
                # The following validation loop would not do anything in the case
                # where there is just one segmentation available,
                # because data_seg_available_valid would be empty.
                seg_net.eval()
                losses = []
                with torch.no_grad():
                    for batch in dataloader_valid_seg:
                        imgs = batch['img'].to(device)
                        true_segs = batch['seg'].to(device)
                        predicted_segs = seg_net(imgs)
                        loss = dice_loss2(predicted_segs, true_segs)
                        losses.append(loss.item())

                validation_loss_seg = np.mean(losses)
                logger.info(f"\tseg validation loss: {validation_loss_seg}")
                validation_losses_seg.append([epoch_number+1, validation_loss_seg])

                if validation_loss_seg < best_seg_validation_loss:
                    best_seg_validation_loss = validation_loss_seg
                    logger.info("\tsave best seg_net checkpoint and model")
                    save_seg_checkpoint(seg_net, optimizer_seg, epoch_number, best_seg_validation_loss,
                                        total_loss=validation_losses_seg,
                                        save_dir=os.path.join(result_seg_path, 'checkpoints'), name='best')
                    # Keep the previous best model around as *_last_best.pth.
                    if os.path.exists(os.path.join(result_seg_path, 'model', 'seg_net_best.pth')):
                        if os.path.exists(os.path.join(result_seg_path, 'model', 'seg_net_last_best.pth')):
                            os.remove(os.path.join(result_seg_path, 'model', 'seg_net_last_best.pth'))
                        os.rename(os.path.join(result_seg_path, 'model', 'seg_net_best.pth'),
                                  os.path.join(result_seg_path, 'model', 'seg_net_last_best.pth'))
                    torch.save(seg_net.state_dict(), os.path.join(
                        result_seg_path, 'model', 'seg_net_best.pth'))
                save_seg_checkpoint(seg_net, optimizer_seg, epoch_number, validation_loss_seg,
                                    total_loss=validation_losses_seg,
                                    save_dir=os.path.join(result_seg_path, 'checkpoints'), name='valid')

        plot_progress(logger, os.path.join(result_seg_path, 'training_plot'),
                      training_losses_seg, validation_losses_seg, 'seg_net_training_loss')
        plot_progress(logger, os.path.join(result_seg_path, 'training_plot'),
                      anatomy_loss_seg, [], 'anatomy_seg_net_loss')
        plot_progress(logger, os.path.join(result_seg_path, 'training_plot'),
                      supervised_loss_seg, [], 'supervised_seg_net_loss')
        logger.info(f"\tseg lr: {optimizer_seg.param_groups[0]['lr']}")
        logger.info(f"\treg lr: {optimizer_reg.param_groups[0]['lr']}")
        # scheduler_seg.step()
        # Free up memory
        del (loss, seg1, seg2, displacement_fields, img12, loss_supervised, loss_anatomy, loss_metric,
             seg1_predicted, seg2_predicted)
        torch.cuda.empty_cache()

    if len(validation_losses_reg) == 0:
        logger.info('Only small number of pairs are used for training, no need to do validation. Replace best validation loss with training loss !!!')
        logger.info(f'Best reg_net validation loss: {training_loss_reg}')
    else:
        logger.info(f"Best reg_net validation loss: {best_reg_validation_loss}")

    if len(validation_losses_seg) == 0:
        logger.info('Only one label is used for training, no need to do validation. Replace best validation loss with training loss !!!')
        logger.info(f'Best seg_net validation loss: {training_loss_seg}')
    else:
        logger.info(f"Best seg_net validation loss: {best_seg_validation_loss}")

    # save reg training losses
    reg_training_pkl = [{'training_losses': training_losses_reg},
                        {'anatomy_loss': anatomy_loss_reg},
                        {'similarity_loss': similarity_loss_reg},
                        {'regularization_loss': regularization_loss_reg}
                        ]
    if len(validation_losses_reg) != 0:
        reg_training_pkl.append({'validation_losses': validation_losses_reg})
        reg_training_pkl.append({'best_reg_validation_loss': best_reg_validation_loss})
    else:
        reg_training_pkl.append({'best_reg_validation_loss': training_loss_reg})

    # save seg training losses
    seg_training_pkl = [{'training_losses': training_losses_seg},
                        {'anatomy_loss': anatomy_loss_seg},
                        {'supervised_loss': supervised_loss_seg}
                        ]
    if len(validation_losses_seg) != 0:
        seg_training_pkl.append({'validation_losses': validation_losses_seg})
        seg_training_pkl.append({'best_seg_validation_loss': best_seg_validation_loss})
    else:
        seg_training_pkl.append({'best_seg_validation_loss': training_loss_seg})

    with open(os.path.join(result_reg_path, 'training_plot', 'reg_training_losses.pkl'), 'wb') as f:
        pickle.dump(reg_training_pkl, f)

    with open(os.path.join(result_seg_path, 'training_plot', 'seg_training_losses.pkl'), 'wb') as ff:
        pickle.dump(seg_training_pkl, ff)
deepatlas/utils/utils.py ADDED
@@ -0,0 +1,294 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import matplotlib.pyplot as plt
3
+ import monai
4
+ import torch
5
+ import os
6
+ import json
7
+ import matplotlib
8
+ import shutil
9
+ from torchview import draw_graph
10
+
11
def plot_architecture(network, img_shape, batch_size, name, save_dir):
    """Render a graph visualization of a network and save it in save_dir.

    Args:
        network: the model to visualize (passed to torchview.draw_graph).
        img_shape: (H, D, W) spatial size of a single input volume.
        batch_size: batch dimension used for the dummy input size.
        name: 'SegNet' (single-channel input) or anything else
            (two-channel input, i.e. an image pair for the RegNet).
        save_dir: directory in which the rendered '<name>_Graph' is written.
    """
    # SegNet takes one image; the registration net takes a stacked pair.
    num_channels = 1 if name == 'SegNet' else 2

    H, D, W = img_shape
    # draw_graph(save_graph=True) writes the figure itself, so the returned
    # graph object is not needed (the original bound it to an unused local).
    draw_graph(network,
               input_size=(batch_size, num_channels, H, D, W),
               device='meta',  # meta device: traces shapes without allocating
               roll=True,
               expand_nested=True,
               save_graph=True,
               filename=f"{name}_Graph",
               directory=save_dir)
26
+
27
def make_if_dont_exist(folder_path, overwrite=False):
    """Ensure folder_path exists.

    If it already exists it is left untouched, unless overwrite is True,
    in which case it is deleted and recreated empty. A short status
    message is printed in every case.
    """
    already_there = os.path.exists(folder_path)
    if already_there and not overwrite:
        print(f'{folder_path} exists.')
        return
    if already_there:
        print(f"{folder_path} overwritten")
        shutil.rmtree(folder_path, ignore_errors=True)
        os.makedirs(folder_path)
        return
    os.makedirs(folder_path)
    print(f"{folder_path} created!")
38
+
39
def preview_image(image_array, normalize_by="volume", cmap=None, figsize=(12, 12), threshold=None):
    """
    Display three orthogonal slices of the given 3D image.

    image_array is assumed to be of shape (H,W,D)

    If a number is provided for threshold, then pixels for which the value
    is below the threshold will be shown in red
    """
    # NOTE(review): this bare figure is never used; plt.subplots() below
    # creates its own figure — confirm whether this call can be dropped.
    plt.figure()
    if normalize_by == "slice":
        # Let imshow auto-scale each slice independently.
        vmin = None
        vmax = None
    elif normalize_by == "volume":
        # One shared intensity range for all three slices.
        vmin = 0
        vmax = image_array.max().item()
    else:
        raise(ValueError(
            f"Invalid value '{normalize_by}' given for normalize_by"))

    # half-way slices
    x, y, z = np.array(image_array.shape)//2
    imgs = (image_array[x, :, :], image_array[:, y, :], image_array[:, :, z])

    fig, axs = plt.subplots(1, 3, figsize=figsize)
    for ax, im in zip(axs, imgs):
        ax.axis('off')
        ax.imshow(im, origin='lower', vmin=vmin, vmax=vmax, cmap=cmap)

    # threshold will be useful when displaying jacobian determinant images;
    # we will want to clearly see where the jacobian determinant is negative
    # NOTE(review): this block runs AFTER the loop, so the red overlay is
    # drawn only on the last panel (using the loop's leftover ax/im) —
    # confirm this is intended rather than a per-panel overlay.
    if threshold is not None:
        red = np.zeros(im.shape+(4,))  # RGBA array
        red[im <= threshold] = [1, 0, 0, 1]
        ax.imshow(red, origin='lower')

    # NOTE(review): output filename is hardcoded to 'test.png' in the CWD.
    plt.savefig('test.png')
76
+
77
+
78
def plot_2D_vector_field(vector_field, downsampling):
    """Plot a 2D vector field given as a tensor of shape (2,H,W).

    The plot origin will be in the lower left.
    Using "x" and "y" for the rightward and upward directions respectively,
    the vector at location (x,y) in the plot image will have
    vector_field[1,y,x] as its x-component and
    vector_field[0,y,x] as its y-component.
    """
    # Average-pool the field so the quiver plot is not overcrowded.
    pool = monai.networks.layers.factories.Pool['AVG', 2](
        kernel_size=downsampling)
    coarse = pool(vector_field.unsqueeze(0))[0]
    plt.quiver(
        coarse[1, :, :], coarse[0, :, :],
        angles='xy', scale_units='xy', scale=downsampling,
        headwidth=4.
    )
95
+
96
+
97
def preview_3D_vector_field(vector_field, downsampling=None, ep=None, path=None):
    """
    Display three orthogonal slices of the given 3D vector field.

    vector_field should be a tensor of shape (3,H,W,D)

    Vectors are projected into the viewing plane, so you are only seeing
    their components in the viewing plane.

    The figure is written to <path>/df_<ep>.png.
    """
    if downsampling is None:
        # Guess a reasonable downsampling value: ~1/32 of the largest extent.
        downsampling = max(1, int(max(vector_field.shape[1:])) >> 5)

    mid = np.array(vector_field.shape[1:]) // 2  # half-way slices
    panels = (
        vector_field[[1, 2], mid[0], :, :],
        vector_field[[0, 2], :, mid[1], :],
        vector_field[[0, 1], :, :, mid[2]],
    )
    plt.figure(figsize=(18, 6))
    for position, field_2d in enumerate(panels, start=1):
        plt.subplot(1, 3, position)
        plt.axis('off')
        plot_2D_vector_field(field_2d, downsampling)
    plt.savefig(os.path.join(path, f'df_{ep}.png'))
123
+
124
+
125
def plot_2D_deformation(vector_field, grid_spacing, **kwargs):
    """
    Interpret vector_field as a displacement vector field defining a deformation,
    and plot an x-y grid warped by this deformation.

    vector_field should be a tensor of shape (2,H,W)
    """
    _, height, width = vector_field.shape
    # Draw a regular grid of one-pixel lines.
    grid = np.zeros((height, width))
    grid[np.arange(0, height, grid_spacing), :] = 1
    grid[:, np.arange(0, width, grid_spacing)] = 1
    # Add a channel dimension: (C,H,W).
    grid_tensor = torch.tensor(grid, dtype=vector_field.dtype).unsqueeze(0)
    # Warp the grid image by the displacement field and display the result.
    warp_layer = monai.networks.blocks.Warp(mode="bilinear", padding_mode="zeros")
    warped = warp_layer(grid_tensor.unsqueeze(0), vector_field.unsqueeze(0))[0]
    plt.imshow(warped[0], origin='lower', cmap='gist_gray')
141
+
142
+
143
def preview_3D_deformation(vector_field, grid_spacing, **kwargs):
    """
    Interpret vector_field as a displacement vector field defining a deformation,
    and plot warped grids along three orthogonal slices.

    vector_field should be a tensor of shape (3,H,W,D)
    kwargs are passed to matplotlib plotting

    Deformations are projected into the viewing plane, so you are only seeing
    their components in the viewing plane.
    """
    mid = np.array(vector_field.shape[1:]) // 2  # half-way slices
    panels = (
        vector_field[[1, 2], mid[0], :, :],
        vector_field[[0, 2], :, mid[1], :],
        vector_field[[0, 1], :, :, mid[2]],
    )
    plt.figure(figsize=(18, 6))
    for position, field_2d in enumerate(panels, start=1):
        plt.subplot(1, 3, position)
        plt.axis('off')
        plot_2D_deformation(field_2d, grid_spacing, **kwargs)
    plt.show()
166
+
167
+
168
def jacobian_determinant(vf):
    """
    Given a displacement vector field vf, compute the jacobian determinant scalar field.

    vf is assumed to be a vector field of shape (3,H,W,D),
    and it is interpreted as the displacement field.
    So it is defining a discretely sampled map from a subset of 3-space into 3-space,
    namely the map that sends point (x,y,z) to the point (x,y,z)+vf[:,x,y,z].
    This function computes a jacobian determinant by taking discrete differences in each spatial direction.

    Returns a numpy array of shape (H-1,W-1,D-1).
    """
    _, H, W, D = vf.shape

    # Forward differences along one spatial axis, trimmed so all three
    # derivative arrays share the common (3, H-1, W-1, D-1) shape.
    def grad(axis):
        return np.diff(vf, axis=axis)[:, :H - 1, :W - 1, :D - 1]

    jx = grad(1)
    jy = grad(2)
    jz = grad(3)

    # The deformation map is identity + displacement, so its Jacobian is
    # I + (displacement gradient): add 1 on the diagonal entries.
    jx[0] += 1
    jy[1] += 1
    jz[2] += 1

    # 3x3 determinant at every voxel, expanded along the first row.
    return (
        jx[0] * (jy[1] * jz[2] - jz[1] * jy[2])
        - jy[0] * (jx[1] * jz[2] - jz[1] * jx[2])
        + jz[0] * (jx[1] * jy[2] - jy[1] * jx[2])
    )
200
+
201
def load_json(json_path):
    """Read a JSON file and return the parsed content (typically a dict).

    Args:
        json_path: path to the JSON file; must be a str.

    Returns:
        The deserialized JSON object.
    """
    assert isinstance(json_path, str)
    # Use a context manager so the file handle is always closed; the
    # original implementation opened the file and never closed it.
    with open(json_path, 'r') as fjson:
        return json.load(fjson)
206
+
207
def plot_progress(logger, save_dir, train_loss, val_loss, name):
    """Plot training (and optional validation) loss curves to <save_dir>/<name>.png.

    Args:
        logger: object with an .info(msg) method used to report failures.
        save_dir: directory the PNG is written into.
        train_loss: non-empty sequence of (epoch, loss) pairs.
        val_loss: sequence of (epoch, loss) pairs; may be empty, in which
            case only the training curve is drawn.
        name: plot title and output file stem.

    Any plotting failure is logged and swallowed so a cosmetic error can
    never interrupt training.
    """
    assert len(train_loss) != 0
    train_loss = np.array(train_loss)
    try:
        font = {'weight': 'normal',
                'size': 18}

        matplotlib.rc('font', **font)

        fig = plt.figure(figsize=(30, 24))
        ax = fig.add_subplot(111)
        ax.plot(train_loss[:, 0], train_loss[:, 1], color='b', ls='-', label="loss_tr")
        if len(val_loss) != 0:
            val_loss = np.array(val_loss)
            ax.plot(val_loss[:, 0], val_loss[:, 1], color='r', ls='-', label="loss_val")

        ax.set_xlabel("epoch")
        ax.set_ylabel("loss")
        ax.legend()
        ax.set_title(name)
        fig.savefig(os.path.join(save_dir, name + ".png"))
        plt.cla()
        plt.close(fig)
    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # are no longer swallowed while keeping the best-effort behavior.
        logger.info(f"failed to plot {name} training progress")
236
+
237
def save_reg_checkpoint(network, optimizer, epoch, best_loss, sim_loss=None, regular_loss=None, ana_loss=None, total_loss=None, save_dir=None, name=None):
    """Persist a registration-network checkpoint to <save_dir>/<name>_checkpoint.pth.

    The checkpoint stores the epoch, the network/optimizer state dicts, and
    an 'all_loss' dict holding best_loss, total_loss, plus whichever of the
    optional histories (sim/regular/ana) were provided.
    """
    all_loss = {
        'best_loss': best_loss,
        'total_loss': total_loss,
    }
    # Only record the optional loss histories that were actually supplied.
    for key, value in (('sim_loss', sim_loss),
                       ('regular_loss', regular_loss),
                       ('ana_loss', ana_loss)):
        if value is not None:
            all_loss[key] = value

    payload = {
        'epoch': epoch,
        'network_state_dict': network.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
        'all_loss': all_loss,
    }
    torch.save(payload, os.path.join(save_dir, name + '_checkpoint.pth'))
255
+
256
+
257
def save_seg_checkpoint(network, optimizer, epoch, best_loss, super_loss=None, ana_loss=None, total_loss=None, save_dir=None, name=None):
    """Persist a segmentation-network checkpoint to <save_dir>/<name>_checkpoint.pth.

    The checkpoint stores the epoch, the network/optimizer state dicts, and
    an 'all_loss' dict holding best_loss, total_loss, plus whichever of the
    optional histories (super/ana) were provided.
    """
    all_loss = {
        'best_loss': best_loss,
        'total_loss': total_loss,
    }
    # Only record the optional loss histories that were actually supplied.
    for key, value in (('super_loss', super_loss),
                       ('ana_loss', ana_loss)):
        if value is not None:
            all_loss[key] = value

    payload = {
        'epoch': epoch,
        'network_state_dict': network.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
        'all_loss': all_loss,
    }
    torch.save(payload, os.path.join(save_dir, name + '_checkpoint.pth'))
273
+
274
+
275
def load_latest_checkpoint(path, network, optimizer, device):
    """Restore training state from <path>/latest_checkpoint.pth.

    Loads the network and optimizer state dicts in place (mapped onto
    device) and returns the (network, optimizer, all_loss) triple.
    """
    ckpt = torch.load(os.path.join(path, 'latest_checkpoint.pth'),
                      map_location=device)
    network.load_state_dict(ckpt['network_state_dict'])
    optimizer.load_state_dict(ckpt['optimizer_state_dict'])
    return network, optimizer, ckpt['all_loss']
282
+
283
def load_valid_checkpoint(path, device):
    """Return the 'all_loss' dict stored in <path>/valid_checkpoint.pth."""
    ckpt = torch.load(os.path.join(path, 'valid_checkpoint.pth'),
                      map_location=device)
    return ckpt['all_loss']
288
+
289
def load_best_checkpoint(path, device):
    """Return the best loss value stored in <path>/best_checkpoint.pth."""
    ckpt = torch.load(os.path.join(path, 'best_checkpoint.pth'),
                      map_location=device)
    return ckpt['all_loss']['best_loss']
294
+
deepatlas_config/config_sample.json ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "task_name": "Task002_rnmw153_sim_Zsc",
3
+ "folder_name": "folder_name",
4
+ "info_name": "info",
5
+ "num_seg_used": 1,
6
+ "exp_set": 2,
7
+ "labels": {
8
+ "0": "background",
9
+ "1": "Septum",
10
+ "2": "IT",
11
+ "3": "MS"
12
+ },
13
+ "network": {
14
+ "spatial_dim": 3,
15
+ "dropout": 0.2,
16
+ "activation_type": "leakyrelu",
17
+ "normalization_type": "batch",
18
+ "num_res": 0,
19
+ "anatomy_loss_weight": 15.0,
20
+ "supervised_segmentation_loss_weight": 3.0,
21
+ "regularization_loss_weight": 5e-6,
22
+ "registration_network_learning_rate": 1e-3,
23
+ "segmentation_network_learning_rate": 5e-4,
24
+ "number_epoch": 10,
25
+ "validation_step": 1
26
+ },
27
+ "num_fold": 2
28
+ }
deepatlas_results/Task002_rnmw153_sim_Zsc/dataset.json ADDED
@@ -0,0 +1,647 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "Task002",
3
+ "description": "rnmw153",
4
+ "tensorImageSize": "4D",
5
+ "reference": "MODIFY",
6
+ "licence": "MODIFY",
7
+ "release": "0.0",
8
+ "modality": {
9
+ "0": "CT"
10
+ },
11
+ "labels": {
12
+ "0": "background",
13
+ "1": "Septum",
14
+ "2": "IT",
15
+ "3": "MS"
16
+ },
17
+ "network": {
18
+ "spatial_dim": 3,
19
+ "dropout": 0.2,
20
+ "activation_type": "leakyrelu",
21
+ "normalization_type": "batch",
22
+ "num_res": 0
23
+ },
24
+ "total_numScanTraining": 12,
25
+ "total_numLabelTraining": 10,
26
+ "total_numTest": 5,
27
+ "total_train": [
28
+ {
29
+ "img": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_190.nii.gz",
30
+ "seg": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_190.nii.gz"
31
+ },
32
+ {
33
+ "img": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_174.nii.gz",
34
+ "seg": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_174.nii.gz"
35
+ },
36
+ {
37
+ "img": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_140.nii.gz",
38
+ "seg": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_140.nii.gz"
39
+ },
40
+ {
41
+ "img": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_192.nii.gz",
42
+ "seg": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_192.nii.gz"
43
+ },
44
+ {
45
+ "img": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_194.nii.gz",
46
+ "seg": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_194.nii.gz"
47
+ },
48
+ {
49
+ "img": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_181.nii.gz"
50
+ },
51
+ {
52
+ "img": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_138.nii.gz",
53
+ "seg": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_138.nii.gz"
54
+ },
55
+ {
56
+ "img": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_183.nii.gz",
57
+ "seg": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_183.nii.gz"
58
+ },
59
+ {
60
+ "img": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_170.nii.gz",
61
+ "seg": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_170.nii.gz"
62
+ },
63
+ {
64
+ "img": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_171.nii.gz",
65
+ "seg": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_171.nii.gz"
66
+ },
67
+ {
68
+ "img": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_175.nii.gz"
69
+ },
70
+ {
71
+ "img": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_191.nii.gz",
72
+ "seg": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_191.nii.gz"
73
+ }
74
+ ],
75
+ "total_test": [
76
+ {
77
+ "img": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_141.nii.gz",
78
+ "seg": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_141.nii.gz"
79
+ },
80
+ {
81
+ "img": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_169.nii.gz",
82
+ "seg": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_169.nii.gz"
83
+ },
84
+ {
85
+ "img": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_135.nii.gz",
86
+ "seg": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_135.nii.gz"
87
+ },
88
+ {
89
+ "img": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_172.nii.gz",
90
+ "seg": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_172.nii.gz"
91
+ },
92
+ {
93
+ "img": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_146.nii.gz",
94
+ "seg": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_146.nii.gz"
95
+ }
96
+ ],
97
+ "seg_numTrain": 10,
98
+ "seg_train": [
99
+ {
100
+ "img": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_190.nii.gz",
101
+ "seg": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_190.nii.gz"
102
+ },
103
+ {
104
+ "img": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_174.nii.gz",
105
+ "seg": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_174.nii.gz"
106
+ },
107
+ {
108
+ "img": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_140.nii.gz",
109
+ "seg": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_140.nii.gz"
110
+ },
111
+ {
112
+ "img": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_192.nii.gz",
113
+ "seg": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_192.nii.gz"
114
+ },
115
+ {
116
+ "img": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_194.nii.gz",
117
+ "seg": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_194.nii.gz"
118
+ },
119
+ {
120
+ "img": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_138.nii.gz",
121
+ "seg": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_138.nii.gz"
122
+ },
123
+ {
124
+ "img": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_183.nii.gz",
125
+ "seg": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_183.nii.gz"
126
+ },
127
+ {
128
+ "img": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_170.nii.gz",
129
+ "seg": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_170.nii.gz"
130
+ },
131
+ {
132
+ "img": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_171.nii.gz",
133
+ "seg": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_171.nii.gz"
134
+ },
135
+ {
136
+ "img": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_191.nii.gz",
137
+ "seg": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_191.nii.gz"
138
+ }
139
+ ],
140
+ "reg_numTrain": 58,
141
+ "reg_train": [
142
+ {
143
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_181.nii.gz",
144
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_175.nii.gz"
145
+ },
146
+ {
147
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_181.nii.gz",
148
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_190.nii.gz",
149
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_190.nii.gz"
150
+ },
151
+ {
152
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_181.nii.gz",
153
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_174.nii.gz",
154
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_174.nii.gz"
155
+ },
156
+ {
157
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_181.nii.gz",
158
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_140.nii.gz",
159
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_140.nii.gz"
160
+ },
161
+ {
162
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_181.nii.gz",
163
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_192.nii.gz",
164
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_192.nii.gz"
165
+ },
166
+ {
167
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_181.nii.gz",
168
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_194.nii.gz",
169
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_194.nii.gz"
170
+ },
171
+ {
172
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_181.nii.gz",
173
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_138.nii.gz",
174
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_138.nii.gz"
175
+ },
176
+ {
177
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_181.nii.gz",
178
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_183.nii.gz",
179
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_183.nii.gz"
180
+ },
181
+ {
182
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_181.nii.gz",
183
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_170.nii.gz",
184
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_170.nii.gz"
185
+ },
186
+ {
187
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_175.nii.gz",
188
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_181.nii.gz"
189
+ },
190
+ {
191
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_175.nii.gz",
192
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_190.nii.gz",
193
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_190.nii.gz"
194
+ },
195
+ {
196
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_175.nii.gz",
197
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_174.nii.gz",
198
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_174.nii.gz"
199
+ },
200
+ {
201
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_175.nii.gz",
202
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_140.nii.gz",
203
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_140.nii.gz"
204
+ },
205
+ {
206
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_175.nii.gz",
207
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_192.nii.gz",
208
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_192.nii.gz"
209
+ },
210
+ {
211
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_175.nii.gz",
212
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_194.nii.gz",
213
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_194.nii.gz"
214
+ },
215
+ {
216
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_175.nii.gz",
217
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_138.nii.gz",
218
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_138.nii.gz"
219
+ },
220
+ {
221
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_175.nii.gz",
222
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_183.nii.gz",
223
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_183.nii.gz"
224
+ },
225
+ {
226
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_175.nii.gz",
227
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_170.nii.gz",
228
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_170.nii.gz"
229
+ },
230
+ {
231
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_190.nii.gz",
232
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_181.nii.gz",
233
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_190.nii.gz"
234
+ },
235
+ {
236
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_190.nii.gz",
237
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_175.nii.gz",
238
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_190.nii.gz"
239
+ },
240
+ {
241
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_190.nii.gz",
242
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_174.nii.gz",
243
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_190.nii.gz",
244
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_174.nii.gz"
245
+ },
246
+ {
247
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_190.nii.gz",
248
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_140.nii.gz",
249
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_190.nii.gz",
250
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_140.nii.gz"
251
+ },
252
+ {
253
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_190.nii.gz",
254
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_192.nii.gz",
255
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_190.nii.gz",
256
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_192.nii.gz"
257
+ },
258
+ {
259
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_190.nii.gz",
260
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_194.nii.gz",
261
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_190.nii.gz",
262
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_194.nii.gz"
263
+ },
264
+ {
265
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_190.nii.gz",
266
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_138.nii.gz",
267
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_190.nii.gz",
268
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_138.nii.gz"
269
+ },
270
+ {
271
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_190.nii.gz",
272
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_183.nii.gz",
273
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_190.nii.gz",
274
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_183.nii.gz"
275
+ },
276
+ {
277
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_190.nii.gz",
278
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_170.nii.gz",
279
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_190.nii.gz",
280
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_170.nii.gz"
281
+ },
282
+ {
283
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_174.nii.gz",
284
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_181.nii.gz",
285
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_174.nii.gz"
286
+ },
287
+ {
288
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_174.nii.gz",
289
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_175.nii.gz",
290
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_174.nii.gz"
291
+ },
292
+ {
293
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_174.nii.gz",
294
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_190.nii.gz",
295
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_174.nii.gz",
296
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_190.nii.gz"
297
+ },
298
+ {
299
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_174.nii.gz",
300
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_140.nii.gz",
301
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_174.nii.gz",
302
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_140.nii.gz"
303
+ },
304
+ {
305
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_174.nii.gz",
306
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_192.nii.gz",
307
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_174.nii.gz",
308
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_192.nii.gz"
309
+ },
310
+ {
311
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_174.nii.gz",
312
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_194.nii.gz",
313
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_174.nii.gz",
314
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_194.nii.gz"
315
+ },
316
+ {
317
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_174.nii.gz",
318
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_138.nii.gz",
319
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_174.nii.gz",
320
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_138.nii.gz"
321
+ },
322
+ {
323
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_174.nii.gz",
324
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_183.nii.gz",
325
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_174.nii.gz",
326
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_183.nii.gz"
327
+ },
328
+ {
329
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_174.nii.gz",
330
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_170.nii.gz",
331
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_174.nii.gz",
332
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_170.nii.gz"
333
+ },
334
+ {
335
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_140.nii.gz",
336
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_181.nii.gz",
337
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_140.nii.gz"
338
+ },
339
+ {
340
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_140.nii.gz",
341
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_175.nii.gz",
342
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_140.nii.gz"
343
+ },
344
+ {
345
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_140.nii.gz",
346
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_190.nii.gz",
347
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_140.nii.gz",
348
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_190.nii.gz"
349
+ },
350
+ {
351
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_140.nii.gz",
352
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_174.nii.gz",
353
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_140.nii.gz",
354
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_174.nii.gz"
355
+ },
356
+ {
357
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_140.nii.gz",
358
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_192.nii.gz",
359
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_140.nii.gz",
360
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_192.nii.gz"
361
+ },
362
+ {
363
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_140.nii.gz",
364
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_194.nii.gz",
365
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_140.nii.gz",
366
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_194.nii.gz"
367
+ },
368
+ {
369
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_140.nii.gz",
370
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_138.nii.gz",
371
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_140.nii.gz",
372
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_138.nii.gz"
373
+ },
374
+ {
375
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_140.nii.gz",
376
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_183.nii.gz",
377
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_140.nii.gz",
378
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_183.nii.gz"
379
+ },
380
+ {
381
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_140.nii.gz",
382
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_170.nii.gz",
383
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_140.nii.gz",
384
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_170.nii.gz"
385
+ },
386
+ {
387
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_192.nii.gz",
388
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_181.nii.gz",
389
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_192.nii.gz"
390
+ },
391
+ {
392
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_192.nii.gz",
393
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_175.nii.gz",
394
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_192.nii.gz"
395
+ },
396
+ {
397
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_192.nii.gz",
398
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_190.nii.gz",
399
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_192.nii.gz",
400
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_190.nii.gz"
401
+ },
402
+ {
403
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_192.nii.gz",
404
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_174.nii.gz",
405
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_192.nii.gz",
406
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_174.nii.gz"
407
+ },
408
+ {
409
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_192.nii.gz",
410
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_140.nii.gz",
411
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_192.nii.gz",
412
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_140.nii.gz"
413
+ },
414
+ {
415
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_192.nii.gz",
416
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_194.nii.gz",
417
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_192.nii.gz",
418
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_194.nii.gz"
419
+ },
420
+ {
421
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_192.nii.gz",
422
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_138.nii.gz",
423
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_192.nii.gz",
424
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_138.nii.gz"
425
+ },
426
+ {
427
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_192.nii.gz",
428
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_183.nii.gz",
429
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_192.nii.gz",
430
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_183.nii.gz"
431
+ },
432
+ {
433
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_192.nii.gz",
434
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_170.nii.gz",
435
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_192.nii.gz",
436
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_170.nii.gz"
437
+ },
438
+ {
439
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_194.nii.gz",
440
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_181.nii.gz",
441
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_194.nii.gz"
442
+ },
443
+ {
444
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_194.nii.gz",
445
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_175.nii.gz",
446
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_194.nii.gz"
447
+ },
448
+ {
449
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_194.nii.gz",
450
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_190.nii.gz",
451
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_194.nii.gz",
452
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_190.nii.gz"
453
+ },
454
+ {
455
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_194.nii.gz",
456
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_174.nii.gz",
457
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_194.nii.gz",
458
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_174.nii.gz"
459
+ },
460
+ {
461
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_194.nii.gz",
462
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_140.nii.gz",
463
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_194.nii.gz",
464
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_140.nii.gz"
465
+ },
466
+ {
467
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_194.nii.gz",
468
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_192.nii.gz",
469
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_194.nii.gz",
470
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_192.nii.gz"
471
+ },
472
+ {
473
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_194.nii.gz",
474
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_138.nii.gz",
475
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_194.nii.gz",
476
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_138.nii.gz"
477
+ },
478
+ {
479
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_194.nii.gz",
480
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_183.nii.gz",
481
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_194.nii.gz",
482
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_183.nii.gz"
483
+ },
484
+ {
485
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_194.nii.gz",
486
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_170.nii.gz",
487
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_194.nii.gz",
488
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_170.nii.gz"
489
+ },
490
+ {
491
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_138.nii.gz",
492
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_181.nii.gz",
493
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_138.nii.gz"
494
+ },
495
+ {
496
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_138.nii.gz",
497
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_175.nii.gz",
498
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_138.nii.gz"
499
+ },
500
+ {
501
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_138.nii.gz",
502
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_190.nii.gz",
503
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_138.nii.gz",
504
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_190.nii.gz"
505
+ },
506
+ {
507
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_138.nii.gz",
508
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_174.nii.gz",
509
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_138.nii.gz",
510
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_174.nii.gz"
511
+ },
512
+ {
513
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_138.nii.gz",
514
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_140.nii.gz",
515
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_138.nii.gz",
516
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_140.nii.gz"
517
+ },
518
+ {
519
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_138.nii.gz",
520
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_192.nii.gz",
521
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_138.nii.gz",
522
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_192.nii.gz"
523
+ },
524
+ {
525
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_138.nii.gz",
526
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_194.nii.gz",
527
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_138.nii.gz",
528
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_194.nii.gz"
529
+ },
530
+ {
531
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_138.nii.gz",
532
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_183.nii.gz",
533
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_138.nii.gz",
534
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_183.nii.gz"
535
+ },
536
+ {
537
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_138.nii.gz",
538
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_170.nii.gz",
539
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_138.nii.gz",
540
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_170.nii.gz"
541
+ },
542
+ {
543
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_183.nii.gz",
544
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_181.nii.gz",
545
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_183.nii.gz"
546
+ },
547
+ {
548
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_183.nii.gz",
549
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_175.nii.gz",
550
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_183.nii.gz"
551
+ },
552
+ {
553
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_183.nii.gz",
554
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_190.nii.gz",
555
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_183.nii.gz",
556
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_190.nii.gz"
557
+ },
558
+ {
559
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_183.nii.gz",
560
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_174.nii.gz",
561
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_183.nii.gz",
562
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_174.nii.gz"
563
+ },
564
+ {
565
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_183.nii.gz",
566
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_140.nii.gz",
567
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_183.nii.gz",
568
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_140.nii.gz"
569
+ },
570
+ {
571
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_183.nii.gz",
572
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_192.nii.gz",
573
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_183.nii.gz",
574
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_192.nii.gz"
575
+ },
576
+ {
577
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_183.nii.gz",
578
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_194.nii.gz",
579
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_183.nii.gz",
580
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_194.nii.gz"
581
+ },
582
+ {
583
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_183.nii.gz",
584
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_138.nii.gz",
585
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_183.nii.gz",
586
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_138.nii.gz"
587
+ },
588
+ {
589
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_183.nii.gz",
590
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_170.nii.gz",
591
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_183.nii.gz",
592
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_170.nii.gz"
593
+ },
594
+ {
595
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_170.nii.gz",
596
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_181.nii.gz",
597
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_170.nii.gz"
598
+ },
599
+ {
600
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_170.nii.gz",
601
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_175.nii.gz",
602
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_170.nii.gz"
603
+ },
604
+ {
605
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_170.nii.gz",
606
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_190.nii.gz",
607
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_170.nii.gz",
608
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_190.nii.gz"
609
+ },
610
+ {
611
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_170.nii.gz",
612
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_174.nii.gz",
613
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_170.nii.gz",
614
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_174.nii.gz"
615
+ },
616
+ {
617
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_170.nii.gz",
618
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_140.nii.gz",
619
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_170.nii.gz",
620
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_140.nii.gz"
621
+ },
622
+ {
623
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_170.nii.gz",
624
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_192.nii.gz",
625
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_170.nii.gz",
626
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_192.nii.gz"
627
+ },
628
+ {
629
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_170.nii.gz",
630
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_194.nii.gz",
631
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_170.nii.gz",
632
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_194.nii.gz"
633
+ },
634
+ {
635
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_170.nii.gz",
636
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_138.nii.gz",
637
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_170.nii.gz",
638
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_138.nii.gz"
639
+ },
640
+ {
641
+ "img1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_170.nii.gz",
642
+ "img2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/images/IR_183.nii.gz",
643
+ "seg1": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_170.nii.gz",
644
+ "seg2": "/home/ameen/Nasal Cavity/similarity/output_Zscore/labels/IR_183.nii.gz"
645
+ }
646
+ ]
647
+ }
deepatlas_results/Task002_rnmw153_sim_Zsc/training_results/RegNet/anatomy_loss_reg.txt ADDED
@@ -0,0 +1,150 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 0.000000000000000000e+00 3.135408550500869973e-01
2
+ 1.000000000000000000e+00 3.057660102844238503e-01
3
+ 2.000000000000000000e+00 3.184201985597610585e-01
4
+ 3.000000000000000000e+00 3.163259848952293396e-01
5
+ 4.000000000000000000e+00 2.804470062255859375e-01
6
+ 5.000000000000000000e+00 2.842357277870178334e-01
7
+ 6.000000000000000000e+00 2.638818100094795227e-01
8
+ 7.000000000000000000e+00 2.777637779712677224e-01
9
+ 8.000000000000000000e+00 2.861650839447975048e-01
10
+ 9.000000000000000000e+00 2.704068765044212452e-01
11
+ 1.000000000000000000e+01 3.000330477952957153e-01
12
+ 1.100000000000000000e+01 2.721584409475326649e-01
13
+ 1.200000000000000000e+01 2.716749116778373607e-01
14
+ 1.300000000000000000e+01 2.548105090856552235e-01
15
+ 1.400000000000000000e+01 2.633110970258712991e-01
16
+ 1.500000000000000000e+01 2.299902319908141979e-01
17
+ 1.600000000000000000e+01 2.469595715403556935e-01
18
+ 1.700000000000000000e+01 2.442218318581581005e-01
19
+ 1.800000000000000000e+01 2.519750177860259899e-01
20
+ 1.900000000000000000e+01 2.407886266708373912e-01
21
+ 2.000000000000000000e+01 2.488558292388916016e-01
22
+ 2.100000000000000000e+01 2.248426392674446050e-01
23
+ 2.200000000000000000e+01 2.140324071049690136e-01
24
+ 2.300000000000000000e+01 2.415555238723754994e-01
25
+ 2.400000000000000000e+01 2.159206911921501271e-01
26
+ 2.500000000000000000e+01 2.323278933763504028e-01
27
+ 2.600000000000000000e+01 2.173769950866699108e-01
28
+ 2.700000000000000000e+01 2.162912964820861705e-01
29
+ 2.800000000000000000e+01 2.229687616229057423e-01
30
+ 2.900000000000000000e+01 2.084074690937995911e-01
31
+ 3.000000000000000000e+01 2.109835729002952631e-01
32
+ 3.100000000000000000e+01 1.859750419855117909e-01
33
+ 3.200000000000000000e+01 2.157837614417076055e-01
34
+ 3.300000000000000000e+01 2.094791114330291637e-01
35
+ 3.400000000000000000e+01 2.009812265634536854e-01
36
+ 3.500000000000000000e+01 2.076985687017440796e-01
37
+ 3.600000000000000000e+01 1.918238341808319036e-01
38
+ 3.700000000000000000e+01 2.009931191802024897e-01
39
+ 3.800000000000000000e+01 1.700636789202690236e-01
40
+ 3.900000000000000000e+01 1.962646290659904591e-01
41
+ 4.000000000000000000e+01 1.972826182842254694e-01
42
+ 4.100000000000000000e+01 1.782805666327476612e-01
43
+ 4.200000000000000000e+01 1.880114108324050792e-01
44
+ 4.300000000000000000e+01 1.938056230545043834e-01
45
+ 4.400000000000000000e+01 1.713560804724693354e-01
46
+ 4.500000000000000000e+01 1.633046224713325389e-01
47
+ 4.600000000000000000e+01 1.850738257169723400e-01
48
+ 4.700000000000000000e+01 1.752641960978507940e-01
49
+ 4.800000000000000000e+01 1.752187252044677679e-01
50
+ 4.900000000000000000e+01 1.773821949958801214e-01
51
+ 5.000000000000000000e+01 1.690604001283645685e-01
52
+ 5.100000000000000000e+01 1.769385010004043690e-01
53
+ 5.200000000000000000e+01 1.649790585041046032e-01
54
+ 5.300000000000000000e+01 1.689535513520240839e-01
55
+ 5.400000000000000000e+01 1.614030286669731196e-01
56
+ 5.500000000000000000e+01 1.654360249638557323e-01
57
+ 5.600000000000000000e+01 1.637925118207931574e-01
58
+ 5.700000000000000000e+01 1.591141849756240734e-01
59
+ 5.800000000000000000e+01 1.615763455629348755e-01
60
+ 5.900000000000000000e+01 1.589841812849044911e-01
61
+ 6.000000000000000000e+01 1.529493838548660334e-01
62
+ 6.100000000000000000e+01 1.629365801811218373e-01
63
+ 6.200000000000000000e+01 1.529535248875618092e-01
64
+ 6.300000000000000000e+01 1.433281436562538202e-01
65
+ 6.400000000000000000e+01 1.461468547582626398e-01
66
+ 6.500000000000000000e+01 1.441940695047378429e-01
67
+ 6.600000000000000000e+01 1.612766250967979376e-01
68
+ 6.700000000000000000e+01 1.452899366617202703e-01
69
+ 6.800000000000000000e+01 1.482794821262359564e-01
70
+ 6.900000000000000000e+01 1.440321028232574574e-01
71
+ 7.000000000000000000e+01 1.642172396183013972e-01
72
+ 7.100000000000000000e+01 1.393341615796089283e-01
73
+ 7.200000000000000000e+01 1.509022817015647777e-01
74
+ 7.300000000000000000e+01 1.408919513225555531e-01
75
+ 7.400000000000000000e+01 1.379217818379402105e-01
76
+ 7.500000000000000000e+01 1.428895592689514160e-01
77
+ 7.600000000000000000e+01 1.432754531502723749e-01
78
+ 7.700000000000000000e+01 1.401198387145995983e-01
79
+ 7.800000000000000000e+01 1.369169071316719000e-01
80
+ 7.900000000000000000e+01 1.263125658035278431e-01
81
+ 8.000000000000000000e+01 1.471106737852096447e-01
82
+ 8.100000000000000000e+01 1.456460162997245678e-01
83
+ 8.200000000000000000e+01 1.409068360924720653e-01
84
+ 8.300000000000000000e+01 1.299625962972640880e-01
85
+ 8.400000000000000000e+01 1.286012873053550831e-01
86
+ 8.500000000000000000e+01 1.319817781448364147e-01
87
+ 8.600000000000000000e+01 1.319406539201736506e-01
88
+ 8.700000000000000000e+01 1.317121580243110768e-01
89
+ 8.800000000000000000e+01 1.380098029971122686e-01
90
+ 8.900000000000000000e+01 1.200489461421966608e-01
91
+ 9.000000000000000000e+01 1.301555976271629445e-01
92
+ 9.100000000000000000e+01 1.210053414106369019e-01
93
+ 9.200000000000000000e+01 1.437995210289955139e-01
94
+ 9.300000000000000000e+01 1.279117614030838013e-01
95
+ 9.400000000000000000e+01 1.311841651797294728e-01
96
+ 9.500000000000000000e+01 1.248174116015434293e-01
97
+ 9.600000000000000000e+01 1.231958493590354919e-01
98
+ 9.700000000000000000e+01 1.236210078001022394e-01
99
+ 9.800000000000000000e+01 1.223133817315101568e-01
100
+ 9.900000000000000000e+01 1.261768355965614263e-01
101
+ 1.000000000000000000e+02 1.200880080461502047e-01
102
+ 1.010000000000000000e+02 1.136251106858253423e-01
103
+ 1.020000000000000000e+02 1.246556535363197299e-01
104
+ 1.030000000000000000e+02 1.171860009431839045e-01
105
+ 1.040000000000000000e+02 1.222651720046997043e-01
106
+ 1.050000000000000000e+02 1.313052698969841003e-01
107
+ 1.060000000000000000e+02 1.185701236128807040e-01
108
+ 1.070000000000000000e+02 1.071438625454902704e-01
109
+ 1.080000000000000000e+02 1.211932659149169977e-01
110
+ 1.090000000000000000e+02 1.245229557156562750e-01
111
+ 1.100000000000000000e+02 1.179555296897888128e-01
112
+ 1.110000000000000000e+02 1.208368360996246310e-01
113
+ 1.120000000000000000e+02 1.135055571794509888e-01
114
+ 1.130000000000000000e+02 1.190253823995590238e-01
115
+ 1.140000000000000000e+02 1.186841472983360263e-01
116
+ 1.150000000000000000e+02 1.164834693074226352e-01
117
+ 1.160000000000000000e+02 1.146582379937171881e-01
118
+ 1.170000000000000000e+02 1.105820655822753878e-01
119
+ 1.180000000000000000e+02 1.155817240476608304e-01
120
+ 1.190000000000000000e+02 1.081655591726303073e-01
121
+ 1.200000000000000000e+02 1.141833648085594233e-01
122
+ 1.210000000000000000e+02 1.167039632797241266e-01
123
+ 1.220000000000000000e+02 1.074711605906486511e-01
124
+ 1.230000000000000000e+02 1.112695679068565341e-01
125
+ 1.240000000000000000e+02 1.128994792699813815e-01
126
+ 1.250000000000000000e+02 1.113000258803367587e-01
127
+ 1.260000000000000000e+02 1.078018143773078891e-01
128
+ 1.270000000000000000e+02 1.133237540721893366e-01
129
+ 1.280000000000000000e+02 1.072465956211090116e-01
130
+ 1.290000000000000000e+02 9.828051179647445679e-02
131
+ 1.300000000000000000e+02 1.064729228615760859e-01
132
+ 1.310000000000000000e+02 1.107094034552574130e-01
133
+ 1.320000000000000000e+02 1.105445966124534579e-01
134
+ 1.330000000000000000e+02 1.167655438184738159e-01
135
+ 1.340000000000000000e+02 1.096195027232170077e-01
136
+ 1.350000000000000000e+02 1.055271387100219782e-01
137
+ 1.360000000000000000e+02 1.070695728063583430e-01
138
+ 1.370000000000000000e+02 1.155413240194320679e-01
139
+ 1.380000000000000000e+02 1.030856594443321228e-01
140
+ 1.390000000000000000e+02 1.036047086119651767e-01
141
+ 1.400000000000000000e+02 1.098298564553260859e-01
142
+ 1.410000000000000000e+02 1.069389253854751559e-01
143
+ 1.420000000000000000e+02 1.012663558125495855e-01
144
+ 1.430000000000000000e+02 1.033536165952682467e-01
145
+ 1.440000000000000000e+02 9.961889088153838556e-02
146
+ 1.450000000000000000e+02 1.054751142859458868e-01
147
+ 1.460000000000000000e+02 9.932236373424530029e-02
148
+ 1.470000000000000000e+02 1.011745497584342901e-01
149
+ 1.480000000000000000e+02 1.033044055104255649e-01
150
+ 1.490000000000000000e+02 1.022559776902198792e-01
deepatlas_results/Task002_rnmw153_sim_Zsc/training_results/RegNet/anatomy_reg_losses.png ADDED
deepatlas_results/Task002_rnmw153_sim_Zsc/training_results/RegNet/reg_net_best.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8cc749e4e671c0952e9c02aa07070ae002ff4513eb1cfe40699333010553fdc4
3
+ size 980151
deepatlas_results/Task002_rnmw153_sim_Zsc/training_results/RegNet/reg_net_training_losses.png ADDED
deepatlas_results/Task002_rnmw153_sim_Zsc/training_results/RegNet/regularization_loss.txt ADDED
@@ -0,0 +1,150 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 0.000000000000000000e+00 9.647657734375000291e+04
2
+ 1.000000000000000000e+00 5.600248164062500291e+04
3
+ 2.000000000000000000e+00 4.622505507812499854e+04
4
+ 3.000000000000000000e+00 4.267595859375000146e+04
5
+ 4.000000000000000000e+00 4.140668750000000000e+04
6
+ 5.000000000000000000e+00 4.083900624999999854e+04
7
+ 6.000000000000000000e+00 4.204608710937500291e+04
8
+ 7.000000000000000000e+00 4.270976679687500291e+04
9
+ 8.000000000000000000e+00 4.361334960937500000e+04
10
+ 9.000000000000000000e+00 4.175023124999999709e+04
11
+ 1.000000000000000000e+01 4.343883320312500291e+04
12
+ 1.100000000000000000e+01 4.333560429687499709e+04
13
+ 1.200000000000000000e+01 4.219379726562499854e+04
14
+ 1.300000000000000000e+01 4.281041914062500291e+04
15
+ 1.400000000000000000e+01 4.174859609374999854e+04
16
+ 1.500000000000000000e+01 4.179512265625000146e+04
17
+ 1.600000000000000000e+01 4.092214960937500291e+04
18
+ 1.700000000000000000e+01 3.970900468749999709e+04
19
+ 1.800000000000000000e+01 3.924284062499999709e+04
20
+ 1.900000000000000000e+01 3.987322109374999854e+04
21
+ 2.000000000000000000e+01 4.095302929687500000e+04
22
+ 2.100000000000000000e+01 4.022246718749999854e+04
23
+ 2.200000000000000000e+01 3.631811601562499709e+04
24
+ 2.300000000000000000e+01 3.764023359375000291e+04
25
+ 2.400000000000000000e+01 3.785904765624999709e+04
26
+ 2.500000000000000000e+01 3.709597187500000291e+04
27
+ 2.600000000000000000e+01 3.850280976562500291e+04
28
+ 2.700000000000000000e+01 3.662425742187500146e+04
29
+ 2.800000000000000000e+01 3.526093242187499709e+04
30
+ 2.900000000000000000e+01 3.509209101562500291e+04
31
+ 3.000000000000000000e+01 3.490247851562500000e+04
32
+ 3.100000000000000000e+01 3.441726445312499709e+04
33
+ 3.200000000000000000e+01 3.833977851562500291e+04
34
+ 3.300000000000000000e+01 3.641356210937500146e+04
35
+ 3.400000000000000000e+01 3.424167519531249854e+04
36
+ 3.500000000000000000e+01 3.409850781250000000e+04
37
+ 3.600000000000000000e+01 3.476144804687499709e+04
38
+ 3.700000000000000000e+01 3.356663320312499854e+04
39
+ 3.800000000000000000e+01 3.360199863281250146e+04
40
+ 3.900000000000000000e+01 3.168097812500000146e+04
41
+ 4.000000000000000000e+01 3.086164238281250073e+04
42
+ 4.100000000000000000e+01 3.135119082031249854e+04
43
+ 4.200000000000000000e+01 3.230545429687499927e+04
44
+ 4.300000000000000000e+01 3.258044433593750000e+04
45
+ 4.400000000000000000e+01 2.970054238281249854e+04
46
+ 4.500000000000000000e+01 2.943311367187499854e+04
47
+ 4.600000000000000000e+01 3.069561796875000073e+04
48
+ 4.700000000000000000e+01 3.084135527343750073e+04
49
+ 4.800000000000000000e+01 3.094149082031250146e+04
50
+ 4.900000000000000000e+01 2.917231191406249854e+04
51
+ 5.000000000000000000e+01 3.135592246093750146e+04
52
+ 5.100000000000000000e+01 3.039203046874999927e+04
53
+ 5.200000000000000000e+01 3.012575664062500073e+04
54
+ 5.300000000000000000e+01 2.929403671875000146e+04
55
+ 5.400000000000000000e+01 3.106284863281250000e+04
56
+ 5.500000000000000000e+01 2.953302207031250146e+04
57
+ 5.600000000000000000e+01 2.830232207031250073e+04
58
+ 5.700000000000000000e+01 2.767354257812500146e+04
59
+ 5.800000000000000000e+01 2.854511542968749927e+04
60
+ 5.900000000000000000e+01 2.938652421875000073e+04
61
+ 6.000000000000000000e+01 2.757793476562499927e+04
62
+ 6.100000000000000000e+01 2.856210996093750146e+04
63
+ 6.200000000000000000e+01 2.805230800781249854e+04
64
+ 6.300000000000000000e+01 2.844989472656249927e+04
65
+ 6.400000000000000000e+01 2.834025332031249854e+04
66
+ 6.500000000000000000e+01 2.716633906249999927e+04
67
+ 6.600000000000000000e+01 2.722970449218749854e+04
68
+ 6.700000000000000000e+01 2.794483691406250000e+04
69
+ 6.800000000000000000e+01 2.762657714843750000e+04
70
+ 6.900000000000000000e+01 2.724710195312499854e+04
71
+ 7.000000000000000000e+01 2.686259179687500000e+04
72
+ 7.100000000000000000e+01 2.665010078124999927e+04
73
+ 7.200000000000000000e+01 2.729690820312500000e+04
74
+ 7.300000000000000000e+01 2.626355722656250146e+04
75
+ 7.400000000000000000e+01 2.985540253906249927e+04
76
+ 7.500000000000000000e+01 2.803707324218750000e+04
77
+ 7.600000000000000000e+01 2.577971601562499927e+04
78
+ 7.700000000000000000e+01 2.677021953125000073e+04
79
+ 7.800000000000000000e+01 2.589360351562500000e+04
80
+ 7.900000000000000000e+01 2.609319492187500146e+04
81
+ 8.000000000000000000e+01 2.626737285156250073e+04
82
+ 8.100000000000000000e+01 2.685944707031250073e+04
83
+ 8.200000000000000000e+01 2.635484394531249927e+04
84
+ 8.300000000000000000e+01 2.569543984374999854e+04
85
+ 8.400000000000000000e+01 2.600875253906250146e+04
86
+ 8.500000000000000000e+01 2.468057636718749927e+04
87
+ 8.600000000000000000e+01 2.744214921875000073e+04
88
+ 8.700000000000000000e+01 2.801404296875000000e+04
89
+ 8.800000000000000000e+01 2.714639218750000146e+04
90
+ 8.900000000000000000e+01 2.564207910156250000e+04
91
+ 9.000000000000000000e+01 2.602158124999999927e+04
92
+ 9.100000000000000000e+01 2.704508945312500146e+04
93
+ 9.200000000000000000e+01 2.489874941406249854e+04
94
+ 9.300000000000000000e+01 2.557005625000000146e+04
95
+ 9.400000000000000000e+01 2.650752558593749927e+04
96
+ 9.500000000000000000e+01 2.507230214843749854e+04
97
+ 9.600000000000000000e+01 2.503884765625000000e+04
98
+ 9.700000000000000000e+01 2.575267187500000000e+04
99
+ 9.800000000000000000e+01 2.467629902343749927e+04
100
+ 9.900000000000000000e+01 2.569107050781249927e+04
101
+ 1.000000000000000000e+02 2.621700605468749927e+04
102
+ 1.010000000000000000e+02 2.429901914062500146e+04
103
+ 1.020000000000000000e+02 2.468174355468749854e+04
104
+ 1.030000000000000000e+02 2.494141835937499854e+04
105
+ 1.040000000000000000e+02 2.521128085937500146e+04
106
+ 1.050000000000000000e+02 2.498605429687500146e+04
107
+ 1.060000000000000000e+02 2.586065898437500073e+04
108
+ 1.070000000000000000e+02 2.588916445312499854e+04
109
+ 1.080000000000000000e+02 2.418775214843749927e+04
110
+ 1.090000000000000000e+02 2.435215859374999854e+04
111
+ 1.100000000000000000e+02 2.436132246093749927e+04
112
+ 1.110000000000000000e+02 2.536200878906250000e+04
113
+ 1.120000000000000000e+02 2.559010410156250146e+04
114
+ 1.130000000000000000e+02 2.506657070312499854e+04
115
+ 1.140000000000000000e+02 2.503773847656249927e+04
116
+ 1.150000000000000000e+02 2.437319042968750000e+04
117
+ 1.160000000000000000e+02 2.431990234375000000e+04
118
+ 1.170000000000000000e+02 2.550118984374999854e+04
119
+ 1.180000000000000000e+02 2.292500937500000146e+04
120
+ 1.190000000000000000e+02 2.438267910156249854e+04
121
+ 1.200000000000000000e+02 2.415241660156250146e+04
122
+ 1.210000000000000000e+02 2.505895195312500073e+04
123
+ 1.220000000000000000e+02 2.397140292968750146e+04
124
+ 1.230000000000000000e+02 2.385521972656250000e+04
125
+ 1.240000000000000000e+02 2.429430468750000000e+04
126
+ 1.250000000000000000e+02 2.560967167968750073e+04
127
+ 1.260000000000000000e+02 2.449504160156250146e+04
128
+ 1.270000000000000000e+02 2.443714902343750146e+04
129
+ 1.280000000000000000e+02 2.286358164062500146e+04
130
+ 1.290000000000000000e+02 2.320438867187500000e+04
131
+ 1.300000000000000000e+02 2.252850078125000073e+04
132
+ 1.310000000000000000e+02 2.351528867187500146e+04
133
+ 1.320000000000000000e+02 2.366736621093750000e+04
134
+ 1.330000000000000000e+02 2.437287714843749927e+04
135
+ 1.340000000000000000e+02 2.427623671874999854e+04
136
+ 1.350000000000000000e+02 2.388841269531250146e+04
137
+ 1.360000000000000000e+02 2.342027714843750073e+04
138
+ 1.370000000000000000e+02 2.348164023437500146e+04
139
+ 1.380000000000000000e+02 2.264112851562500146e+04
140
+ 1.390000000000000000e+02 2.282989589843749854e+04
141
+ 1.400000000000000000e+02 2.346099238281249927e+04
142
+ 1.410000000000000000e+02 2.252648867187499854e+04
143
+ 1.420000000000000000e+02 2.377134687499999927e+04
144
+ 1.430000000000000000e+02 2.372876152343750073e+04
145
+ 1.440000000000000000e+02 2.412109472656250000e+04
146
+ 1.450000000000000000e+02 2.291069023437500073e+04
147
+ 1.460000000000000000e+02 2.278465429687500000e+04
148
+ 1.470000000000000000e+02 2.236802773437499854e+04
149
+ 1.480000000000000000e+02 2.275189082031249927e+04
150
+ 1.490000000000000000e+02 2.340973125000000073e+04
deepatlas_results/Task002_rnmw153_sim_Zsc/training_results/RegNet/regularization_reg_losses.png ADDED
deepatlas_results/Task002_rnmw153_sim_Zsc/training_results/RegNet/similarity_loss_reg.txt ADDED
@@ -0,0 +1,150 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 0.000000000000000000e+00 -4.168474763631820901e-01
2
+ 1.000000000000000000e+00 -4.263603955507278553e-01
3
+ 2.000000000000000000e+00 -4.308078646659850852e-01
4
+ 3.000000000000000000e+00 -4.473009109497070312e-01
5
+ 4.000000000000000000e+00 -4.597851067781448253e-01
6
+ 5.000000000000000000e+00 -4.279521614313125388e-01
7
+ 6.000000000000000000e+00 -4.499931335449218750e-01
8
+ 7.000000000000000000e+00 -4.442608237266540416e-01
9
+ 8.000000000000000000e+00 -4.535590052604675515e-01
10
+ 9.000000000000000000e+00 -4.448471128940582053e-01
11
+ 1.000000000000000000e+01 -4.357178181409835926e-01
12
+ 1.100000000000000000e+01 -4.437347769737243763e-01
13
+ 1.200000000000000000e+01 -4.458012044429778831e-01
14
+ 1.300000000000000000e+01 -4.529227972030639426e-01
15
+ 1.400000000000000000e+01 -4.295926243066787720e-01
16
+ 1.500000000000000000e+01 -4.595782548189163430e-01
17
+ 1.600000000000000000e+01 -4.517234951257705911e-01
18
+ 1.700000000000000000e+01 -4.423192799091338889e-01
19
+ 1.800000000000000000e+01 -4.439570069313049094e-01
20
+ 1.900000000000000000e+01 -4.530792713165283092e-01
21
+ 2.000000000000000000e+01 -4.530134618282318226e-01
22
+ 2.100000000000000000e+01 -4.569927573204040638e-01
23
+ 2.200000000000000000e+01 -4.498479574918746726e-01
24
+ 2.300000000000000000e+01 -4.429930806159972922e-01
25
+ 2.400000000000000000e+01 -4.545052170753479115e-01
26
+ 2.500000000000000000e+01 -4.589251607656478771e-01
27
+ 2.600000000000000000e+01 -4.480702817440033181e-01
28
+ 2.700000000000000000e+01 -4.614715576171875000e-01
29
+ 2.800000000000000000e+01 -4.644932091236114724e-01
30
+ 2.900000000000000000e+01 -4.536971420049667469e-01
31
+ 3.000000000000000000e+01 -4.618920236825942993e-01
32
+ 3.100000000000000000e+01 -4.454625546932220459e-01
33
+ 3.200000000000000000e+01 -4.432837992906570324e-01
34
+ 3.300000000000000000e+01 -4.556560337543487660e-01
35
+ 3.400000000000000000e+01 -4.563916087150573508e-01
36
+ 3.500000000000000000e+01 -4.624453812837600597e-01
37
+ 3.600000000000000000e+01 -4.610332757234573142e-01
38
+ 3.700000000000000000e+01 -4.534836649894714578e-01
39
+ 3.800000000000000000e+01 -4.493382364511490090e-01
40
+ 3.900000000000000000e+01 -4.612041652202606423e-01
41
+ 4.000000000000000000e+01 -4.481392472982406727e-01
42
+ 4.100000000000000000e+01 -4.574688106775283702e-01
43
+ 4.200000000000000000e+01 -4.585757911205291526e-01
44
+ 4.300000000000000000e+01 -4.591083139181136863e-01
45
+ 4.400000000000000000e+01 -4.620190680027008168e-01
46
+ 4.500000000000000000e+01 -4.439658433198928611e-01
47
+ 4.600000000000000000e+01 -4.682959735393524281e-01
48
+ 4.700000000000000000e+01 -4.619445204734802246e-01
49
+ 4.800000000000000000e+01 -4.570848613977432362e-01
50
+ 4.900000000000000000e+01 -4.644496768712997326e-01
51
+ 5.000000000000000000e+01 -4.555140912532806285e-01
52
+ 5.100000000000000000e+01 -4.544000834226608054e-01
53
+ 5.200000000000000000e+01 -4.539999753236770852e-01
54
+ 5.300000000000000000e+01 -4.513368487358093151e-01
55
+ 5.400000000000000000e+01 -4.691209405660629050e-01
56
+ 5.500000000000000000e+01 -4.700714707374572532e-01
57
+ 5.600000000000000000e+01 -4.616314649581909069e-01
58
+ 5.700000000000000000e+01 -4.689020931720733754e-01
59
+ 5.800000000000000000e+01 -4.532034665346145741e-01
60
+ 5.900000000000000000e+01 -4.531166106462478638e-01
61
+ 6.000000000000000000e+01 -4.678452193737029918e-01
62
+ 6.100000000000000000e+01 -4.622920960187911765e-01
63
+ 6.200000000000000000e+01 -4.603021264076233021e-01
64
+ 6.300000000000000000e+01 -4.684280097484588845e-01
65
+ 6.400000000000000000e+01 -4.558636069297790638e-01
66
+ 6.500000000000000000e+01 -4.540676534175872581e-01
67
+ 6.600000000000000000e+01 -4.655692547559738270e-01
68
+ 6.700000000000000000e+01 -4.540375351905822643e-01
69
+ 6.800000000000000000e+01 -4.677469283342361450e-01
70
+ 6.900000000000000000e+01 -4.726172119379043468e-01
71
+ 7.000000000000000000e+01 -4.646616935729980247e-01
72
+ 7.100000000000000000e+01 -4.598768830299377330e-01
73
+ 7.200000000000000000e+01 -4.578260272741317971e-01
74
+ 7.300000000000000000e+01 -4.739276260137558205e-01
75
+ 7.400000000000000000e+01 -4.653645187616348156e-01
76
+ 7.500000000000000000e+01 -4.734996885061263927e-01
77
+ 7.600000000000000000e+01 -4.475734978914260864e-01
78
+ 7.700000000000000000e+01 -4.597442090511322244e-01
79
+ 7.800000000000000000e+01 -4.581354439258575328e-01
80
+ 7.900000000000000000e+01 -4.675897061824798584e-01
81
+ 8.000000000000000000e+01 -4.581032097339630127e-01
82
+ 8.100000000000000000e+01 -4.732825607061386108e-01
83
+ 8.200000000000000000e+01 -4.634899914264679066e-01
84
+ 8.300000000000000000e+01 -4.642924636602401844e-01
85
+ 8.400000000000000000e+01 -4.618958413600921520e-01
86
+ 8.500000000000000000e+01 -4.642670065164565818e-01
87
+ 8.600000000000000000e+01 -4.682172983884811401e-01
88
+ 8.700000000000000000e+01 -4.585850000381469505e-01
89
+ 8.800000000000000000e+01 -4.672844409942626953e-01
90
+ 8.900000000000000000e+01 -4.735285311937332264e-01
91
+ 9.000000000000000000e+01 -4.580027431249618641e-01
92
+ 9.100000000000000000e+01 -4.744705915451049583e-01
93
+ 9.200000000000000000e+01 -4.685657173395156860e-01
94
+ 9.300000000000000000e+01 -4.644354701042175404e-01
95
+ 9.400000000000000000e+01 -4.517805755138397106e-01
96
+ 9.500000000000000000e+01 -4.675363123416900746e-01
97
+ 9.600000000000000000e+01 -4.589057117700576782e-01
98
+ 9.700000000000000000e+01 -4.702290564775467030e-01
99
+ 9.800000000000000000e+01 -4.703976273536681907e-01
100
+ 9.900000000000000000e+01 -4.785565227270126565e-01
101
+ 1.000000000000000000e+02 -4.528923690319061057e-01
102
+ 1.010000000000000000e+02 -4.802600026130676159e-01
103
+ 1.020000000000000000e+02 -4.595269829034805409e-01
104
+ 1.030000000000000000e+02 -4.649816811084747092e-01
105
+ 1.040000000000000000e+02 -4.689352661371231079e-01
106
+ 1.050000000000000000e+02 -4.574464380741119496e-01
107
+ 1.060000000000000000e+02 -4.783898264169693104e-01
108
+ 1.070000000000000000e+02 -4.619613021612167247e-01
109
+ 1.080000000000000000e+02 -4.732825309038162231e-01
110
+ 1.090000000000000000e+02 -4.757978379726409690e-01
111
+ 1.100000000000000000e+02 -4.625800907611846702e-01
112
+ 1.110000000000000000e+02 -4.597027510404586570e-01
113
+ 1.120000000000000000e+02 -4.668075680732727273e-01
114
+ 1.130000000000000000e+02 -4.745635837316513062e-01
115
+ 1.140000000000000000e+02 -4.598248332738876232e-01
116
+ 1.150000000000000000e+02 -4.670720726251602062e-01
117
+ 1.160000000000000000e+02 -4.626205563545227051e-01
118
+ 1.170000000000000000e+02 -4.646981835365295299e-01
119
+ 1.180000000000000000e+02 -4.784785419702529685e-01
120
+ 1.190000000000000000e+02 -4.665146172046661377e-01
121
+ 1.200000000000000000e+02 -4.636369466781616322e-01
122
+ 1.210000000000000000e+02 -4.779191941022872814e-01
123
+ 1.220000000000000000e+02 -4.682897418737411721e-01
124
+ 1.230000000000000000e+02 -4.683959364891052135e-01
125
+ 1.240000000000000000e+02 -4.700077801942825206e-01
126
+ 1.250000000000000000e+02 -4.585238367319107167e-01
127
+ 1.260000000000000000e+02 -4.785058617591858021e-01
128
+ 1.270000000000000000e+02 -4.674418091773986927e-01
129
+ 1.280000000000000000e+02 -4.701861381530761941e-01
130
+ 1.290000000000000000e+02 -4.776402652263641579e-01
131
+ 1.300000000000000000e+02 -4.825747549533844216e-01
132
+ 1.310000000000000000e+02 -4.672817528247833474e-01
133
+ 1.320000000000000000e+02 -4.682697743177414162e-01
134
+ 1.330000000000000000e+02 -4.557759791612625011e-01
135
+ 1.340000000000000000e+02 -4.726522713899612427e-01
136
+ 1.350000000000000000e+02 -4.733480244874954113e-01
137
+ 1.360000000000000000e+02 -4.721144646406173484e-01
138
+ 1.370000000000000000e+02 -4.663318544626235962e-01
139
+ 1.380000000000000000e+02 -4.689928203821182029e-01
140
+ 1.390000000000000000e+02 -4.694450110197067039e-01
141
+ 1.400000000000000000e+02 -4.788974851369857677e-01
142
+ 1.410000000000000000e+02 -4.824941486120223888e-01
143
+ 1.420000000000000000e+02 -4.659789711236953513e-01
144
+ 1.430000000000000000e+02 -4.698489278554916493e-01
145
+ 1.440000000000000000e+02 -4.578673005104064719e-01
146
+ 1.450000000000000000e+02 -4.711187154054641835e-01
147
+ 1.460000000000000000e+02 -4.698738873004913441e-01
148
+ 1.470000000000000000e+02 -4.764148712158203236e-01
149
+ 1.480000000000000000e+02 -4.688631236553192361e-01
150
+ 1.490000000000000000e+02 -4.754804491996765248e-01
deepatlas_results/Task002_rnmw153_sim_Zsc/training_results/RegNet/similarity_reg_losses.png ADDED
deepatlas_results/Task002_rnmw153_sim_Zsc/training_results/SegNet/anatomy_loss_seg.txt ADDED
@@ -0,0 +1,150 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 0.000000000000000000e+00 3.573212802410125621e-01
2
+ 1.000000000000000000e+00 3.016310274600982888e-01
3
+ 2.000000000000000000e+00 3.048958837985992210e-01
4
+ 3.000000000000000000e+00 2.943183720111847146e-01
5
+ 4.000000000000000000e+00 2.476005643606185858e-01
6
+ 5.000000000000000000e+00 2.876864552497863659e-01
7
+ 6.000000000000000000e+00 2.401628106832504328e-01
8
+ 7.000000000000000000e+00 2.676642984151840210e-01
9
+ 8.000000000000000000e+00 3.030611872673034668e-01
10
+ 9.000000000000000000e+00 2.673193246126174816e-01
11
+ 1.000000000000000000e+01 2.679113417863845936e-01
12
+ 1.100000000000000000e+01 2.307161211967468373e-01
13
+ 1.200000000000000000e+01 2.748242348432540783e-01
14
+ 1.300000000000000000e+01 2.454477936029434149e-01
15
+ 1.400000000000000000e+01 2.550766199827194103e-01
16
+ 1.500000000000000000e+01 2.454825192689895630e-01
17
+ 1.600000000000000000e+01 2.595056265592575184e-01
18
+ 1.700000000000000000e+01 2.408376395702362116e-01
19
+ 1.800000000000000000e+01 2.127094238996505848e-01
20
+ 1.900000000000000000e+01 2.307042092084884699e-01
21
+ 2.000000000000000000e+01 2.654647767543792947e-01
22
+ 2.100000000000000000e+01 2.419243335723876842e-01
23
+ 2.200000000000000000e+01 1.945116668939590454e-01
24
+ 2.300000000000000000e+01 2.601417511701583640e-01
25
+ 2.400000000000000000e+01 2.282205909490585272e-01
26
+ 2.500000000000000000e+01 1.868903011083603016e-01
27
+ 2.600000000000000000e+01 2.246729880571365356e-01
28
+ 2.700000000000000000e+01 1.985089689493179377e-01
29
+ 2.800000000000000000e+01 2.106462895870208629e-01
30
+ 2.900000000000000000e+01 1.747411251068115290e-01
31
+ 3.000000000000000000e+01 2.289323359727859497e-01
32
+ 3.100000000000000000e+01 1.958036184310913141e-01
33
+ 3.200000000000000000e+01 2.018931955099105835e-01
34
+ 3.300000000000000000e+01 2.011176228523254284e-01
35
+ 3.400000000000000000e+01 1.889271199703216608e-01
36
+ 3.500000000000000000e+01 1.674630045890808105e-01
37
+ 3.600000000000000000e+01 1.861122012138366755e-01
38
+ 3.700000000000000000e+01 1.871975004673004095e-01
39
+ 3.800000000000000000e+01 1.833468735218048151e-01
40
+ 3.900000000000000000e+01 1.996963977813720759e-01
41
+ 4.000000000000000000e+01 1.846072256565093883e-01
42
+ 4.100000000000000000e+01 1.742737770080566295e-01
43
+ 4.200000000000000000e+01 2.073888182640075684e-01
44
+ 4.300000000000000000e+01 1.943298697471618763e-01
45
+ 4.400000000000000000e+01 1.664489269256591852e-01
46
+ 4.500000000000000000e+01 1.703320384025573675e-01
47
+ 4.600000000000000000e+01 1.856246024370193370e-01
48
+ 4.700000000000000000e+01 1.825020164251327515e-01
49
+ 4.800000000000000000e+01 1.809851735830307062e-01
50
+ 4.900000000000000000e+01 1.618491530418396107e-01
51
+ 5.000000000000000000e+01 1.872929453849792591e-01
52
+ 5.100000000000000000e+01 1.632001817226410023e-01
53
+ 5.200000000000000000e+01 1.654672086238861139e-01
54
+ 5.300000000000000000e+01 1.620430082082748302e-01
55
+ 5.400000000000000000e+01 1.371830523014068715e-01
56
+ 5.500000000000000000e+01 1.343375712633133046e-01
57
+ 5.600000000000000000e+01 1.760016560554504284e-01
58
+ 5.700000000000000000e+01 1.373514264822006115e-01
59
+ 5.800000000000000000e+01 1.455889701843261774e-01
60
+ 5.900000000000000000e+01 1.405129104852676503e-01
61
+ 6.000000000000000000e+01 1.435708492994308416e-01
62
+ 6.100000000000000000e+01 1.453709840774536077e-01
63
+ 6.200000000000000000e+01 1.409398466348648071e-01
64
+ 6.300000000000000000e+01 1.430866688489914051e-01
65
+ 6.400000000000000000e+01 1.614443391561508068e-01
66
+ 6.500000000000000000e+01 1.450980633497238048e-01
67
+ 6.600000000000000000e+01 1.543001860380172618e-01
68
+ 6.700000000000000000e+01 1.640612185001373291e-01
69
+ 6.800000000000000000e+01 1.423606038093566950e-01
70
+ 6.900000000000000000e+01 1.569118171930313221e-01
71
+ 7.000000000000000000e+01 1.287293732166290283e-01
72
+ 7.100000000000000000e+01 1.371597200632095337e-01
73
+ 7.200000000000000000e+01 1.361799329519271795e-01
74
+ 7.300000000000000000e+01 1.242990702390670832e-01
75
+ 7.400000000000000000e+01 1.306319057941436879e-01
76
+ 7.500000000000000000e+01 1.288085937500000056e-01
77
+ 7.600000000000000000e+01 1.403280854225158747e-01
78
+ 7.700000000000000000e+01 1.227280646562576294e-01
79
+ 7.800000000000000000e+01 1.201626896858215277e-01
80
+ 7.900000000000000000e+01 1.346704751253127996e-01
81
+ 8.000000000000000000e+01 1.323057174682617243e-01
82
+ 8.100000000000000000e+01 1.253492325544357244e-01
83
+ 8.200000000000000000e+01 1.406400591135024969e-01
84
+ 8.300000000000000000e+01 1.207172423601150513e-01
85
+ 8.400000000000000000e+01 1.101396143436431940e-01
86
+ 8.500000000000000000e+01 1.376777350902557429e-01
87
+ 8.600000000000000000e+01 1.213785856962203979e-01
88
+ 8.700000000000000000e+01 1.210385411977767944e-01
89
+ 8.800000000000000000e+01 1.205412358045578058e-01
90
+ 8.900000000000000000e+01 1.240068972110748291e-01
91
+ 9.000000000000000000e+01 1.313861519098281916e-01
92
+ 9.100000000000000000e+01 1.162600874900817927e-01
93
+ 9.200000000000000000e+01 1.444916546344757191e-01
94
+ 9.300000000000000000e+01 1.275196611881256048e-01
95
+ 9.400000000000000000e+01 1.161691457033157404e-01
96
+ 9.500000000000000000e+01 1.160799205303192083e-01
97
+ 9.600000000000000000e+01 1.090261936187744141e-01
98
+ 9.700000000000000000e+01 1.152943998575210599e-01
99
+ 9.800000000000000000e+01 1.182651966810226440e-01
100
+ 9.900000000000000000e+01 1.337907671928405817e-01
101
+ 1.000000000000000000e+02 1.144938826560974177e-01
102
+ 1.010000000000000000e+02 1.012444019317626925e-01
103
+ 1.020000000000000000e+02 1.067855119705200251e-01
104
+ 1.030000000000000000e+02 1.236552417278289767e-01
105
+ 1.040000000000000000e+02 1.269590228796005360e-01
106
+ 1.050000000000000000e+02 1.210491657257080078e-01
107
+ 1.060000000000000000e+02 1.159689813852310125e-01
108
+ 1.070000000000000000e+02 1.056069612503051786e-01
109
+ 1.080000000000000000e+02 1.093798547983169528e-01
110
+ 1.090000000000000000e+02 1.066102117300033597e-01
111
+ 1.100000000000000000e+02 1.021177589893341037e-01
112
+ 1.110000000000000000e+02 1.069174706935882568e-01
113
+ 1.120000000000000000e+02 1.110250294208526556e-01
114
+ 1.130000000000000000e+02 9.386113286018371582e-02
115
+ 1.140000000000000000e+02 1.091269701719284058e-01
116
+ 1.150000000000000000e+02 1.106141597032546942e-01
117
+ 1.160000000000000000e+02 1.080716401338577271e-01
118
+ 1.170000000000000000e+02 1.028281450271606445e-01
119
+ 1.180000000000000000e+02 1.077720254659652738e-01
120
+ 1.190000000000000000e+02 1.014000862836837713e-01
121
+ 1.200000000000000000e+02 9.986269772052765170e-02
122
+ 1.210000000000000000e+02 1.082169353961944525e-01
123
+ 1.220000000000000000e+02 1.062078773975372314e-01
124
+ 1.230000000000000000e+02 9.392457008361816684e-02
125
+ 1.240000000000000000e+02 9.641477167606353482e-02
126
+ 1.250000000000000000e+02 9.422739148139953891e-02
127
+ 1.260000000000000000e+02 1.075347006320953314e-01
128
+ 1.270000000000000000e+02 1.135191649198532132e-01
129
+ 1.280000000000000000e+02 1.168108433485031128e-01
130
+ 1.290000000000000000e+02 9.352702796459197443e-02
131
+ 1.300000000000000000e+02 9.605710506439209262e-02
132
+ 1.310000000000000000e+02 1.090006858110427884e-01
133
+ 1.320000000000000000e+02 9.683288931846618930e-02
134
+ 1.330000000000000000e+02 9.844864010810852606e-02
135
+ 1.340000000000000000e+02 1.054120570421218928e-01
136
+ 1.350000000000000000e+02 9.830847680568695346e-02
137
+ 1.360000000000000000e+02 1.000306367874145563e-01
138
+ 1.370000000000000000e+02 9.272636771202087680e-02
139
+ 1.380000000000000000e+02 1.006171077489852878e-01
140
+ 1.390000000000000000e+02 1.118859261274337824e-01
141
+ 1.400000000000000000e+02 1.046191841363906805e-01
142
+ 1.410000000000000000e+02 8.171942234039306086e-02
143
+ 1.420000000000000000e+02 1.026437968015670721e-01
144
+ 1.430000000000000000e+02 9.383668601512909491e-02
145
+ 1.440000000000000000e+02 9.504491984844207209e-02
146
+ 1.450000000000000000e+02 9.691132009029387873e-02
147
+ 1.460000000000000000e+02 8.896677196025848389e-02
148
+ 1.470000000000000000e+02 9.473379850387572687e-02
149
+ 1.480000000000000000e+02 1.006733357906341580e-01
150
+ 1.490000000000000000e+02 9.622145295143127997e-02
deepatlas_results/Task002_rnmw153_sim_Zsc/training_results/SegNet/anatomy_seg_losses.png ADDED
deepatlas_results/Task002_rnmw153_sim_Zsc/training_results/SegNet/seg_net_best.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e382064530f8590b4127db57db6ee98b4b79040a590525f63d95fa460d690262
3
+ size 1776995
deepatlas_results/Task002_rnmw153_sim_Zsc/training_results/SegNet/seg_net_training_losses.png ADDED
deepatlas_results/Task002_rnmw153_sim_Zsc/training_results/SegNet/supervised_loss_seg.txt ADDED
@@ -0,0 +1,150 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 0.000000000000000000e+00 1.733888983726501465e+00
2
+ 1.000000000000000000e+00 1.593058300018310502e+00
3
+ 2.000000000000000000e+00 1.494779181480407670e+00
4
+ 3.000000000000000000e+00 1.429337716102600186e+00
5
+ 4.000000000000000000e+00 1.373311591148376420e+00
6
+ 5.000000000000000000e+00 1.280220198631286666e+00
7
+ 6.000000000000000000e+00 1.216743969917297274e+00
8
+ 7.000000000000000000e+00 1.151879334449767978e+00
9
+ 8.000000000000000000e+00 1.086084175109863326e+00
10
+ 9.000000000000000000e+00 1.050439834594726562e+00
11
+ 1.000000000000000000e+01 9.904136300086975320e-01
12
+ 1.100000000000000000e+01 9.567036032676696777e-01
13
+ 1.200000000000000000e+01 8.834496736526489258e-01
14
+ 1.300000000000000000e+01 7.900542855262756570e-01
15
+ 1.400000000000000000e+01 7.233480453491211160e-01
16
+ 1.500000000000000000e+01 6.199273467063903809e-01
17
+ 1.600000000000000000e+01 5.056837856769561546e-01
18
+ 1.700000000000000000e+01 4.857193410396575817e-01
19
+ 1.800000000000000000e+01 4.253516137599945290e-01
20
+ 1.900000000000000000e+01 4.271430134773254172e-01
21
+ 2.000000000000000000e+01 3.491494953632354736e-01
22
+ 2.100000000000000000e+01 3.284499764442443959e-01
23
+ 2.200000000000000000e+01 3.067165911197662354e-01
24
+ 2.300000000000000000e+01 2.683328092098236084e-01
25
+ 2.400000000000000000e+01 2.582681179046630859e-01
26
+ 2.500000000000000000e+01 2.431777745485305675e-01
27
+ 2.600000000000000000e+01 2.249853909015655518e-01
28
+ 2.700000000000000000e+01 2.175073951482772716e-01
29
+ 2.800000000000000000e+01 1.966677635908126720e-01
30
+ 2.900000000000000000e+01 1.976582705974578857e-01
31
+ 3.000000000000000000e+01 1.807560890913009755e-01
32
+ 3.100000000000000000e+01 1.774290740489959661e-01
33
+ 3.200000000000000000e+01 1.617186486721038929e-01
34
+ 3.300000000000000000e+01 1.491658687591552734e-01
35
+ 3.400000000000000000e+01 1.535357415676116832e-01
36
+ 3.500000000000000000e+01 1.475964188575744629e-01
37
+ 3.600000000000000000e+01 1.511343628168106135e-01
38
+ 3.700000000000000000e+01 1.474127322435379028e-01
39
+ 3.800000000000000000e+01 1.469620764255523682e-01
40
+ 3.900000000000000000e+01 1.410338491201400812e-01
41
+ 4.000000000000000000e+01 1.299730122089385875e-01
42
+ 4.100000000000000000e+01 1.394026368856429998e-01
43
+ 4.200000000000000000e+01 1.289871841669082697e-01
44
+ 4.300000000000000000e+01 1.210025578737258939e-01
45
+ 4.400000000000000000e+01 1.204812556505203275e-01
46
+ 4.500000000000000000e+01 1.196763992309570340e-01
47
+ 4.600000000000000000e+01 1.261142402887344416e-01
48
+ 4.700000000000000000e+01 1.218511372804641779e-01
49
+ 4.800000000000000000e+01 1.194722831249237088e-01
50
+ 4.900000000000000000e+01 1.288411766290664673e-01
51
+ 5.000000000000000000e+01 1.119768887758254977e-01
52
+ 5.100000000000000000e+01 1.179918527603149442e-01
53
+ 5.200000000000000000e+01 1.148449391126632746e-01
54
+ 5.300000000000000000e+01 1.055811911821365329e-01
55
+ 5.400000000000000000e+01 1.184314042329788208e-01
56
+ 5.500000000000000000e+01 1.090945541858673040e-01
57
+ 5.600000000000000000e+01 1.205644398927688543e-01
58
+ 5.700000000000000000e+01 1.117961019277572687e-01
59
+ 5.800000000000000000e+01 1.032131254673004095e-01
60
+ 5.900000000000000000e+01 1.119620203971862793e-01
61
+ 6.000000000000000000e+01 1.121469020843505887e-01
62
+ 6.100000000000000000e+01 1.092023879289627103e-01
63
+ 6.200000000000000000e+01 9.816411733627319891e-02
64
+ 6.300000000000000000e+01 1.041563123464584323e-01
65
+ 6.400000000000000000e+01 9.367077052593231201e-02
66
+ 6.500000000000000000e+01 9.781143367290497104e-02
67
+ 6.600000000000000000e+01 1.039855718612670843e-01
68
+ 6.700000000000000000e+01 1.064048498868942205e-01
69
+ 6.800000000000000000e+01 9.728896617889404297e-02
70
+ 6.900000000000000000e+01 9.509651362895965576e-02
71
+ 7.000000000000000000e+01 1.091342180967330988e-01
72
+ 7.100000000000000000e+01 1.016811817884445135e-01
73
+ 7.200000000000000000e+01 9.881042540073395331e-02
74
+ 7.300000000000000000e+01 9.882293939590454379e-02
75
+ 7.400000000000000000e+01 9.637998342514038363e-02
76
+ 7.500000000000000000e+01 8.356971442699431818e-02
77
+ 7.600000000000000000e+01 9.902868270874023993e-02
78
+ 7.700000000000000000e+01 9.420894980430602472e-02
79
+ 7.800000000000000000e+01 9.882737100124358576e-02
80
+ 7.900000000000000000e+01 8.918197154998779019e-02
81
+ 8.000000000000000000e+01 9.358719289302826205e-02
82
+ 8.100000000000000000e+01 9.849358797073363980e-02
83
+ 8.200000000000000000e+01 9.094970524311066229e-02
84
+ 8.300000000000000000e+01 8.850618600845336359e-02
85
+ 8.400000000000000000e+01 9.836201965808868963e-02
86
+ 8.500000000000000000e+01 8.451730310916900912e-02
87
+ 8.600000000000000000e+01 9.442334473133087713e-02
88
+ 8.700000000000000000e+01 9.012276530265808661e-02
89
+ 8.800000000000000000e+01 9.159993231296539862e-02
90
+ 8.900000000000000000e+01 9.661512672901154120e-02
91
+ 9.000000000000000000e+01 8.947120308876037320e-02
92
+ 9.100000000000000000e+01 8.729224503040314276e-02
93
+ 9.200000000000000000e+01 8.756603598594665805e-02
94
+ 9.300000000000000000e+01 9.022055566310882568e-02
95
+ 9.400000000000000000e+01 8.931825757026672086e-02
96
+ 9.500000000000000000e+01 8.704317510128020685e-02
97
+ 9.600000000000000000e+01 7.626191675662993830e-02
98
+ 9.700000000000000000e+01 8.811458647251128873e-02
99
+ 9.800000000000000000e+01 8.427759408950805109e-02
100
+ 9.900000000000000000e+01 9.522518515586853027e-02
101
+ 1.000000000000000000e+02 8.465408682823180597e-02
102
+ 1.010000000000000000e+02 9.000419974327086847e-02
103
+ 1.020000000000000000e+02 8.839070796966552734e-02
104
+ 1.030000000000000000e+02 8.643541038036346713e-02
105
+ 1.040000000000000000e+02 8.619004189968108853e-02
106
+ 1.050000000000000000e+02 8.245641589164734442e-02
107
+ 1.060000000000000000e+02 8.474771082401275080e-02
108
+ 1.070000000000000000e+02 7.968198657035827082e-02
109
+ 1.080000000000000000e+02 8.239192962646484097e-02
110
+ 1.090000000000000000e+02 8.238035738468170444e-02
111
+ 1.100000000000000000e+02 7.307147681713103970e-02
112
+ 1.110000000000000000e+02 8.209990262985229770e-02
113
+ 1.120000000000000000e+02 8.047128319740295965e-02
114
+ 1.130000000000000000e+02 8.432724773883819303e-02
115
+ 1.140000000000000000e+02 8.143997788429260809e-02
116
+ 1.150000000000000000e+02 8.467923998832702082e-02
117
+ 1.160000000000000000e+02 8.954935073852539340e-02
118
+ 1.170000000000000000e+02 8.563154935836791992e-02
119
+ 1.180000000000000000e+02 8.093344271183014471e-02
120
+ 1.190000000000000000e+02 8.262177705764769953e-02
121
+ 1.200000000000000000e+02 7.641595602035522461e-02
122
+ 1.210000000000000000e+02 8.638529181480407437e-02
123
+ 1.220000000000000000e+02 7.834772765636444092e-02
124
+ 1.230000000000000000e+02 7.112940549850463312e-02
125
+ 1.240000000000000000e+02 7.886535525321961004e-02
126
+ 1.250000000000000000e+02 8.096686303615570346e-02
127
+ 1.260000000000000000e+02 8.270101547241211215e-02
128
+ 1.270000000000000000e+02 8.261188268661498468e-02
129
+ 1.280000000000000000e+02 7.751703858375549872e-02
130
+ 1.290000000000000000e+02 7.759350836277008334e-02
131
+ 1.300000000000000000e+02 7.186951637268065851e-02
132
+ 1.310000000000000000e+02 8.247119784355164129e-02
133
+ 1.320000000000000000e+02 8.166118562221527655e-02
134
+ 1.330000000000000000e+02 7.848928868770599365e-02
135
+ 1.340000000000000000e+02 7.621280252933501642e-02
136
+ 1.350000000000000000e+02 7.483861744403838556e-02
137
+ 1.360000000000000000e+02 7.931372225284576138e-02
138
+ 1.370000000000000000e+02 8.355411291122435968e-02
139
+ 1.380000000000000000e+02 8.021936714649199884e-02
140
+ 1.390000000000000000e+02 7.726681828498840887e-02
141
+ 1.400000000000000000e+02 7.638082504272461493e-02
142
+ 1.410000000000000000e+02 7.744624912738799771e-02
143
+ 1.420000000000000000e+02 8.268800675868988315e-02
144
+ 1.430000000000000000e+02 7.466401755809784491e-02
145
+ 1.440000000000000000e+02 7.775481343269348422e-02
146
+ 1.450000000000000000e+02 7.511828839778900146e-02
147
+ 1.460000000000000000e+02 7.727222442626953403e-02
148
+ 1.470000000000000000e+02 7.235211730003357489e-02
149
+ 1.480000000000000000e+02 6.813707947731018066e-02
150
+ 1.490000000000000000e+02 8.162583112716674527e-02
deepatlas_results/Task002_rnmw153_sim_Zsc/training_results/SegNet/supervised_seg_losses.png ADDED
deepatlas_results/Task002_rnmw153_sim_Zsc/training_results/training_log.txt ADDED
@@ -0,0 +1,515 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Deep Atlas Training Log
2
+ Epoch 1/150:
3
+ reg training loss: 4.7686481952667235
4
+ reg validation loss: 10.761622428894043
5
+ seg training loss: 0.8673096179962159
6
+ seg validation loss: 0.8539321422576904
7
+ Epoch 2/150:
8
+ reg training loss: 4.440142178535462
9
+ seg training loss: 0.7938556432723999
10
+ Epoch 3/150:
11
+ reg training loss: 4.576620316505432
12
+ seg training loss: 0.7535318613052369
13
+ Epoch 4/150:
14
+ reg training loss: 4.510968661308288
15
+ seg training loss: 0.7162851691246033
16
+ Epoch 5/150:
17
+ reg training loss: 3.9539534568786623
18
+ seg training loss: 0.6908728837966919
19
+ Epoch 6/150:
20
+ reg training loss: 4.039778637886047
21
+ reg validation loss: 9.18710708618164
22
+ seg training loss: 0.6406762003898621
23
+ seg validation loss: 0.6347227096557617
24
+ Epoch 7/150:
25
+ reg training loss: 3.7184645175933837
26
+ seg training loss: 0.6085501194000245
27
+ Epoch 8/150:
28
+ reg training loss: 3.935744595527649
29
+ seg training loss: 0.5736919283866883
30
+ Epoch 9/150:
31
+ reg training loss: 4.056983923912048
32
+ seg training loss: 0.5408016562461853
33
+ Epoch 10/150:
34
+ reg training loss: 3.8200072050094604
35
+ seg training loss: 0.514945924282074
36
+ Epoch 11/150:
37
+ reg training loss: 4.281972050666809
38
+ reg validation loss: 8.052876472473145
39
+ seg training loss: 0.49536322355270385
40
+ seg validation loss: 0.5146540403366089
41
+ Epoch 12/150:
42
+ reg training loss: 3.855319881439209
43
+ seg training loss: 0.469755756855011
44
+ Epoch 13/150:
45
+ reg training loss: 3.840291428565979
46
+ seg training loss: 0.42938312888145447
47
+ Epoch 14/150:
48
+ reg training loss: 3.583286929130554
49
+ seg training loss: 0.4022633254528046
50
+ Epoch 15/150:
51
+ reg training loss: 3.728816819190979
52
+ seg training loss: 0.35690702199935914
53
+ Epoch 16/150:
54
+ reg training loss: 3.199250864982605
55
+ reg validation loss: 7.420182704925537
56
+ seg training loss: 0.3086260259151459
57
+ seg validation loss: 0.3790477514266968
58
+ Epoch 17/150:
59
+ reg training loss: 3.4572808742523193
60
+ seg training loss: 0.2525585561990738
61
+ Epoch 18/150:
62
+ reg training loss: 3.419553208351135
63
+ seg training loss: 0.25260117650032043
64
+ Epoch 19/150:
65
+ reg training loss: 3.5318825006484986
66
+ seg training loss: 0.2083510458469391
67
+ Epoch 20/150:
68
+ reg training loss: 3.3581162214279177
69
+ seg training loss: 0.19577946960926057
70
+ Epoch 21/150:
71
+ reg training loss: 3.4845890760421754
72
+ reg validation loss: 6.422091484069824
73
+ seg training loss: 0.1768558830022812
74
+ seg validation loss: 0.27210915088653564
75
+ Epoch 22/150:
76
+ reg training loss: 3.116759204864502
77
+ seg training loss: 0.173543781042099
78
+ Epoch 23/150:
79
+ reg training loss: 2.9422287225723265
80
+ seg training loss: 0.15092406868934632
81
+ Epoch 24/150:
82
+ reg training loss: 3.36854088306427
83
+ seg training loss: 0.13847693502902986
84
+ Epoch 25/150:
85
+ reg training loss: 2.9736003637313844
86
+ seg training loss: 0.12616726756095886
87
+ Epoch 26/150:
88
+ reg training loss: 3.2114731550216673
89
+ reg validation loss: 5.498345851898193
90
+ seg training loss: 0.12545039653778076
91
+ seg validation loss: 0.18503624200820923
92
+ Epoch 27/150:
93
+ reg training loss: 3.0050987243652343
94
+ seg training loss: 0.10894768834114074
95
+ Epoch 28/150:
96
+ reg training loss: 2.9660191535949707
97
+ seg training loss: 0.11309540569782257
98
+ Epoch 29/150:
99
+ reg training loss: 3.056342911720276
100
+ seg training loss: 0.09028413593769073
101
+ Epoch 30/150:
102
+ reg training loss: 2.84787540435791
103
+ seg training loss: 0.0988813042640686
104
+ Epoch 31/150:
105
+ reg training loss: 2.8773739337921143
106
+ reg validation loss: 4.607907295227051
107
+ seg training loss: 0.08548430800437927
108
+ seg validation loss: 0.15550243854522705
109
+ Epoch 32/150:
110
+ reg training loss: 2.5162493228912353
111
+ seg training loss: 0.09200616478919983
112
+ Epoch 33/150:
113
+ reg training loss: 2.9851715087890627
114
+ seg training loss: 0.08785836696624756
115
+ Epoch 34/150:
116
+ reg training loss: 2.868598449230194
117
+ seg training loss: 0.07595799565315246
118
+ Epoch 35/150:
119
+ reg training loss: 2.72953519821167
120
+ seg training loss: 0.07890819311141968
121
+ Epoch 36/150:
122
+ reg training loss: 2.823525643348694
123
+ reg validation loss: 4.391824245452881
124
+ seg training loss: 0.07334268689155579
125
+ seg validation loss: 0.12950095534324646
126
+ Epoch 37/150:
127
+ reg training loss: 2.5901314973831178
128
+ seg training loss: 0.077168008685112
129
+ Epoch 38/150:
130
+ reg training loss: 2.7292463064193724
131
+ seg training loss: 0.07764174640178681
132
+ Epoch 39/150:
133
+ reg training loss: 2.2696269154548645
134
+ seg training loss: 0.07477926909923553
135
+ Epoch 40/150:
136
+ reg training loss: 2.641170156002045
137
+ seg training loss: 0.061516857147216795
138
+ Epoch 41/150:
139
+ reg training loss: 2.665408182144165
140
+ reg validation loss: 4.314706802368164
141
+ seg training loss: 0.06822904944419861
142
+ seg validation loss: 0.12480152398347855
143
+ Epoch 42/150:
144
+ reg training loss: 2.373495626449585
145
+ seg training loss: 0.06400893926620484
146
+ Epoch 43/150:
147
+ reg training loss: 2.5231226563453673
148
+ seg training loss: 0.06552021205425262
149
+ Epoch 44/150:
150
+ reg training loss: 2.610878252983093
151
+ seg training loss: 0.05810818076133728
152
+ Epoch 45/150:
153
+ reg training loss: 2.256824862957001
154
+ seg training loss: 0.06144440770149231
155
+ Epoch 46/150:
156
+ reg training loss: 2.1527690529823302
157
+ reg validation loss: 3.9034616947174072
158
+ seg training loss: 0.06828641891479492
159
+ seg validation loss: 0.130331888794899
160
+ Epoch 47/150:
161
+ reg training loss: 2.4612895011901856
162
+ seg training loss: 0.06633291244506836
163
+ Epoch 48/150:
164
+ reg training loss: 2.3212251901626586
165
+ seg training loss: 0.061132407188415526
166
+ Epoch 49/150:
167
+ reg training loss: 2.3259034633636473
168
+ seg training loss: 0.05672590732574463
169
+ Epoch 50/150:
170
+ reg training loss: 2.3421448111534118
171
+ seg training loss: 0.06588823199272156
172
+ Epoch 51/150:
173
+ reg training loss: 2.237171542644501
174
+ reg validation loss: 4.078887939453125
175
+ seg training loss: 0.050005200505256656
176
+ seg validation loss: 0.1283894181251526
177
+ Epoch 52/150:
178
+ reg training loss: 2.351637578010559
179
+ seg training loss: 0.06055374145507812
180
+ Epoch 53/150:
181
+ reg training loss: 2.1713146924972535
182
+ seg training loss: 0.05193628370761871
183
+ Epoch 54/150:
184
+ reg training loss: 2.2294365763664246
185
+ seg training loss: 0.0519787073135376
186
+ Epoch 55/150:
187
+ reg training loss: 2.107238698005676
188
+ seg training loss: 0.060818183422088626
189
+ Epoch 56/150:
190
+ reg training loss: 2.159134030342102
191
+ reg validation loss: 3.430917263031006
192
+ seg training loss: 0.05758752822875977
193
+ seg validation loss: 0.1234513595700264
194
+ Epoch 57/150:
195
+ reg training loss: 2.136767840385437
196
+ seg training loss: 0.060886308550834656
197
+ Epoch 58/150:
198
+ reg training loss: 2.056178319454193
199
+ seg training loss: 0.059524261951446535
200
+ Epoch 59/150:
201
+ reg training loss: 2.1131672978401186
202
+ seg training loss: 0.05477309226989746
203
+ Epoch 60/150:
204
+ reg training loss: 2.0785787463188172
205
+ seg training loss: 0.05750834941864014
206
+ Epoch 61/150:
207
+ reg training loss: 1.9642852187156676
208
+ reg validation loss: 3.592482089996338
209
+ seg training loss: 0.053974205255508424
210
+ seg validation loss: 0.12239720672369003
211
+ Epoch 62/150:
212
+ reg training loss: 2.1245671629905702
213
+ seg training loss: 0.049260133504867555
214
+ Epoch 63/150:
215
+ reg training loss: 1.9742623448371888
216
+ seg training loss: 0.05120005905628204
217
+ Epoch 64/150:
218
+ reg training loss: 1.8237436056137084
219
+ seg training loss: 0.05164272785186767
220
+ Epoch 65/150:
221
+ reg training loss: 1.8780404806137085
222
+ seg training loss: 0.04497153162956238
223
+ Epoch 66/150:
224
+ reg training loss: 1.8446750402450562
225
+ reg validation loss: 3.5688796043395996
226
+ seg training loss: 0.05031183362007141
227
+ seg validation loss: 0.11342131346464157
228
+ Epoch 67/150:
229
+ reg training loss: 2.089728665351868
230
+ seg training loss: 0.04955594837665558
231
+ Epoch 68/150:
232
+ reg training loss: 1.8650356769561767
233
+ seg training loss: 0.05126414000988007
234
+ Epoch 69/150:
235
+ reg training loss: 1.8945781350135804
236
+ seg training loss: 0.05113123655319214
237
+ Epoch 70/150:
238
+ reg training loss: 1.824099862575531
239
+ seg training loss: 0.050078588724136355
240
+ Epoch 71/150:
241
+ reg training loss: 2.132909834384918
242
+ reg validation loss: 3.5110559463500977
243
+ seg training loss: 0.05057660043239594
244
+ seg validation loss: 0.11465368419885635
245
+ Epoch 72/150:
246
+ reg training loss: 1.7633860468864442
247
+ seg training loss: 0.04853618741035461
248
+ Epoch 73/150:
249
+ reg training loss: 1.9421926975250243
250
+ seg training loss: 0.048684078454971316
251
+ Epoch 74/150:
252
+ reg training loss: 1.770769429206848
253
+ seg training loss: 0.0517791211605072
254
+ Epoch 75/150:
255
+ reg training loss: 1.7527392029762268
256
+ seg training loss: 0.049894532561302184
257
+ Epoch 76/150:
258
+ reg training loss: 1.8100290656089784
259
+ reg validation loss: 3.362490653991699
260
+ seg training loss: 0.03936699628829956
261
+ seg validation loss: 0.11438566446304321
262
+ Epoch 77/150:
263
+ reg training loss: 1.8304568886756898
264
+ seg training loss: 0.04833044111728668
265
+ Epoch 78/150:
266
+ reg training loss: 1.7759044528007508
267
+ seg training loss: 0.05147920846939087
268
+ Epoch 79/150:
269
+ reg training loss: 1.7250861644744873
270
+ seg training loss: 0.048552751541137695
271
+ Epoch 80/150:
272
+ reg training loss: 1.5575646996498107
273
+ seg training loss: 0.04389434158802032
274
+ Epoch 81/150:
275
+ reg training loss: 1.879893720149994
276
+ reg validation loss: 3.3051137924194336
277
+ seg training loss: 0.0443111389875412
278
+ seg validation loss: 0.12079031765460968
279
+ Epoch 82/150:
280
+ reg training loss: 1.8457049012184144
281
+ seg training loss: 0.04815846085548401
282
+ Epoch 83/150:
283
+ reg training loss: 1.7818867683410644
284
+ seg training loss: 0.048137256503105165
285
+ Epoch 84/150:
286
+ reg training loss: 1.6136236906051635
287
+ seg training loss: 0.0441769927740097
288
+ Epoch 85/150:
289
+ reg training loss: 1.5971672058105468
290
+ seg training loss: 0.05148882269859314
291
+ Epoch 86/150:
292
+ reg training loss: 1.6388625502586365
293
+ reg validation loss: 3.5887608528137207
294
+ seg training loss: 0.040018555521965024
295
+ seg validation loss: 0.12744276225566864
296
+ Epoch 87/150:
297
+ reg training loss: 1.6481032490730285
298
+ seg training loss: 0.04862010776996613
299
+ Epoch 88/150:
300
+ reg training loss: 1.6571676015853882
301
+ seg training loss: 0.04402497112751007
302
+ Epoch 89/150:
303
+ reg training loss: 1.7385945796966553
304
+ seg training loss: 0.04975014328956604
305
+ Epoch 90/150:
306
+ reg training loss: 1.4554160475730895
307
+ seg training loss: 0.04796398878097534
308
+ Epoch 91/150:
309
+ reg training loss: 1.6244391560554505
310
+ reg validation loss: 3.2644922733306885
311
+ seg training loss: 0.03874439895153046
312
+ seg validation loss: 0.11166677623987198
313
+ Epoch 92/150:
314
+ reg training loss: 1.475834959745407
315
+ seg training loss: 0.04821090996265411
316
+ Epoch 93/150:
317
+ reg training loss: 1.8129208445549012
318
+ seg training loss: 0.04149874150753021
319
+ Epoch 94/150:
320
+ reg training loss: 1.582091224193573
321
+ seg training loss: 0.04493129253387451
322
+ Epoch 95/150:
323
+ reg training loss: 1.648519515991211
324
+ seg training loss: 0.044540634751319884
325
+ Epoch 96/150:
326
+ reg training loss: 1.530086386203766
327
+ reg validation loss: 3.317974090576172
328
+ seg training loss: 0.0466022789478302
329
+ seg validation loss: 0.13395804166793823
330
+ Epoch 97/150:
331
+ reg training loss: 1.514226257801056
332
+ seg training loss: 0.03777629733085632
333
+ Epoch 98/150:
334
+ reg training loss: 1.5128494262695313
335
+ seg training loss: 0.04635457992553711
336
+ Epoch 99/150:
337
+ reg training loss: 1.4876845955848694
338
+ seg training loss: 0.041973233222961426
339
+ Epoch 100/150:
340
+ reg training loss: 1.5425513625144958
341
+ seg training loss: 0.043033072352409364
342
+ Epoch 101/150:
343
+ reg training loss: 1.4795127868652345
344
+ reg validation loss: 3.278428554534912
345
+ seg training loss: 0.04391896724700928
346
+ seg validation loss: 0.10932398587465286
347
+ Epoch 102/150:
348
+ reg training loss: 1.3456117391586304
349
+ seg training loss: 0.0463683158159256
350
+ Epoch 103/150:
351
+ reg training loss: 1.5337165355682374
352
+ seg training loss: 0.04557084739208221
353
+ Epoch 104/150:
354
+ reg training loss: 1.4175154209136962
355
+ seg training loss: 0.041203317046165464
356
+ Epoch 105/150:
357
+ reg training loss: 1.491098737716675
358
+ seg training loss: 0.04438801407814026
359
+ Epoch 106/150:
360
+ reg training loss: 1.637062907218933
361
+ reg validation loss: 3.325711250305176
362
+ seg training loss: 0.039811649918556215
363
+ seg validation loss: 0.11083932965993881
364
+ Epoch 107/150:
365
+ reg training loss: 1.4294653177261352
366
+ seg training loss: 0.040769177675247195
367
+ Epoch 108/150:
368
+ reg training loss: 1.2746424674987793
369
+ seg training loss: 0.039609703421592715
370
+ Epoch 109/150:
371
+ reg training loss: 1.465555238723755
372
+ seg training loss: 0.04165233075618744
373
+ Epoch 110/150:
374
+ reg training loss: 1.5138072729110719
375
+ seg training loss: 0.0392852783203125
376
+ Epoch 111/150:
377
+ reg training loss: 1.4285594701766968
378
+ reg validation loss: 3.0637738704681396
379
+ seg training loss: 0.04050336182117462
380
+ seg validation loss: 0.10757420212030411
381
+ Epoch 112/150:
382
+ reg training loss: 1.4796598553657532
383
+ seg training loss: 0.04065462350845337
384
+ Epoch 113/150:
385
+ reg training loss: 1.36372629404068
386
+ seg training loss: 0.040871036052703855
387
+ Epoch 114/150:
388
+ reg training loss: 1.4361500144004822
389
+ seg training loss: 0.04593601524829864
390
+ Epoch 115/150:
391
+ reg training loss: 1.4456260681152344
392
+ seg training loss: 0.04071014523506165
393
+ Epoch 116/150:
394
+ reg training loss: 1.4020459055900574
395
+ reg validation loss: 3.108996868133545
396
+ seg training loss: 0.04369308948516846
397
+ seg validation loss: 0.11938655376434326
398
+ Epoch 117/150:
399
+ reg training loss: 1.3788525223731996
400
+ seg training loss: 0.04722652137279511
401
+ Epoch 118/150:
402
+ reg training loss: 1.3215387344360352
403
+ seg training loss: 0.04317383766174317
404
+ Epoch 119/150:
405
+ reg training loss: 1.3698723673820496
406
+ seg training loss: 0.03398588299751282
407
+ Epoch 120/150:
408
+ reg training loss: 1.2778821647167207
409
+ seg training loss: 0.04034335613250732
410
+ Epoch 121/150:
411
+ reg training loss: 1.3698755979537964
412
+ reg validation loss: 2.9927785396575928
413
+ seg training loss: 0.036156189441680905
414
+ seg validation loss: 0.11336451023817062
415
+ Epoch 122/150:
416
+ reg training loss: 1.3979350209236145
417
+ seg training loss: 0.03906717300415039
418
+ Epoch 123/150:
419
+ reg training loss: 1.2636346697807312
420
+ seg training loss: 0.04145587384700775
421
+ Epoch 124/150:
422
+ reg training loss: 1.3199236750602723
423
+ seg training loss: 0.03506969213485718
424
+ Epoch 125/150:
425
+ reg training loss: 1.3449559211730957
426
+ seg training loss: 0.038923582434654234
427
+ Epoch 126/150:
428
+ reg training loss: 1.339024943113327
429
+ reg validation loss: 3.245039463043213
430
+ seg training loss: 0.03790881633758545
431
+ seg validation loss: 0.1098380908370018
432
+ Epoch 127/150:
433
+ reg training loss: 1.260996562242508
434
+ seg training loss: 0.03813325166702271
435
+ Epoch 128/150:
436
+ reg training loss: 1.3546002149581908
437
+ seg training loss: 0.03687379956245422
438
+ Epoch 129/150:
439
+ reg training loss: 1.252830719947815
440
+ seg training loss: 0.040056157112121585
441
+ Epoch 130/150:
442
+ reg training loss: 1.1125893533229827
443
+ seg training loss: 0.0403405487537384
444
+ Epoch 131/150:
445
+ reg training loss: 1.2271615982055664
446
+ reg validation loss: 2.9399733543395996
447
+ seg training loss: 0.03758984208106995
448
+ seg validation loss: 0.10584234446287155
449
+ Epoch 132/150:
450
+ reg training loss: 1.3109357595443725
451
+ seg training loss: 0.046701446175575256
452
+ Epoch 133/150:
453
+ reg training loss: 1.3082359910011292
454
+ seg training loss: 0.04108454883098602
455
+ Epoch 134/150:
456
+ reg training loss: 1.4175715327262879
457
+ seg training loss: 0.03957531452178955
458
+ Epoch 135/150:
459
+ reg training loss: 1.2930214643478393
460
+ seg training loss: 0.036753672361373904
461
+ Epoch 136/150:
462
+ reg training loss: 1.2290011167526245
463
+ reg validation loss: 3.009814500808716
464
+ seg training loss: 0.03787890374660492
465
+ seg validation loss: 0.11481118947267532
466
+ Epoch 137/150:
467
+ reg training loss: 1.2510305166244506
468
+ seg training loss: 0.03525933623313904
469
+ Epoch 138/150:
470
+ reg training loss: 1.3841961979866029
471
+ seg training loss: 0.045733338594436644
472
+ Epoch 139/150:
473
+ reg training loss: 1.1904977083206176
474
+ seg training loss: 0.041322562098503116
475
+ Epoch 140/150:
476
+ reg training loss: 1.1987750947475433
477
+ seg training loss: 0.03969658315181732
478
+ Epoch 141/150:
479
+ reg training loss: 1.2858553409576416
480
+ reg validation loss: 3.063588857650757
481
+ seg training loss: 0.03913751244544983
482
+ seg validation loss: 0.1053217351436615
483
+ Epoch 142/150:
484
+ reg training loss: 1.234222173690796
485
+ seg training loss: 0.04026820361614227
486
+ Epoch 143/150:
487
+ reg training loss: 1.1718730866909026
488
+ seg training loss: 0.03998347520828247
489
+ Epoch 144/150:
490
+ reg training loss: 1.1990990936756134
491
+ seg training loss: 0.03557175695896149
492
+ Epoch 145/150:
493
+ reg training loss: 1.157021552324295
494
+ seg training loss: 0.03615144789218903
495
+ Epoch 146/150:
496
+ reg training loss: 1.2255614519119262
497
+ reg validation loss: 3.1661453247070312
498
+ seg training loss: 0.0391613632440567
499
+ seg validation loss: 0.13170355558395386
500
+ Epoch 147/150:
501
+ reg training loss: 1.1338848412036895
502
+ seg training loss: 0.03860030472278595
503
+ Epoch 148/150:
504
+ reg training loss: 1.1530435025691985
505
+ seg training loss: 0.03843391835689545
506
+ Epoch 149/150:
507
+ reg training loss: 1.19446240067482
508
+ seg training loss: 0.03256092071533203
509
+ Epoch 150/150:
510
+ reg training loss: 1.175407838821411
511
+ seg training loss: 0.03730123043060303
512
+
513
+
514
+ Best reg_net validation loss: 2.9399733543395996
515
+ Best seg_net validation loss: 0.1053217351436615
requirements.txt ADDED
@@ -0,0 +1,164 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ absl-py==1.0.0
2
+ alembic==1.7.7
3
+ antspyx==0.3.2
4
+ argon2-cffi==21.3.0
5
+ argon2-cffi-bindings==21.2.0
6
+ asttokens==2.0.5
7
+ attrs==21.4.0
8
+ autopep8==1.6.0
9
+ backcall==0.2.0
10
+ beautifulsoup4==4.11.1
11
+ bleach==5.0.1
12
+ cachetools==5.1.0
13
+ certifi==2022.5.18.1
14
+ cffi==1.15.1
15
+ charset-normalizer==2.0.12
16
+ chart-studio==1.1.0
17
+ click==8.1.3
18
+ cloudpickle==2.1.0
19
+ cucim==22.4.0
20
+ cycler==0.11.0
21
+ databricks-cli==0.16.6
22
+ debugpy==1.6.0
23
+ decorator==5.1.1
24
+ defusedxml==0.7.1
25
+ docker==5.0.3
26
+ einops==0.4.1
27
+ entrypoints==0.4
28
+ executing==0.8.3
29
+ fastjsonschema==2.16.1
30
+ filelock==3.7.0
31
+ fire==0.4.0
32
+ Flask==2.1.2
33
+ fonttools==4.33.3
34
+ gdown==4.4.0
35
+ gitdb==4.0.9
36
+ GitPython==3.1.27
37
+ google-auth==2.6.6
38
+ google-auth-oauthlib==0.4.6
39
+ greenlet==1.1.2
40
+ grpcio==1.46.3
41
+ gunicorn==20.1.0
42
+ huggingface-hub==0.6.0
43
+ idna==3.3
44
+ imagecodecs==2022.2.22
45
+ imageio==2.19.2
46
+ importlib-metadata==4.11.3
47
+ importlib-resources==5.7.1
48
+ install==1.3.5
49
+ ipykernel==6.13.0
50
+ ipython==8.3.0
51
+ ipython-genutils==0.2.0
52
+ itk==5.2.1.post1
53
+ itk-core==5.2.1.post1
54
+ itk-filtering==5.2.1.post1
55
+ itk-io==5.2.1.post1
56
+ itk-numerics==5.2.1.post1
57
+ itk-registration==5.2.1.post1
58
+ itk-segmentation==5.2.1.post1
59
+ itsdangerous==2.1.2
60
+ jedi==0.18.1
61
+ Jinja2==3.1.2
62
+ joblib==1.1.0
63
+ jsonschema==4.5.1
64
+ jupyter-client==7.3.1
65
+ jupyter-core==4.10.0
66
+ jupyterlab-pygments==0.2.2
67
+ kiwisolver==1.4.2
68
+ lmdb==1.3.0
69
+ Mako==1.2.0
70
+ Markdown==3.3.7
71
+ MarkupSafe==2.1.1
72
+ matplotlib==3.5.2
73
+ matplotlib-inline==0.1.3
74
+ mistune==2.0.3
75
+ mlflow==1.26.0
76
+ -e git+https://github.com/Project-MONAI/MONAI.git@ed233d9b48bd71eb623cf9777ad9b60142c8ad66#egg=monai
77
+ nbclient==0.6.6
78
+ nbconvert==6.5.0
79
+ nbformat==5.4.0
80
+ nest-asyncio==1.5.5
81
+ networkx==2.8.1
82
+ nibabel==3.2.2
83
+ notebook==6.4.12
84
+ numpy==1.22.3
85
+ oauthlib==3.2.0
86
+ openslide-python==1.1.2
87
+ packaging==21.3
88
+ pandas==1.4.2
89
+ pandocfilters==1.5.0
90
+ parso==0.8.3
91
+ patsy==0.5.2
92
+ pexpect==4.8.0
93
+ pickleshare==0.7.5
94
+ Pillow==9.1.1
95
+ plotly==5.8.0
96
+ prometheus-client==0.14.1
97
+ prometheus-flask-exporter==0.20.1
98
+ prompt-toolkit==3.0.29
99
+ protobuf==3.20.1
100
+ psutil==5.9.0
101
+ ptyprocess==0.7.0
102
+ pure-eval==0.2.2
103
+ pyasn1==0.4.8
104
+ pyasn1-modules==0.2.8
105
+ pycodestyle==2.8.0
106
+ pycparser==2.21
107
+ Pygments==2.12.0
108
+ PyJWT==2.4.0
109
+ pynrrd==0.4.3
110
+ pyparsing==3.0.9
111
+ pyrsistent==0.18.1
112
+ PySocks==1.7.1
113
+ python-dateutil==2.8.2
114
+ pytorch-ignite==0.4.8
115
+ pytz==2022.1
116
+ PyWavelets==1.3.0
117
+ PyYAML==6.0
118
+ pyzmq==22.3.0
119
+ querystring-parser==1.2.4
120
+ regex==2022.4.24
121
+ requests==2.27.1
122
+ requests-oauthlib==1.3.1
123
+ retrying==1.3.3
124
+ rsa==4.8
125
+ scikit-image==0.19.2
126
+ scikit-learn==1.1.1
127
+ scipy==1.8.1
128
+ Send2Trash==1.8.0
129
+ six==1.16.0
130
+ slicerio==0.1.3
131
+ smmap==5.0.0
132
+ soupsieve==2.3.2.post1
133
+ SQLAlchemy==1.4.36
134
+ sqlparse==0.4.2
135
+ stack-data==0.2.0
136
+ statsmodels==0.13.2
137
+ tabulate==0.8.9
138
+ tenacity==8.0.1
139
+ tensorboard==2.9.0
140
+ tensorboard-data-server==0.6.1
141
+ tensorboard-plugin-wit==1.8.1
142
+ tensorboardX==2.5
143
+ termcolor==1.1.0
144
+ terminado==0.15.0
145
+ threadpoolctl==3.1.0
146
+ tifffile==2022.5.4
147
+ tinycss2==1.1.1
148
+ tokenizers==0.12.1
149
+ toml==0.10.2
150
+ torch==1.11.0+cu113
151
+ torchaudio==0.11.0+cu113
152
+ torchvision==0.12.0+cu113
153
+ tornado==6.1
154
+ tqdm==4.64.0
155
+ traitlets==5.3.0
156
+ transformers==4.19.2
157
+ typing_extensions==4.2.0
158
+ urllib3==1.26.9
159
+ wcwidth==0.2.5
160
+ webcolors==1.12
161
+ webencodings==0.5.1
162
+ websocket-client==1.3.2
163
+ Werkzeug==2.1.2
164
+ zipp==3.8.0