xmcmic committed commit 701efb1 (1 parent: 7e0c0ee)

Upload folder using huggingface_hub

processed_code/preprocess_ctrate_train.py ADDED
@@ -0,0 +1,148 @@
+ import os
+ import nibabel as nib
+ import pandas as pd
+ import numpy as np
+ import torch
+ import monai
+ import torch.nn.functional as F
+ from multiprocessing import Pool
+ from tqdm import tqdm
+
+ def read_nii_files(directory):
+     """
+     Retrieve paths of all NIfTI files in the given directory.
+
+     Args:
+         directory (str): Path to the directory containing NIfTI files.
+
+     Returns:
+         list: List of paths to NIfTI files.
+     """
+     nii_files = []
+     for root, dirs, files in os.walk(directory):
+         for file in files:
+             # Keep only the volumes whose file names end in '1.nii.gz'
+             if file.endswith('1.nii.gz'):
+                 nii_files.append(os.path.join(root, file))
+     return nii_files
+
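+ # Note: endswith('1.nii.gz') matches any name ending in "1.nii.gz" (including,
+ # say, *_11.nii.gz), not strictly the first reconstruction; a stricter
+ # file.endswith('_1.nii.gz') check may be what is intended (an assumption,
+ # not part of the original code).
+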
+ def read_nii_data(file_path):
+     """
+     Read NIfTI file data.
+
+     Args:
+         file_path (str): Path to the NIfTI file.
+
+     Returns:
+         np.ndarray: NIfTI file data.
+     """
+     try:
+         nii_img = nib.load(file_path)
+         nii_data = nii_img.get_fdata()
+         return nii_data
+     except Exception as e:
+         print(f"Error reading file {file_path}: {e}")
+         return None
+
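+ # Note: read_nii_data is not called below; process_file loads volumes through
+ # the MONAI pipeline instead.
+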
+ def resize_array(array, current_spacing, target_spacing):
+     """
+     Resize the array to match the target spacing.
+
+     Args:
+         array (torch.Tensor): Input tensor of shape (1, 1, z, x, y).
+         current_spacing (tuple): Current voxel spacing (z_spacing, xy_spacing, xy_spacing).
+         target_spacing (tuple): Target voxel spacing (target_z_spacing, target_x_spacing, target_y_spacing).
+
+     Returns:
+         np.ndarray: Resized array.
+     """
+     # Calculate the new dimensions from the spacing ratio
+     original_shape = array.shape[2:]
+     scaling_factors = [
+         current_spacing[i] / target_spacing[i] for i in range(len(original_shape))
+     ]
+     new_shape = [
+         int(original_shape[i] * scaling_factors[i]) for i in range(len(original_shape))
+     ]
+     # Resize the array with trilinear interpolation
+     resized_array = F.interpolate(array, size=new_shape, mode='trilinear', align_corners=False).cpu().numpy()
+     return resized_array
+
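+ # Worked example of the arithmetic above: a 300-slice volume with z-spacing
+ # 1.5 mm resampled to the 3.0 mm target gives a scaling factor of
+ # 1.5 / 3.0 = 0.5, i.e. an output of int(300 * 0.5) = 150 slices.
+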
+ def process_file(file_path):
+     """
+     Preprocess a single NIfTI file and save the result.
+
+     Args:
+         file_path (str): Path to the NIfTI file.
+
+     Returns:
+         None
+     """
+     monai_loader = monai.transforms.Compose(
+         [
+             monai.transforms.LoadImaged(keys=['image']),
+             # AddChanneld was removed in MONAI 1.x; EnsureChannelFirstd is the
+             # closest replacement on recent MONAI versions.
+             monai.transforms.AddChanneld(keys=['image']),
+             monai.transforms.Orientationd(axcodes="LPS", keys=['image']),
+             # Spacingd is left commented out; voxel spacing is resampled
+             # manually below via resize_array.
+             # monai.transforms.Spacingd(keys=["image"], pixdim=(1, 1, 3), mode=("bilinear")),
+             monai.transforms.CropForegroundd(keys=["image"], source_key="image"),
+             monai.transforms.ToTensord(keys=["image"]),
+         ]
+     )
+
+     dictionary = monai_loader({'image': file_path})
+     img_data = dictionary['image']
+
+     file_name = os.path.basename(file_path)
+     # Look up the volume's rescale and spacing metadata by name; `df` is the
+     # metadata DataFrame defined under __main__ below.
+     row = df[df['VolumeName'] == file_name]
+     slope = float(row["RescaleSlope"].iloc[0])
+     intercept = float(row["RescaleIntercept"].iloc[0])
+     xy_spacing = float(row["XYSpacing"].iloc[0][1:][:-2].split(",")[0])
+     z_spacing = float(row["ZSpacing"].iloc[0])
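+     # Worked example for the XYSpacing parsing above, assuming a bracketed
+     # string format: "[0.7, 0.7]"[1:] -> "0.7, 0.7]"; then [:-2] -> "0.7, 0.";
+     # then .split(",")[0] -> "0.7".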
+
+     # Define the target spacing values for SAT segmentation
+     target_x_spacing = 1.0
+     target_y_spacing = 1.0
+     target_z_spacing = 3.0
+
+     current = (z_spacing, xy_spacing, xy_spacing)
+     target = (target_z_spacing, target_x_spacing, target_y_spacing)
+     # Apply the DICOM rescale to get Hounsfield units
+     img_data = slope * img_data + intercept
+
+     # Drop the channel axis and reorder to (z, x, y) for resampling
+     img_data = img_data[0].numpy()
+     img_data = img_data.transpose(2, 0, 1)
+     tensor = torch.tensor(img_data)
+     tensor = tensor.unsqueeze(0).unsqueeze(0)  # add batch and channel dims
+
+     resized_array = resize_array(tensor, current, target)
+     resized_array = resized_array[0][0]
+     resized_array = resized_array.transpose(1, 2, 0)
+     # print('resized:', resized_array.shape)
+     # resized: (231, 387, 387)
+
+     save_folder = "../upload_data/train_preprocessed/"  # save folder for the preprocessed volumes
+     # Rebuild the per-patient / per-scan folder layout from the volume name
+     folder_path_new = os.path.join(save_folder, "train_" + file_name.split("_")[1], "train_" + file_name.split("_")[1] + file_name.split("_")[2])
+     os.makedirs(folder_path_new, exist_ok=True)
+     save_path = os.path.join(folder_path_new, file_name)
+
+     # Save as NIfTI with an identity affine; note that the 1 x 1 x 3 mm target
+     # spacing is therefore not recorded in the output header
+     image_nifti = nib.Nifti1Image(resized_array, affine=np.eye(4))
+     nib.save(image_nifti, save_path)
+
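+ # Note: process_file reads the module-level DataFrame `df` assigned under
+ # __main__ below. Worker processes inherit it with the fork start method
+ # (the Linux default); with spawn (the Windows and recent macOS default)
+ # the lookup would fail, since __main__-guarded code does not run in workers.
+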
+ # Example usage:
+ if __name__ == "__main__":
+     split_to_preprocess = '../src_data/train'  # select the split to preprocess (train here; point at the validation or test folder for those splits)
+     nii_files = read_nii_files(split_to_preprocess)
+     print(len(nii_files))
+
+     df = pd.read_csv("../src_data/metadata/train_metadata.csv")  # select the metadata for the same split
+
+     num_workers = 18  # number of worker processes
+
+     # Process the files using multiprocessing with a tqdm progress bar
+     with Pool(num_workers) as pool:
+         list(tqdm(pool.imap(process_file, nii_files), total=len(nii_files)))
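
After the script finishes, a quick sanity check on one output volume can confirm the preprocessing ran as expected. The path below is a hypothetical example that follows the folder scheme built in process_file (patient folder "train_1", scan folder "train_1a"); actual names depend on the CT-RATE files present:

    import nibabel as nib
    import numpy as np

    # Hypothetical example path; adjust to a file that exists locally
    vol = nib.load("../upload_data/train_preprocessed/train_1/train_1a/train_1_a_1.nii.gz")
    print(vol.shape)  # spatial size after resampling to ~1 x 1 x 3 mm
    assert np.allclose(vol.affine, np.eye(4))  # the script saves with an identity affine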