forked from QSMxT/QSMxT
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathrun_5_analysis.py
executable file
·186 lines (153 loc) · 6.11 KB
/
run_5_analysis.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
#!/usr/bin/env python3
import numpy as np
import nibabel as nib
import argparse
import os
from glob import glob
# get labels dictionary by parsing a labels CSV file
def load_labels(label_filepath):
    """Parse a labels CSV file into a labels dictionary.

    Each line of the file is `label_number,label_name`. Returns a dict
    mapping each ROI name to the sorted-by-file-order list of segmentation
    numbers it covers -- e.g. { 'Accumbens' : [2, 13] } -- with keys in
    sorted name order.
    """
    collected = {}
    # context manager guarantees the file handle is closed even if a
    # malformed line raises (original left the handle open on error)
    with open(label_filepath) as label_file:
        for line in label_file:
            line = line.strip()
            if not line:
                continue  # tolerate blank/trailing lines in the CSV
            label_num, label_name = line.split(',')
            collected.setdefault(label_name, []).append(int(label_num))
    # rebuild with names sorted, matching the original's key ordering
    return {name: collected[name] for name in sorted(collected)}
# give names to segmentation labels that don't have one
def update_labels(labels, segmentation):
    """Add entries to `labels` for label numbers with no ROI name.

    For every nonzero label number present in `segmentation` that is not
    covered by any existing ROI name, adds an entry whose name is the
    number itself (e.g. labels['7'] = [7]). Mutates `labels` in place and
    returns None. Label 0 is treated as background and skipped.
    """
    # bug fix: the original iterated over the global `seg` instead of the
    # `segmentation` parameter, silently ignoring its argument
    # hoist the set of already-named label numbers out of the loop
    covered = set()
    for label_nums in labels.values():
        covered.update(label_nums)
    for seg_num in sorted(set(segmentation)):
        if seg_num == 0:
            continue  # background
        if seg_num not in covered:
            labels[str(int(seg_num))] = [int(seg_num)]
# get statistics for each label based on segmentations and qsm data
def get_stats(labels, seg, qsm):
    """Compute per-ROI statistics over nonzero QSM voxels.

    For each ROI name in `labels`, selects the QSM voxels whose
    segmentation value matches any of the ROI's label numbers and whose
    QSM value is nonzero. Returns a dict mapping ROI name to
    [num_voxels, min, max, median, mean, std], or to [] when the ROI has
    no such voxels.
    """
    label_stats = {}
    for roi_name, label_ids in labels.items():
        # boolean mask: voxels belonging to any of this ROI's label ids
        roi_mask = np.isin(seg, label_ids)
        values = qsm[(qsm != 0) & roi_mask]
        # skip if no values
        if len(values) == 0:
            label_stats[roi_name] = []
            continue
        # get statistics and store them
        label_stats[roi_name] = [
            len(values),
            np.min(values),
            np.max(values),
            np.median(values),
            np.mean(values),
            np.std(values),
        ]
    return label_stats
if __name__ == "__main__":
    # command-line interface for the analysis step
    parser = argparse.ArgumentParser(
        description="QSMxT qsm: QSM Reconstruction Pipeline",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument(
        '--segmentations',
        nargs='+',
        required=True,
        help='Segmentation files to use. This can be either a single segmentation file, or one for each result in the QSM directory.'
    )
    parser.add_argument(
        '--qsm_files',
        nargs='+',
        required=True,
        help='QSM files to analyse using the segmentation/s.'
    )
    parser.add_argument(
        '--out_dir',
        help='Output directory to write the quantitative data to.'
    )
    parser.add_argument(
        '--labels_file',
        default=None,
        help='Optional labels CSV file to include named fields in the output. The CSV should contain '+
             'segmentation numbers in the first column and ROI names in the second. The aseg_labels.csv '+
             'file contains labels for the aseg atlas used in the segmentation pipeline.'
    )
    args = parser.parse_args()
    # ensure directories are complete and absolute
    # NOTE(review): --out_dir has no default and is not required=True;
    # omitting it makes os.path.abspath(None) raise — confirm whether it
    # should be marked required
    args.out_dir = os.path.abspath(args.out_dir)
    os.makedirs(os.path.abspath(args.out_dir), exist_ok=True)
    # QSM files are processed in sorted order (pairs with sorted segmentations below)
    files_qsm = sorted(args.qsm_files)
    # load the optional ROI-name lookup (name -> list of label numbers)
    if args.labels_file:
        args.labels_file = os.path.abspath(args.labels_file)
        labels_orig = load_labels(args.labels_file)
    else:
        labels_orig = {}
    # for each segmentation file
    if len(args.segmentations) > 1:
        # one segmentation per QSM file, paired by sorted order
        # NOTE(review): assumes len(segmentations) == len(qsm_files);
        # a mismatch raises IndexError — confirm against the pipeline
        files_seg = sorted(args.segmentations)
        for i in range(len(files_seg)):
            print(f"Analysing file {os.path.split(files_qsm[i])[-1]} with segmentation {os.path.split(files_seg[i])[-1]}")
            # load subject and segmentation data as flat 1D arrays
            nii_seg = nib.load(files_seg[i])
            nii_qsm = nib.load(files_qsm[i])
            seg = nii_seg.get_fdata().flatten()
            qsm = nii_qsm.get_fdata().flatten()
            # update labels with this segmentation (adds numeric names for unnamed ids)
            labels = labels_orig.copy()
            update_labels(labels, seg)
            # get statistics for each label name
            label_stats = get_stats(labels, seg, qsm)
            # write header to file; one output CSV named after the segmentation file
            # (either .nii or .nii.gz extension is normalised to .csv)
            f_name = (files_seg[i].split('/')[-1]).replace('.nii.gz', '.nii').replace('.nii', '.csv')
            f = open(os.path.join(args.out_dir, f_name), 'w')
            f.write('roi,num_voxels,min,max,median,mean,std\n')
            # write data to file (an empty stats list yields a row with only the roi name)
            for label_name in labels.keys():
                line = [label_name]
                line.extend(label_stats[label_name])
                line = ",".join([str(x) for x in line])
                f.write(line)
                f.write('\n')
            # close file
            f.close()
    else:
        # single segmentation file shared by all QSM files
        nii_seg = nib.load(args.segmentations[0])
        seg = nii_seg.get_fdata().flatten()
        # update labels with this segmentation
        labels = labels_orig.copy()
        update_labels(labels, seg)
        # write header to file: one combined CSV named after the segmentation,
        # with a leading subject column to distinguish rows per QSM file
        f_name = os.path.split(args.segmentations[0])[1].replace('.nii.gz', '.nii').replace('.nii', '.csv')
        f = open(os.path.join(args.out_dir, f_name), 'w')
        f.write('subject,roi,num_voxels,min,max,median,mean,std\n')
        # for each subject
        for i in range(len(files_qsm)):
            # load the data
            nii_qsm = nib.load(files_qsm[i])
            qsm = nii_qsm.get_fdata().flatten()
            # get statistics for each label name
            label_stats = get_stats(labels, seg, qsm)
            # write data to file
            for label_name in labels.keys():
                line = [os.path.split(files_qsm[i])[1], label_name]
                line.extend(label_stats[label_name])
                line = ",".join([str(x) for x in line])
                f.write(line)
                f.write('\n')
        # close file
        f.close()