Chris Oswald committed
Commit 01d5f4f · 1 Parent(s): dec2fcc
added resize parameter
SPIDER.py CHANGED
@@ -36,10 +36,18 @@ def import_csv_data(filepath: str) -> List[Dict[str, str]]:
             results.append(line)
     return results
 
+def standardize_3D_image(image: np.ndarray) -> np.ndarray:
+    """TODO"""
+    if image.shape[0] < image.shape[2]:
+        image = np.transpose(image, axes=[1, 2, 0])
+    return image
+
 # Define constants
 N_PATIENTS = 218
 MIN_IVD = 0
 MAX_IVD = 9
+DEFAULT_SCAN_TYPES = ['t1', 't2', 't2_SPACE']
+DEFAULT_RESIZE = (512, 512, 30)
 
 # TODO: Add BibTeX citation
 # Find for instance the citation on arxiv or on the dataset repo/website
@@ -82,10 +90,12 @@ class CustomBuilderConfig(datasets.BuilderConfig):
         data_dir: Optional[str] = None,
         data_files: Optional[Union[str, Sequence, Mapping]] = None,
         description: Optional[str] = None,
-        scan_types: List[str] =
+        scan_types: List[str] = DEFAULT_SCAN_TYPES,
+        resize_dims: Tuple[int, int, int] = DEFAULT_RESIZE,
     ):
         super().__init__(name, version, data_dir, data_files, description)
         self.scan_types = scan_types
+        self.resize_dims = resize_dims
 
 
 class SPIDER(datasets.GeneratorBasedBuilder):
@@ -95,34 +105,49 @@ class SPIDER(datasets.GeneratorBasedBuilder):
 
     BUILDER_CONFIG_CLASS = CustomBuilderConfig
 
-    BUILDER_CONFIGS = [
-        CustomBuilderConfig(
-            name="all_scan_types",
-            version=VERSION,
-            description="Use images of all scan types (t1, t2, t2 SPACE)",
-            scan_types=['t1', 't2', 't2_SPACE'],
-        ),
-        CustomBuilderConfig(
-            name="t1_scan_types",
-            version=VERSION,
-            description="Use images of t1 scan types only",
-            scan_types=['t1'],
-        ),
-        CustomBuilderConfig(
-            name="t2_scan_types",
-            version=VERSION,
-            description="Use images of t2 scan types only",
-            scan_types=['t2'],
-        ),
-        CustomBuilderConfig(
-            name="t2_SPACE_scan_types",
-            version=VERSION,
-            description="Use images of t2 SPACE scan types only",
-            scan_types=['t2_SPACE'],
-        ),
-    ]
-
-    DEFAULT_CONFIG_NAME = "all_scan_types"
+    # BUILDER_CONFIGS = [
+    #     CustomBuilderConfig(
+    #         name="all_scan_types",
+    #         version=VERSION,
+    #         description="Use images of all scan types (t1, t2, t2 SPACE)",
+    #         scan_types=['t1', 't2', 't2_SPACE'],
+    #         resize_dims=DEFAULT_RESIZE,
+    #     ),
+    #     CustomBuilderConfig(
+    #         name="t1_scan_types",
+    #         version=VERSION,
+    #         description="Use images of t1 scan types only",
+    #         scan_types=['t1'],
+    #         resize_dims=DEFAULT_RESIZE,
+    #     ),
+    #     CustomBuilderConfig(
+    #         name="t2_scan_types",
+    #         version=VERSION,
+    #         description="Use images of t2 scan types only",
+    #         scan_types=['t2'],
+    #         resize_dims=DEFAULT_RESIZE,
+    #     ),
+    #     CustomBuilderConfig(
+    #         name="t2_SPACE_scan_types",
+    #         version=VERSION,
+    #         description="Use images of t2 SPACE scan types only",
+    #         scan_types=['t2_SPACE'],
+    #         resize_dims=DEFAULT_RESIZE,
+    #     ),
+    # ]
+
+    # DEFAULT_CONFIG_NAME = "all_scan_types"
+
+    def __init__(
+        self,
+        *args,
+        scan_types: List[str] = DEFAULT_SCAN_TYPES,
+        resize_dims: Tuple[int, int, int] = DEFAULT_RESIZE,
+        **kwargs,
+    ):
+        super().__init__(*args, **kwargs)
+        self.scan_types = scan_types
+        self.resize_dims = resize_dims
 
     def _info(self):
         """
@@ -133,7 +158,7 @@ class SPIDER(datasets.GeneratorBasedBuilder):
                 "patient_id": datasets.Value("string"),
                 "scan_type": datasets.Value("string"),
                 # "raw_image": datasets.Image(),
-                "numeric_array": datasets.Array3D(dtype='int16'),
+                "numeric_array": datasets.Array3D(shape=self.resize_dims, dtype='int16'),
                 "metadata": {
                     "num_vertebrae": datasets.Value(dtype="string"),
                     "num_discs": datasets.Value(dtype="string"),
@@ -214,14 +239,14 @@ class SPIDER(datasets.GeneratorBasedBuilder):
         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
         paths_dict = dl_manager.download_and_extract(_URLS)
-        scan_types = self.config.scan_types
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 gen_kwargs={
                     "paths_dict": paths_dict,
                     "split": "train",
-                    "scan_types": scan_types,
+                    "scan_types": self.scan_types,
+                    "resize_dims": self.resize_dims,
                 },
             ),
             datasets.SplitGenerator(
@@ -229,7 +254,8 @@ class SPIDER(datasets.GeneratorBasedBuilder):
                 gen_kwargs={
                     "paths_dict": paths_dict,
                     "split": "validate",
-                    "scan_types": scan_types,
+                    "scan_types": self.scan_types,
+                    "resize_dims": self.resize_dims,
                 },
             ),
             datasets.SplitGenerator(
@@ -237,7 +263,8 @@ class SPIDER(datasets.GeneratorBasedBuilder):
                 gen_kwargs={
                     "paths_dict": paths_dict,
                     "split": "test",
-                    "scan_types": scan_types,
+                    "scan_types": self.scan_types,
+                    "resize_dims": self.resize_dims,
                 },
             ),
         ]
@@ -245,8 +272,9 @@ class SPIDER(datasets.GeneratorBasedBuilder):
     def _generate_examples(
         self,
         paths_dict: Dict[str, str],
-        split: str
-        scan_types: List[str]
+        split: str,
+        scan_types: List[str],
+        resize_dims: Tuple[int, int, int],
         validate_share: float = 0.3,
         test_share: float = 0.2,
         raw_image: bool = True,
@@ -385,7 +413,7 @@ class SPIDER(datasets.GeneratorBasedBuilder):
             if col not in ['Patient']
         }
 
-        #
+        # Get list of image and mask data files
        image_files = [
            file for file in os.listdir(os.path.join(paths_dict['images'], 'images'))
            if file.endswith('.mha')
@@ -398,7 +426,7 @@ class SPIDER(datasets.GeneratorBasedBuilder):
         ]
         assert len(mask_files) > 0, "No mask files found--check directory path."
 
-        # Filter image and mask data based on scan types
+        # Filter image and mask data files based on scan types
         image_files = [
             file for file in image_files
             if any(scan_type in file for scan_type in scan_types)
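Usage note: with these changes, scan_types and resize_dims can be forwarded to the builder when loading the dataset. The snippet below is a minimal sketch, not part of the commit; the repo id is hypothetical and it assumes the script is hosted on the Hub with remote code enabled.

import datasets

ds = datasets.load_dataset(
    "chrisoswald/SPIDER",         # hypothetical repo id, for illustration only
    scan_types=["t1", "t2"],      # forwarded to SPIDER.__init__ / CustomBuilderConfig
    resize_dims=(512, 512, 30),   # matches DEFAULT_RESIZE introduced in this commit
    trust_remote_code=True,       # required for dataset loading scripts
)

The hunks above add the resize_dims plumbing (constants, config field, split-generator kwargs, and the Array3D feature shape) but do not show the resampling step itself. A minimal sketch of how a standardized (H, W, D) volume might be brought to resize_dims, assuming scipy is available; this helper is illustrative and is not the author's implementation:

import numpy as np
from scipy import ndimage

def resize_3D_image(image: np.ndarray, resize_dims: tuple) -> np.ndarray:
    # Compute per-axis zoom factors so the output approximates resize_dims;
    # rounding can leave an off-by-one, so crop or pad if an exact shape is required.
    factors = [target / source for target, source in zip(resize_dims, image.shape)]
    return ndimage.zoom(image, zoom=factors, order=1).astype(np.int16)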