Chris Oswald committed · Commit 76efad4 · Parent(s): c5eade3

refactored to enable sharding
SPIDER.py CHANGED
@@ -16,7 +16,7 @@
 # Import packages
 import csv
 import os
-from typing import Dict, List, Mapping, Optional, Sequence, Tuple, Union
+from typing import Dict, List, Mapping, Optional, Sequence, Set, Tuple, Union
 
 import numpy as np
 import pandas as pd
@@ -35,6 +35,13 @@ def import_csv_data(filepath: str) -> List[Dict[str, str]]:
         results.append(line)
     return results
 
+def subset_file_list(all_files: List[str], subset_ids: Set[int]):
+    """Subset files pertaining to individuals in the subset_ids arg."""
+    return [
+        file for file in all_files
+        if any(str(id_val) == file.split('_')[0] for id_val in subset_ids)
+    ]
+
 def standardize_3D_image(
     image: np.ndarray,
     resize_shape: Tuple[int, int, int]
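The new `subset_file_list` helper keys each file to a patient through the numeric prefix of its filename. A minimal usage sketch, assuming the `<patient_id>_<scan_type>.mha` naming pattern the comprehension implies (the filenames below are hypothetical):

from typing import List, Set

def subset_file_list(all_files: List[str], subset_ids: Set[int]):
    """Subset files pertaining to individuals in the subset_ids arg."""
    return [
        file for file in all_files
        if any(str(id_val) == file.split('_')[0] for id_val in subset_ids)
    ]

# Hypothetical filenames for illustration only
files = ['1_t1.mha', '1_t2.mha', '2_t1.mha', '15_t2_SPACE.mha']

# Exact comparison against the prefix before the first underscore means
# patient 1 does not accidentally capture patient 15's files.
print(subset_file_list(files, {1}))   # ['1_t1.mha', '1_t2.mha']
print(subset_file_list(files, {15}))  # ['15_t2_SPACE.mha']

The exact-match comparison (rather than a substring test like `str(id_val) in file`) is what keeps overlapping IDs such as 1 and 15 from colliding.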
@@ -130,10 +137,21 @@ class CustomBuilderConfig(datasets.BuilderConfig):
         description: Optional[str] = None,
         scan_types: List[str] = DEFAULT_SCAN_TYPES,
         resize_shape: Tuple[int, int, int] = DEFAULT_RESIZE,
+        shuffle: bool = True,
     ):
         super().__init__(name, version, data_dir, data_files, description)
-        self.scan_types = scan_types
+        self.scan_types = self.validate_scan_types(scan_types)
         self.resize_shape = resize_shape
+        self.shuffle = shuffle
+
+    def validate_scan_types(self, scan_types):
+        for item in scan_types:
+            if item not in ['t1', 't2', 't2_SPACE']:
+                raise ValueError(
+                    f'Scan type "{item}" not recognized as valid scan type. '
+                    'Verify scan type argument.'
+                )
+        return scan_types
 
 
 class SPIDER(datasets.GeneratorBasedBuilder):
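Moving validation into `CustomBuilderConfig` means a bad scan type now fails at config construction rather than deep inside example generation. A small sketch of the behavior, assuming the constructor's remaining parameters keep their defaults ('flair' is a deliberately invalid scan type):

config = CustomBuilderConfig(scan_types=['t1', 't2'])  # passes validation

try:
    CustomBuilderConfig(scan_types=['flair'])
except ValueError as err:
    print(err)  # Scan type "flair" not recognized as valid scan type. ...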
@@ -143,30 +161,44 @@ class SPIDER(datasets.GeneratorBasedBuilder):
     DEFAULT_WRITER_BATCH_SIZE = 16  # PyArrow default is too large for image data
     VERSION = datasets.Version("1.1.0")
     BUILDER_CONFIG_CLASS = CustomBuilderConfig
-
-
-
-
-
-
-
-
-
-
-
+    BUILDER_CONFIGS = [
+        CustomBuilderConfig(
+            name="default",
+            description="Load the full dataset",
+        ),
+        CustomBuilderConfig(
+            name="demo",
+            description="Generate 10 examples for demonstration",
+        )
+    ]
+    DEFAULT_CONFIG_NAME = "default"
+
+    # def __init__(
+    #     self,
+    #     *args,
+    #     scan_types: List[str] = DEFAULT_SCAN_TYPES,
+    #     resize_shape: Tuple[int, int, int] = DEFAULT_RESIZE,
+    #     shuffle: bool = True,
+    #     **kwargs,
+    # ):
+    #     super().__init__(*args, **kwargs)
+    #     self.scan_types = self.config.scan_types
+    #     self.resize_shape = self.config.resize_shape
+    #     self.shuffle = self.config.shuffle
 
     def _info(self):
         """Specify datasets.DatasetInfo object containing information and typing
         for the dataset."""
 
-        image_size = self.config.resize_shape
         features = datasets.Features({
             "patient_id": datasets.Value("string"),
             "scan_type": datasets.Value("string"),
+            "image": datasets.Image(),
+            "mask": datasets.Image(),
+            # "image": datasets.Array3D(shape=self.config.resize_shape, dtype='uint8'),
+            # "mask": datasets.Array3D(shape=self.config.resize_shape, dtype='uint8'),
             "image_path": datasets.Value("string"),
             "mask_path": datasets.Value("string"),
-            "image_array": datasets.Array3D(shape=image_size, dtype='uint8'),
-            "mask_array": datasets.Array3D(shape=image_size, dtype='uint8'),
             "metadata": {
                 "num_vertebrae": datasets.Value(dtype="string"),  # TODO: more specific types
                 "num_discs": datasets.Value(dtype="string"),
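The named configs make the builder selectable through `load_dataset`, with "demo" capping each split at 10 examples (see the `_split_generators` hunk below). A hedged sketch; the Hub repo id here is an assumption, substitute the dataset's actual path:

from datasets import load_dataset

# "demo" picks the small demonstration config from BUILDER_CONFIGS
ds = load_dataset("cdoswald/SPIDER", name="demo", trust_remote_code=True)

# Extra keyword arguments flow through to CustomBuilderConfig, so
# per-load overrides still work with the "default" config
ds = load_dataset(
    "cdoswald/SPIDER",
    name="default",
    scan_types=["t1"],
    trust_remote_code=True,
)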
@@ -228,87 +260,31 @@ class SPIDER(datasets.GeneratorBasedBuilder):
             citation=_CITATION,
         )
 
-    def _split_generators(self, dl_manager):
-        """Download and extract data and define splits based on configuration."""
-
-        paths_dict = dl_manager.download_and_extract(_URLS)
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={
-                    "paths_dict": paths_dict,
-                    "split": "train",
-                    "scan_types": self.scan_types,
-                    "resize_shape": self.resize_shape,
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                gen_kwargs={
-                    "paths_dict": paths_dict,
-                    "split": "validate",
-                    "scan_types": self.scan_types,
-                    "resize_shape": self.resize_shape,
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                gen_kwargs={
-                    "paths_dict": paths_dict,
-                    "split": "test",
-                    "scan_types": self.scan_types,
-                    "resize_shape": self.resize_shape,
-                },
-            ),
-        ]
-
-    def _generate_examples(
+    def _split_generators(
         self,
-
-        split: str,
-        scan_types: List[str],
-        resize_shape: Tuple[int, int, int],
+        dl_manager,
         validate_share: float = 0.2,
         test_share: float = 0.2,
         random_seed: int = 9999,
-    )
+    ):
         """
-
-        (key, example) tuples from the dataset. The `key` is for legacy reasons
-        (tfds) and is not important in itself, but must be unique for each example.
+        Download and extract data and define splits based on configuration.
 
         Args
-
-            split: specify training, validation, or testing set;
-                options = 'train', 'validate', OR 'test'
-            scan_types: list of sagittal scan types to use in examples;
-                options = ['t1', 't2', 't2_SPACE']
+            dl_manager: HuggingFace datasets download manager (automatically supplied)
             validate_share: float indicating share of data to use for validation;
                 must be in range (0.0, 1.0); note that training share is
                 calculated as (1 - validate_share - test_share)
             test_share: float indicating share of data to use for testing;
                 must be in range (0.0, 1.0); note that training share is
                 calculated as (1 - validate_share - test_share)
-
-        Yields
-            Tuple (unique patient-scan ID, dict of
+            random_seed: seed for random draws of train/validate/test patient ids
         """
         # Set constants
         train_share = (1.0 - validate_share - test_share)
         np.random.seed(int(random_seed))
 
         # Validate params
-        for item in scan_types:
-            if item not in ['t1', 't2', 't2_SPACE']:
-                raise ValueError(
-                    'Scan type "{item}" not recognized as valid scan type.\
-                    Verify scan type argument.'
-                )
-        if split not in ['train', 'validate', 'test']:
-            raise ValueError(
-                f'Split argument "{split}" is not recognized. \
-                Please enter one of ["train", "validate", "test"]'
-            )
         if train_share <= 0.0:
             raise ValueError(
                 f'Training share is calculated as (1 - validate_share - test_share) \
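`_split_generators` now owns the split shares, so the residual-share check runs before any data is downloaded. The arithmetic in brief:

# Defaults: validate_share=0.2, test_share=0.2
train_share = 1.0 - 0.2 - 0.2  # 0.6

# Setting validate_share=0.5 and test_share=0.5 would leave
# train_share == 0.0 and trip the ValueError above before any
# download work is done.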
@@ -327,6 +303,33 @@ class SPIDER(datasets.GeneratorBasedBuilder):
                 {test_share}.'
             )
 
+        # Download images (returns dictionary to local cache)
+        paths_dict = dl_manager.download_and_extract(_URLS)
+
+        # Get list of image and mask data files
+        image_files = [
+            file for file in os.listdir(os.path.join(paths_dict['images'], 'images'))
+            if file.endswith('.mha')
+        ]
+        assert len(image_files) > 0, "No image files found--check directory path."
+
+        mask_files = [
+            file for file in os.listdir(os.path.join(paths_dict['masks'], 'masks'))
+            if file.endswith('.mha')
+        ]
+        assert len(mask_files) > 0, "No mask files found--check directory path."
+
+        # Filter image and mask data files based on scan types
+        image_files = [
+            file for file in image_files
+            if any(scan_type in file for scan_type in self.config.scan_types)
+        ]
+
+        mask_files = [
+            file for file in mask_files
+            if any(scan_type in file for scan_type in self.config.scan_types)
+        ]
+
         # Generate train/validate/test partitions of patient IDs
         partition = np.random.choice(
             ['train', 'dev', 'test'],
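Note that the scan-type filter is a substring test, so requesting 't2' also keeps 't2_SPACE' files; pass ['t2_SPACE'] alone to get only those scans. An illustration with hypothetical filenames:

image_files = ['1_t1.mha', '1_t2.mha', '2_t2_SPACE.mha']  # hypothetical
scan_types = ['t2']
print([f for f in image_files if any(s in f for s in scan_types)])
# ['1_t2.mha', '2_t2_SPACE.mha'] since 't2' is a substring of 't2_SPACE'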
@@ -339,6 +342,19 @@ class SPIDER(datasets.GeneratorBasedBuilder):
         test_ids = set(patient_ids[partition == 'test'])
         assert len(train_ids.union(validate_ids, test_ids)) == N_PATIENTS
 
+        # Subset train/validation/test partition images and mask files
+        train_image_files = subset_file_list(image_files, train_ids)
+        validate_image_files = subset_file_list(image_files, validate_ids)
+        test_image_files = subset_file_list(image_files, test_ids)
+
+        train_mask_files = subset_file_list(mask_files, train_ids)
+        validate_mask_files = subset_file_list(mask_files, validate_ids)
+        test_mask_files = subset_file_list(mask_files, test_ids)
+
+        assert len(train_image_files) == len(train_mask_files)
+        assert len(validate_image_files) == len(validate_mask_files)
+        assert len(test_image_files) == len(test_mask_files)
+
         # Import patient/scanner data and radiological gradings data
         overview_data = import_csv_data(paths_dict['overview'])
         grades_data = import_csv_data(paths_dict['gradings'])
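The patient-level partition is drawn once with a fixed seed, so split membership is reproducible across loads and a patient's scans never straddle two splits. A sketch of the mechanism; N_PATIENTS and the probabilities are illustrative stand-ins for the script's actual values:

import numpy as np

np.random.seed(9999)
N_PATIENTS = 10  # illustrative
patient_ids = np.arange(1, N_PATIENTS + 1)
partition = np.random.choice(
    ['train', 'dev', 'test'],
    size=N_PATIENTS,
    p=[0.6, 0.2, 0.2],
)
train_ids = set(patient_ids[partition == 'train'])
# Re-running with the same seed reproduces the same membership.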
@@ -389,54 +405,74 @@ class SPIDER(datasets.GeneratorBasedBuilder):
             if col not in ['Patient']
         }
 
-        #
-
-
-
-
-
-
-
-            file for file in os.listdir(os.path.join(paths_dict['masks'], 'masks'))
-            if file.endswith('.mha')
-        ]
-        assert len(mask_files) > 0, "No mask files found--check directory path."
-
-        # Filter image and mask data files based on scan types
-        image_files = [
-            file for file in image_files
-            if any(scan_type in file for scan_type in scan_types)
-        ]
+        # DEMO configuration: subset first 10 examples
+        if self.config.name == "demo":
+            train_image_files = train_image_files[:10]
+            train_mask_files = train_mask_files[:10]
+            validate_image_files = validate_image_files[:10]
+            validate_mask_files = validate_mask_files[:10]
+            test_image_files = test_image_files[:10]
+            test_mask_files = test_mask_files[:10]
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                gen_kwargs={
+                    "paths_dict": paths_dict,
+                    "image_files": train_image_files,
+                    "mask_files": train_mask_files,
+                    "overview_dict": overview_dict,
+                    "grades_dict": grades_dict,
+                    "resize_shape": self.config.resize_shape,
+                    "shuffle": self.config.shuffle,
+                },
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.VALIDATION,
+                gen_kwargs={
+                    "paths_dict": paths_dict,
+                    "image_files": validate_image_files,
+                    "mask_files": validate_mask_files,
+                    "overview_dict": overview_dict,
+                    "grades_dict": grades_dict,
+                    "resize_shape": self.config.resize_shape,
+                    "shuffle": self.config.shuffle,
+                },
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST,
+                gen_kwargs={
+                    "paths_dict": paths_dict,
+                    "image_files": test_image_files,
+                    "mask_files": test_mask_files,
+                    "overview_dict": overview_dict,
+                    "grades_dict": grades_dict,
+                    "resize_shape": self.config.resize_shape,
+                    "shuffle": self.config.shuffle,
+                },
+            ),
         ]
-
-
-
+
+    def _generate_examples(
+        self,
+        paths_dict: Dict[str, str],
+        image_files: List[str],
+        mask_files: List[str],
+        overview_dict: Dict,
+        grades_dict: Dict,
+        resize_shape: Tuple[int, int, int],
+        shuffle: bool,
+    ) -> Tuple[str, Dict]:
+        """
+        This method handles input defined in _split_generators to yield
+        (key, example) tuples from the dataset. The `key` is for legacy reasons
+        (tfds) and is not important in itself, but must be unique for each example.
+        """
         # Shuffle order of patient scans
         # (note that only images need to be shuffled since masks and metadata
         # will be linked to the selected image)
-
+        if shuffle:
+            np.random.shuffle(image_files)
 
         ## Generate next example
         # ----------------------
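This hunk is the heart of the refactor named in the commit message. Instead of one `split` string, each SplitGenerator now carries explicit per-split file lists, and the datasets library shards `_generate_examples` by dividing list-valued gen_kwargs across workers; the equal-length guarantee from the asserts above is what lets `image_files` and `mask_files` shard consistently. A hedged usage sketch (repo id assumed, as before):

from datasets import load_dataset

# List-valued gen_kwargs are split among workers when num_proc > 1,
# which the old single "split" string could not support.
ds = load_dataset(
    "cdoswald/SPIDER",
    name="default",
    num_proc=4,
    trust_remote_code=True,
)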
@@ -483,10 +519,10 @@ class SPIDER(datasets.GeneratorBasedBuilder):
             return_dict = {
                 'patient_id':patient_id,
                 'scan_type':scan_type,
+                'image':image_array_standardized,
+                'mask':mask_array_standardized,
                 'image_path':image_path,
                 'mask_path':mask_path,
-                'image_array':image_array_standardized,
-                'mask_array':mask_array_standardized,
                 'metadata':image_overview,
                 'rad_gradings':patient_grades_dict,
             }
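Finally, the yielded keys are renamed to line up with the `Image()` features declared in `_info`. Reading an example back, continuing the hypothetical `ds` from the sketches above:

example = ds["train"][0]
print(example["patient_id"], example["scan_type"])
image = example["image"]  # exposed as 'image_array' before this commit
mask = example["mask"]    # exposed as 'mask_array' before this commit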