# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: Address all TODOs and remove all explanatory comments
"""TODO: Add a description here."""
import csv
import os
from typing import Dict, List

import numpy as np

import datasets

# TODO: Add BibTeX citation
# You can find the citation on arXiv or on the dataset repo/website, for instance.
_CITATION = """\
@InProceedings{huggingface:dataset,
  title  = {A great new dataset},
  author = {huggingface, Inc.},
  year   = {2020}
}
"""

# TODO: Add description of the dataset here
# You can copy an official description
_DESCRIPTION = """\
This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
"""

_HOMEPAGE = "https://zenodo.org/records/10159290"

_LICENSE = """Creative Commons Attribution 4.0 International License \
(https://creativecommons.org/licenses/by/4.0/legalcode)"""

# TODO: Add link to the official dataset URLs here
# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see below in the `_split_generators` method).
_URLS = {
    "first_domain": {
        "images": "https://zenodo.org/records/10159290/files/images.zip",
        "masks": "https://zenodo.org/records/10159290/files/masks.zip",
        "overview": "https://zenodo.org/records/10159290/files/overview.csv",
        "gradings": "https://zenodo.org/records/10159290/files/radiological_gradings.csv",
    }
}


class SPIDER(datasets.GeneratorBasedBuilder):
    """TODO: Short description of my dataset."""

    VERSION = datasets.Version("1.1.0")

    # This is an example of a dataset with multiple configurations.
    # If you don't want/need to define several sub-sets in your dataset,
    # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.

    # If you need to make complex sub-parts in the dataset with configurable options,
    # you can create your own builder configuration class to store attributes, inheriting from datasets.BuilderConfig.
    # BUILDER_CONFIG_CLASS = MyBuilderConfig

    # You will be able to load one or the other configuration in the following list with
    # data = datasets.load_dataset('my_dataset', 'first_domain')
    # data = datasets.load_dataset('my_dataset', 'second_domain')
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="first_domain", version=VERSION, description="This part of my dataset covers a first domain"),
        datasets.BuilderConfig(name="second_domain", version=VERSION, description="This part of my dataset covers a second domain"),
    ]

    DEFAULT_CONFIG_NAME = "first_domain"  # It's not mandatory to have a default configuration. Just use one if it makes sense.

    def _info(self):
        # TODO: This method specifies the datasets.DatasetInfo object, which contains information and typings for the dataset
        if self.config.name == "first_domain":  # This is the name of the configuration selected in BUILDER_CONFIGS above
            features = datasets.Features(
                {
                    "sentence": datasets.Value("string"),
                    "option1": datasets.Value("string"),
                    "answer": datasets.Value("string"),
                    # These are the features of your dataset, like images, labels, ...
                }
            )
        else:  # This is an example to show how to have different features for "first_domain" and "second_domain"
            features = datasets.Features(
                {
                    "sentence": datasets.Value("string"),
                    "option2": datasets.Value("string"),
                    "second_domain_answer": datasets.Value("string"),
                    # These are the features of your dataset, like images, labels, ...
                }
            )
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types
            features=features,  # Here we define them above because they differ between the two configurations
            # If there's a common (input, target) tuple from the features, uncomment the supervised_keys line below
            # and specify them. They'll be used if as_supervised=True is passed to builder.as_dataset.
            # supervised_keys=("sentence", "label"),
            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # TODO: This method is tasked with downloading/extracting the data and defining the splits, depending on the configuration.
        # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name.
        # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs.
        # It can accept any type or nested list/dict and will give back the same structure with the URLs replaced with paths to local files.
        # By default the archives are extracted, and a path to a cached folder where they were extracted is returned instead of the archive.
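        # Note: `_URLS` currently defines download URLs only for the "first_domain"
        # configuration, so selecting "second_domain" would raise a KeyError below.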
        urls = _URLS[self.config.name]
        paths_dict = dl_manager.download_and_extract(urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "paths_dict": paths_dict,
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "paths_dict": paths_dict,
                    "split": "dev",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "paths_dict": paths_dict,
                    "split": "test",
                },
            ),
        ]

    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, paths_dict, split):
        # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
        # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.

        # Generate train/validate/test partitions of patient IDs
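        # A fixed seed keeps the patient-level split reproducible across runs.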
        np.random.seed(9999)
        N_PATIENTS = 257  # TODO: make hardcoded values dynamic
        VALIDATE_SHARE = 0.3
        TEST_SHARE = 0.2
        TRAIN_SHARE = 1.0 - VALIDATE_SHARE - TEST_SHARE
        partition = np.random.choice(
            ['train', 'dev', 'test'],
            p=[TRAIN_SHARE, VALIDATE_SHARE, TEST_SHARE],
            size=N_PATIENTS,
        )
        patient_ids = np.arange(N_PATIENTS) + 1
        train_ids = set(patient_ids[partition == 'train'])
        validate_ids = set(patient_ids[partition == 'dev'])
        test_ids = set(patient_ids[partition == 'test'])
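        # Sanity check: together the three splits must cover every patient exactly once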
        assert len(train_ids.union(validate_ids, test_ids)) == N_PATIENTS

        # Import patient/scanner data and radiological gradings data
        overview_data = import_csv_data(paths_dict['overview'])
        grades_data = import_csv_data(paths_dict['gradings'])

        # Import image and mask data
        image_files = [
            file for file in os.listdir(os.path.join(paths_dict['images'], 'images'))
            if file.endswith('.mha')
        ]
        assert len(image_files) > 0, "No image files found; check directory path."
        mask_files = [
            file for file in os.listdir(os.path.join(paths_dict['masks'], 'masks'))
            if file.endswith('.mha')
        ]
        assert len(mask_files) > 0, "No mask files found; check directory path."

        # Select the patient IDs that belong to the requested split
        if split == 'train':
            split_ids = train_ids
        elif split == 'dev':
            split_ids = validate_ids
        elif split == 'test':
            split_ids = test_ids
        else:
            raise ValueError(f"Unsupported split: {split}")
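
        # A minimal sketch of example generation, not a final implementation: it
        # assumes image/mask filenames begin with the patient ID followed by an
        # underscore (e.g. '12_t1.mha'); verify this against the actual archives.
        # The yielded fields must ultimately match the features declared in
        # `_info`, which are still template placeholders above.
        for key, patient_id in enumerate(sorted(split_ids)):
            patient_images = [
                os.path.join(paths_dict['images'], 'images', file)
                for file in image_files
                if file.startswith(f"{patient_id}_")
            ]
            patient_masks = [
                os.path.join(paths_dict['masks'], 'masks', file)
                for file in mask_files
                if file.startswith(f"{patient_id}_")
            ]
            yield key, {
                "patient_id": int(patient_id),
                "image_paths": patient_images,
                "mask_paths": patient_masks,
            }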


def import_csv_data(filepath: str) -> List[Dict[str, str]]:
    """Import all rows of a CSV file as a list of dicts keyed by the header."""
    results = []
    with open(filepath, encoding='utf-8') as f:
        reader = csv.DictReader(f)
        for line in reader:
            results.append(line)
    return results
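
# Example usage (the path is illustrative; a real call would point at the downloaded CSV):
#     rows = import_csv_data('/path/to/overview.csv')
#     rows[0]  # -> dict mapping each CSV header column to its string value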