Upload indolem_sentiment.py with huggingface_hub
indolem_sentiment.py (CHANGED, +25 -25)
@@ -28,7 +28,7 @@ To create a dataset loading script you will create a class and implement 3 metho
 
 TODO: Before submitting your script, delete this doc string and replace it with a description of your dataset.
 
-[
+[seacrowd_schema_name] = (kb, pairs, qa, text, t2t, entailment)
 """
 from base64 import encode
 import json
@@ -37,10 +37,10 @@ from typing import Dict, List, Tuple
 
 import datasets
 
-from
-from
-from
-from
+from seacrowd.utils import schemas
+from seacrowd.utils.common_parser import load_conll_data
+from seacrowd.utils.configs import SEACrowdConfig
+from seacrowd.utils.constants import Tasks, DEFAULT_SOURCE_VIEW_NAME, DEFAULT_SEACROWD_VIEW_NAME
 
 # TODO: Add BibTeX citation
 _CITATION = """\
@@ -67,7 +67,7 @@ _CITATION = """\
 # E.g. Hallmarks of Cancer: [dataset_name] --> hallmarks_of_cancer
 _DATASETNAME = "indolem_sentiment"
 _SOURCE_VIEW_NAME = DEFAULT_SOURCE_VIEW_NAME
-_UNIFIED_VIEW_NAME =
+_UNIFIED_VIEW_NAME = DEFAULT_SEACROWD_VIEW_NAME
 
 _LANGUAGES = ["ind"] # We follow ISO639-3 language code (https://iso639-3.sil.org/code_tables/639/data)
 _LOCAL = False
@@ -101,7 +101,7 @@ _LICENSE = "Creative Commons Attribution Share-Alike 4.0 International"
 # For local datasets, this variable can be an empty dictionary.
 
 # For publicly available datasets you will most likely end up passing these URLs to dl_manager in _split_generators.
-# In most cases the URLs will be the same for the source and
+# In most cases the URLs will be the same for the source and seacrowd config.
 # However, if you need to access different files for each config you can have multiple entries in this dict.
 # This can be an arbitrarily nested dict/list of URLs (see below in `_split_generators` method)
 _URLS = {
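The `_URLS` entries themselves fall outside the hunks shown in this diff; a minimal sketch of the nested-dict shape the comment describes, with placeholder URLs that are not taken from this commit:

# Hypothetical illustration of the _URLS shape described above; the actual
# download URLs for indolem_sentiment are not visible in this diff.
_URLS = {
    _DATASETNAME: {
        "train": "https://example.com/indolem_sentiment/train.csv",  # placeholder URL
        "test": "https://example.com/indolem_sentiment/test.csv",    # placeholder URL
    }
}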
@@ -120,7 +120,7 @@ _SUPPORTED_TASKS = [Tasks.SENTIMENT_ANALYSIS] # example: [Tasks.TRANSLATION, Ta
 # provided by the original dataset as a version goes.
 _SOURCE_VERSION = "1.0.0"
 
-
+_SEACROWD_VERSION = "2024.06.20"
 
 
 # TODO: Name the dataset class to match the script name using CamelCase instead of snake_case
@@ -128,40 +128,40 @@ class IndolemSentimentDataset(datasets.GeneratorBasedBuilder):
 
     label_classes = ['negative','positive']
     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
-
+    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
 
-    # You will be able to load the "source" or "
+    # You will be able to load the "source" or "seacrowd" configurations with
     # ds_source = datasets.load_dataset('my_dataset', name='source')
-    #
+    # ds_seacrowd = datasets.load_dataset('my_dataset', name='seacrowd')
 
     # For local datasets you can make use of the `data_dir` and `data_files` kwargs
     # https://huggingface.co/docs/datasets/add_dataset.html#downloading-data-files-and-organizing-splits
     # ds_source = datasets.load_dataset('my_dataset', name='source', data_dir="/path/to/data/files")
-    #
+    # ds_seacrowd = datasets.load_dataset('my_dataset', name='seacrowd', data_dir="/path/to/data/files")
 
     # TODO: For each dataset, implement Config for Source and Nusantara;
-    # If dataset contains more than one subset (see
+    # If dataset contains more than one subset (see seacrowd/sea_datasets/smsa.py) implement for EACH of them.
     # Each of them should contain:
-    # - name: should be unique for each dataset config eg. smsa_(source|
-    # - version: option = (SOURCE_VERSION|
+    # - name: should be unique for each dataset config eg. smsa_(source|seacrowd)_[seacrowd_schema_name]
+    # - version: option = (SOURCE_VERSION|SEACROWD_VERSION)
     # - description: one line description for the dataset
-    # - schema: options = (source|
+    # - schema: options = (source|seacrowd_[seacrowd_schema_name])
    # - subset_id: subset id is the canonical name for the dataset (eg. smsa)
-    # where [
+    # where [seacrowd_schema_name] = (kb, pairs, qa, text, t2t)
 
     BUILDER_CONFIGS = [
-
+        SEACrowdConfig(
             name="indolem_sentiment_source",
             version=SOURCE_VERSION,
             description="indolem_sentiment source schema",
             schema="source",
             subset_id="indolem_sentiment",
         ),
-
-        name="
-        version=
+        SEACrowdConfig(
+            name="indolem_sentiment_seacrowd_text",
+            version=SEACROWD_VERSION,
             description="indolem_sentiment Nusantara schema",
-        schema="
+            schema="seacrowd_text",
             subset_id="indolem_sentiment",
         ),
     ]
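Once these configs are registered, either schema can be loaded by its config name, as the comments above illustrate; a minimal usage sketch, assuming the script is available as a local file:

import datasets

# Minimal usage sketch; "indolem_sentiment.py" here is assumed to be the
# local path to this loading script, not something stated in the commit.
ds_source = datasets.load_dataset("indolem_sentiment.py", name="indolem_sentiment_source")
ds_seacrowd = datasets.load_dataset("indolem_sentiment.py", name="indolem_sentiment_seacrowd_text")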
@@ -176,7 +176,7 @@ class IndolemSentimentDataset(datasets.GeneratorBasedBuilder):
 
         if self.config.schema == "source":
             features = datasets.Features({"sentence":datasets.Value("string"), "sentiment": datasets.Value("int32")})
-        elif self.config.schema == "
+        elif self.config.schema == "seacrowd_text":
             features = schemas.text_features(self.label_classes)
 
         return datasets.DatasetInfo(
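`schemas.text_features` is imported from `seacrowd.utils` and its body is not part of this diff; inferring from the `id`/`text`/`label` dict built in `_generate_examples` below, it plausibly constructs features along these lines:

import datasets

# Assumed shape only; inferred from the example dicts in this script,
# not copied from seacrowd.utils.schemas.
def text_features_sketch(label_names):
    return datasets.Features(
        {
            "id": datasets.Value("string"),
            "text": datasets.Value("string"),
            "label": datasets.ClassLabel(names=label_names),
        }
    )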
@@ -190,7 +190,7 @@ class IndolemSentimentDataset(datasets.GeneratorBasedBuilder):
     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
         """Returns SplitGenerators."""
         # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
-        # If you need to access the "source" or "
+        # If you need to access the "source" or "seacrowd" config choice, that will be in self.config.name
         # LOCAL DATASETS: You do not need the dl_manager; you can ignore this argument. Make sure `gen_kwargs` in the return gets passed the right filepath
         # PUBLIC DATASETS: Assign your data-dir based on the dl_manager.
         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs; many examples use the download_and_extract method; see the DownloadManager docs here: https://huggingface.co/docs/datasets/package_reference/builder_classes.html#datasets.DownloadManager
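The body of this method is outside the hunks shown; a sketch of the download-then-split pattern the comments describe, assuming `_URLS[_DATASETNAME]` maps split names to downloadable files:

from typing import List

import datasets

# Hypothetical body for the pattern described above; the real split logic of
# indolem_sentiment is not visible in this diff.
def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
    # download_and_extract on a dict returns a dict of split name -> local path
    paths = dl_manager.download_and_extract(_URLS[_DATASETNAME])
    return [
        datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": paths["train"]}),
        datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": paths["test"]}),
    ]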
@@ -252,7 +252,7 @@ class IndolemSentimentDataset(datasets.GeneratorBasedBuilder):
             sentiment = int(line[-1])
             if self.config.schema == 'source':
                 ex = {'sentence': sentence, 'sentiment': sentiment}
-            elif self.config.schema == '
+            elif self.config.schema == 'seacrowd_text':
                 ex = {'id': str(id), 'text': str(sentence), 'label': self.label_classes[sentiment]}
             else:
                 raise ValueError(f"Invalid config: {self.config.name}")
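For a concrete row, the two `ex` shapes built above differ as follows (values invented for illustration only):

# Invented sample values; not taken from the dataset.
ex_source = {"sentence": "Filmnya bagus sekali!", "sentiment": 1}                # source schema
ex_seacrowd = {"id": "0", "text": "Filmnya bagus sekali!", "label": "positive"}  # seacrowd_text schema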