Mayfull committed on
Commit
7288f9f
·
verified ·
1 Parent(s): 8f155de

Update valse_vlms.py

Browse files
Files changed (1) hide show
  1. valse_vlms.py +97 -96
valse_vlms.py CHANGED
@@ -1,96 +1,97 @@
1
- import os
2
- from datasets import DatasetInfo, GeneratorBasedBuilder, SplitGenerator, Version, Features, Value, Sequence, Image, Split
3
-
4
- _CITATION = """\
5
- @inproceedings{parcalabescu-etal-2022-valse,
6
- title = "{VALSE}: A Task-Independent Benchmark for Vision and Language Models Centered on Linguistic Phenomena",
7
- author = "Parcalabescu, Letitia and
8
- Cafagna, Michele and
9
- Muradjan, Lilitta and
10
- Frank, Anette and
11
- Calixto, Iacer and
12
- Gatt, Albert",
13
- booktitle = "Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
14
- month = may,
15
- year = "2022",
16
- address = "Dublin, Ireland",
17
- publisher = "Association for Computational Linguistics",
18
- url = "https://aclanthology.org/2022.acl-long.567",
19
- pages = "8253--8280",
20
- abstract = "We propose VALSE (Vision And Language Structured Evaluation), a novel benchmark designed for testing general-purpose pretrained vision and language (V{\&}L) models for their visio-linguistic grounding capabilities on specific linguistic phenomena. VALSE offers a suite of six tests covering various linguistic constructs. Solving these requires models to ground linguistic phenomena in the visual modality, allowing more fine-grained evaluations than hitherto possible. We build VALSE using methods that support the construction of valid foils, and report results from evaluating five widely-used V{\&}L models. Our experiments suggest that current models have considerable difficulty addressing most phenomena. Hence, we expect VALSE to serve as an important benchmark to measure future progress of pretrained V{\&}L models from a linguistic perspective, complementing the canonical task-centred V{\&}L evaluations.",
21
- }
22
- """
23
-
24
- _DESCRIPTION = """\
25
- Code and datasets for "VALSE: A Task-Independent Benchmark for Vision and Language Models Centered on Linguistic Phenomena".
26
- """
27
-
28
- _HOMEPAGE = "https://huggingface.co/datasets/Mayfull/valse_vlms"
29
- _LICENSE = "MIT License"
30
-
31
- class VALSEVLMsDataset(GeneratorBasedBuilder):
32
- VERSION = Version("1.0.0")
33
-
34
- def _info(self):
35
- return DatasetInfo(
36
- description=_DESCRIPTION,
37
- homepage=_HOMEPAGE,
38
- license=_LICENSE,
39
- citation=_CITATION,
40
- features=Features(
41
- {
42
- "images": Image(), # Single image
43
- "positive_caption": Sequence(Value("string")),
44
- "negative_caption": Sequence(Value("string")),
45
- "original_file_name": Value("string"),
46
- "dataset": Value("string"),
47
- "key": Value("string"),
48
- "linguistic_phenomena": Value("string"),
49
- "original_split": Value("string"),
50
- }
51
- ),
52
- )
53
-
54
- def _split_generators(self, dl_manager):
55
- # URLs for images.zip and examples.jsonl
56
- urls_to_download = {
57
- "images": "https://huggingface.co/datasets/Mayfull/valse_vlms/resolve/main/images.zip",
58
- "examples": "https://huggingface.co/datasets/Mayfull/valse_vlms/resolve/main/examples.jsonl",
59
- }
60
- downloaded_files = dl_manager.download_and_extract(urls_to_download)
61
-
62
- return [
63
- SplitGenerator(
64
- name=Split.TEST,
65
- gen_kwargs={
66
- "examples_file": downloaded_files["examples"],
67
- "images_dir": downloaded_files["images"],
68
- },
69
- ),
70
- ]
71
-
72
- def _generate_examples(self, examples_file, images_dir):
73
- # Read the examples.jsonl file
74
- with open(examples_file, "r", encoding="utf-8") as f:
75
- for idx, line in enumerate(f):
76
- data = eval(line.strip())
77
-
78
- # Get image path
79
- image_file_name = data.get("image_file_name")
80
- image_path = os.path.join(images_dir, image_file_name)
81
-
82
- # Ensure the image file exists
83
- if not os.path.exists(image_path):
84
- continue # Skip if image not found
85
-
86
- # Prepare the example
87
- yield idx, {
88
- "images": image_path,
89
- "positive_caption": data.get("positive_caption", []),
90
- "negative_caption": data.get("negative_caption", []),
91
- "original_file_name": data.get("original_file_name", ""),
92
- "dataset": data.get("dataset", ""),
93
- "key": data.get("key", ""),
94
- "linguistic_phenomena": data.get("linguistic_phenomena", ""),
95
- "original_split": data.get("original_split", ""),
96
- }
 
 
1
+ import os
2
+ from datasets import DatasetInfo, GeneratorBasedBuilder, SplitGenerator, Version, Features, Value, Sequence, Image, Split
3
+
4
+ _CITATION = """\
5
+ @inproceedings{parcalabescu-etal-2022-valse,
6
+ title = "{VALSE}: A Task-Independent Benchmark for Vision and Language Models Centered on Linguistic Phenomena",
7
+ author = "Parcalabescu, Letitia and
8
+ Cafagna, Michele and
9
+ Muradjan, Lilitta and
10
+ Frank, Anette and
11
+ Calixto, Iacer and
12
+ Gatt, Albert",
13
+ booktitle = "Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
14
+ month = may,
15
+ year = "2022",
16
+ address = "Dublin, Ireland",
17
+ publisher = "Association for Computational Linguistics",
18
+ url = "https://aclanthology.org/2022.acl-long.567",
19
+ pages = "8253--8280",
20
+ abstract = "We propose VALSE (Vision And Language Structured Evaluation), a novel benchmark designed for testing general-purpose pretrained vision and language (V{\&}L) models for their visio-linguistic grounding capabilities on specific linguistic phenomena. VALSE offers a suite of six tests covering various linguistic constructs. Solving these requires models to ground linguistic phenomena in the visual modality, allowing more fine-grained evaluations than hitherto possible. We build VALSE using methods that support the construction of valid foils, and report results from evaluating five widely-used V{\&}L models. Our experiments suggest that current models have considerable difficulty addressing most phenomena. Hence, we expect VALSE to serve as an important benchmark to measure future progress of pretrained V{\&}L models from a linguistic perspective, complementing the canonical task-centred V{\&}L evaluations.",
21
+ }
22
+ """
23
+
24
+ _DESCRIPTION = """\
25
+ Code and datasets for "VALSE: A Task-Independent Benchmark for Vision and Language Models Centered on Linguistic Phenomena".
26
+ """
27
+
28
+ _HOMEPAGE = "https://huggingface.co/datasets/Mayfull/valse_vlms"
29
+ _LICENSE = "MIT License"
30
+
class VALSEVLMsDataset(GeneratorBasedBuilder):
    """Loading script for the VALSE benchmark.

    Each generated example pairs one image (wrapped in a one-element list
    under ``images``) with positive and negative (foil) captions, used to
    probe vision-and-language models on specific linguistic phenomena.
    """

    VERSION = Version("1.0.0")

    def _info(self):
        """Return the dataset metadata and feature schema."""
        return DatasetInfo(
            description=_DESCRIPTION,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
            features=Features(
                {
                    # List of images; this builder always emits exactly one
                    # image per example, wrapped in a list.
                    "images": Sequence(Image()),
                    "positive_caption": Sequence(Value("string")),
                    "negative_caption": Sequence(Value("string")),
                    "original_file_name": Value("string"),
                    "dataset": Value("string"),
                    "key": Value("string"),
                    "linguistic_phenomena": Value("string"),
                    "original_split": Value("string"),
                }
            ),
        )

    def _split_generators(self, dl_manager):
        """Download/extract the image archive and annotations; expose a single TEST split."""
        urls_to_download = {
            "images": "https://huggingface.co/datasets/Mayfull/valse_vlms/resolve/main/images.zip",
            "examples": "https://huggingface.co/datasets/Mayfull/valse_vlms/resolve/main/examples.jsonl",
        }
        downloaded_files = dl_manager.download_and_extract(urls_to_download)

        return [
            SplitGenerator(
                name=Split.TEST,
                gen_kwargs={
                    "examples_file": downloaded_files["examples"],
                    "images_dir": downloaded_files["images"],
                },
            ),
        ]

    def _generate_examples(self, examples_file, images_dir):
        """Yield ``(idx, example)`` pairs from the JSONL annotations.

        Lines that are blank, carry no image reference, or point at an image
        that was not extracted are skipped silently.
        """
        # Local imports keep this edit self-contained (module import block untouched).
        import ast
        import json

        with open(examples_file, "r", encoding="utf-8") as f:
            for idx, line in enumerate(f):
                line = line.strip()
                if not line:
                    continue  # tolerate blank lines in the annotations file

                # Bug fix: the original used eval(), which executes arbitrary
                # code from a downloaded file and chokes on the JSON literals
                # true/false/null. Parse as JSON first; fall back to
                # ast.literal_eval in case lines are Python-repr dicts.
                try:
                    data = json.loads(line)
                except ValueError:
                    data = ast.literal_eval(line)

                image_file_name = data.get("image_file_name")
                if not image_file_name:
                    continue  # missing reference: os.path.join(None) would raise TypeError

                image_path = os.path.join(images_dir, image_file_name)
                if not os.path.exists(image_path):
                    continue  # skip if image not found

                yield idx, {
                    "images": [image_path],  # single image path wrapped in a list
                    "positive_caption": data.get("positive_caption", []),
                    "negative_caption": data.get("negative_caption", []),
                    "original_file_name": data.get("original_file_name", ""),
                    "dataset": data.get("dataset", ""),
                    "key": data.get("key", ""),
                    "linguistic_phenomena": data.get("linguistic_phenomena", ""),
                    "original_split": data.get("original_split", ""),
                }