IlyasMoutawwakil (HF Staff) committed
Commit 549624e · verified · 1 Parent(s): da20323

Upload cpu_training_transformers_text-generation_hf-internal-testing/tiny-random-LlamaForCausalLM/benchmark.json with huggingface_hub
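As the commit message says, the file was pushed with the huggingface_hub client. Below is a minimal sketch of such an upload using HfApi.upload_file; the repo_id is a placeholder (the target repository is not named on this page) and repo_type="dataset" is an assumption about where these benchmark artifacts live.

# Hedged sketch of an upload via huggingface_hub, not the exact command used for this commit.
from huggingface_hub import HfApi

api = HfApi()
api.upload_file(
    path_or_fileobj="benchmark.json",
    path_in_repo="cpu_training_transformers_text-generation_hf-internal-testing/tiny-random-LlamaForCausalLM/benchmark.json",
    repo_id="<namespace>/<repo>",  # placeholder: hosting repo not shown on this page
    repo_type="dataset",           # assumption
    commit_message="Upload cpu_training_transformers_text-generation_hf-internal-testing/tiny-random-LlamaForCausalLM/benchmark.json with huggingface_hub",
)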

cpu_training_transformers_text-generation_hf-internal-testing/tiny-random-LlamaForCausalLM/benchmark.json ADDED
@@ -0,0 +1,227 @@
+{
+    "config": {
+        "name": "cpu_training_transformers_text-generation_hf-internal-testing/tiny-random-LlamaForCausalLM",
+        "backend": {
+            "name": "pytorch",
+            "version": "2.5.1",
+            "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
+            "model": "hf-internal-testing/tiny-random-LlamaForCausalLM",
+            "processor": "hf-internal-testing/tiny-random-LlamaForCausalLM",
+            "task": "text-generation",
+            "library": "transformers",
+            "model_type": "llama",
+            "device": "cpu",
+            "device_ids": null,
+            "seed": 42,
+            "inter_op_num_threads": null,
+            "intra_op_num_threads": null,
+            "model_kwargs": {},
+            "processor_kwargs": {},
+            "no_weights": true,
+            "tp_plan": null,
+            "device_map": null,
+            "torch_dtype": null,
+            "eval_mode": true,
+            "to_bettertransformer": false,
+            "low_cpu_mem_usage": null,
+            "attn_implementation": null,
+            "cache_implementation": null,
+            "allow_tf32": false,
+            "autocast_enabled": false,
+            "autocast_dtype": null,
+            "torch_compile": false,
+            "torch_compile_target": "forward",
+            "torch_compile_config": {},
+            "quantization_scheme": null,
+            "quantization_config": {},
+            "deepspeed_inference": false,
+            "deepspeed_inference_config": {},
+            "peft_type": null,
+            "peft_config": {}
+        },
+        "scenario": {
+            "name": "training",
+            "_target_": "optimum_benchmark.scenarios.training.scenario.TrainingScenario",
+            "max_steps": 5,
+            "warmup_steps": 2,
+            "dataset_shapes": {
+                "dataset_size": 500,
+                "sequence_length": 16,
+                "num_choices": 1
+            },
+            "training_arguments": {
+                "per_device_train_batch_size": 2,
+                "gradient_accumulation_steps": 1,
+                "output_dir": "./trainer_output",
+                "eval_strategy": "no",
+                "save_strategy": "no",
+                "do_train": true,
+                "use_cpu": false,
+                "max_steps": 5,
+                "do_eval": false,
+                "do_predict": false,
+                "report_to": "none",
+                "skip_memory_metrics": true,
+                "ddp_find_unused_parameters": false
+            },
+            "latency": true,
+            "memory": true,
+            "energy": true
+        },
+        "launcher": {
+            "name": "process",
+            "_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher",
+            "device_isolation": false,
+            "device_isolation_action": null,
+            "numactl": false,
+            "numactl_kwargs": {},
+            "start_method": "spawn"
+        },
+        "environment": {
+            "cpu": " AMD EPYC 7742 64-Core Processor",
+            "cpu_count": 128,
+            "cpu_ram_mb": 540671.643648,
+            "system": "Linux",
+            "machine": "x86_64",
+            "platform": "Linux-5.4.0-166-generic-x86_64-with-glibc2.31",
+            "processor": "x86_64",
+            "python_version": "3.10.18",
+            "gpu": [
+                "NVIDIA A100-SXM4-80GB",
+                "NVIDIA A100-SXM4-80GB",
+                "NVIDIA A100-SXM4-80GB",
+                "NVIDIA DGX Display",
+                "NVIDIA A100-SXM4-80GB"
+            ],
+            "gpu_count": 5,
+            "gpu_vram_mb": 347892350976,
+            "optimum_benchmark_version": "0.6.0.dev0",
+            "optimum_benchmark_commit": "61a08086def388b3e78bbf6b42ed20ab4af3f8db",
+            "transformers_version": "4.45.1",
+            "transformers_commit": "61a08086def388b3e78bbf6b42ed20ab4af3f8db",
+            "accelerate_version": "1.10.0",
+            "accelerate_commit": "61a08086def388b3e78bbf6b42ed20ab4af3f8db",
+            "diffusers_version": "0.34.0",
+            "diffusers_commit": "61a08086def388b3e78bbf6b42ed20ab4af3f8db",
+            "optimum_version": null,
+            "optimum_commit": null,
+            "timm_version": "1.0.19",
+            "timm_commit": "61a08086def388b3e78bbf6b42ed20ab4af3f8db",
+            "peft_version": "0.17.0",
+            "peft_commit": "61a08086def388b3e78bbf6b42ed20ab4af3f8db"
+        },
+        "print_report": true,
+        "log_report": true
+    },
+    "report": {
+        "overall": {
+            "memory": {
+                "unit": "MB",
+                "max_ram": 764.45696,
+                "max_global_vram": null,
+                "max_process_vram": null,
+                "max_reserved": null,
+                "max_allocated": null
+            },
+            "latency": {
+                "unit": "s",
+                "values": [
+                    0.1563250799663365,
+                    0.012235323898494244,
+                    0.011144319083541632,
+                    0.014274328015744686,
+                    0.014069293159991503
+                ],
+                "count": 5,
+                "total": 0.20804834412410855,
+                "mean": 0.04160966882482171,
+                "p50": 0.014069293159991503,
+                "p90": 0.09950477918609979,
+                "p95": 0.1279149295762181,
+                "p99": 0.1506430498883128,
+                "stdev": 0.057369518695879806,
+                "stdev_": 137.8754513461947
+            },
+            "throughput": {
+                "unit": "samples/s",
+                "value": 240.32875729197414
+            },
+            "energy": {
+                "unit": "kWh",
+                "cpu": 1.2375592967146077e-05,
+                "ram": 2.0760073413276463e-05,
+                "gpu": 3.035057984135392e-05,
+                "total": 6.348624622177646e-05
+            },
+            "efficiency": {
+                "unit": "samples/kWh",
+                "value": 157514.4317883752
+            }
+        },
+        "warmup": {
+            "memory": {
+                "unit": "MB",
+                "max_ram": 764.45696,
+                "max_global_vram": null,
+                "max_process_vram": null,
+                "max_reserved": null,
+                "max_allocated": null
+            },
+            "latency": {
+                "unit": "s",
+                "values": [
+                    0.1563250799663365,
+                    0.012235323898494244
+                ],
+                "count": 2,
+                "total": 0.16856040386483073,
+                "mean": 0.08428020193241537,
+                "p50": 0.08428020193241537,
+                "p90": 0.14191610435955226,
+                "p95": 0.14912059216294438,
+                "p99": 0.15488418240565807,
+                "stdev": 0.07204487803392112,
+                "stdev_": 85.48256456681749
+            },
+            "throughput": {
+                "unit": "samples/s",
+                "value": 47.46073108851372
+            },
+            "energy": null,
+            "efficiency": null
+        },
+        "train": {
+            "memory": {
+                "unit": "MB",
+                "max_ram": 764.45696,
+                "max_global_vram": null,
+                "max_process_vram": null,
+                "max_reserved": null,
+                "max_allocated": null
+            },
+            "latency": {
+                "unit": "s",
+                "values": [
+                    0.011144319083541632,
+                    0.014274328015744686,
+                    0.014069293159991503
+                ],
+                "count": 3,
+                "total": 0.03948794025927782,
+                "mean": 0.013162646753092607,
+                "p50": 0.014069293159991503,
+                "p90": 0.014233321044594049,
+                "p95": 0.014253824530169368,
+                "p99": 0.014270227318629623,
+                "stdev": 0.001429625769044644,
+                "stdev_": 10.86123327520545
+            },
+            "throughput": {
+                "unit": "samples/s",
+                "value": 455.8353735801867
+            },
+            "energy": null,
+            "efficiency": null
+        }
+    }
+}
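To consume this report programmatically, a minimal sketch is shown here: it downloads the file from the Hub and reads the training-phase latency. The repo_id is a placeholder (the hosting repository is not named on this page) and repo_type="dataset" is an assumption.

# Hedged sketch: fetch this benchmark.json from the Hub and inspect the report.
import json
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="<namespace>/<repo>",  # placeholder: hosting repo not shown on this page
    repo_type="dataset",           # assumption
    filename="cpu_training_transformers_text-generation_hf-internal-testing/tiny-random-LlamaForCausalLM/benchmark.json",
)

with open(path) as f:
    benchmark = json.load(f)

# The 5 measured steps are split into 2 warmup steps ("warmup") and 3 training steps ("train").
train_latency = benchmark["report"]["train"]["latency"]
print(f"mean train step latency: {train_latency['mean']:.6f} {train_latency['unit']} "
      f"over {train_latency['count']} steps")

Note that the "overall" section aggregates all five steps, so its mean latency is dominated by the slow first step (0.156 s), while the per-step "train" mean is roughly 0.013 s.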