Upload dataset_metadata.json with huggingface_hub
dataset_metadata.json +80 -0
dataset_metadata.json
ADDED
@@ -0,0 +1,80 @@
+{
+  "repository_info": {
+    "name": "ericjm/narrow-data",
+    "description": "Experimental model checkpoints from 'On the creation of narrow AI' paper",
+    "version": "1.0",
+    "paper_title": "On the creation of narrow AI: hierarchy and nonlocality of neural network skills",
+    "authors": ["Eric Michaud", "Asher Parker-Sartori", "Max Tegmark"],
+    "upload_date": "2024-06-22",
+    "upload_method": "HuggingFace CLI"
+  },
+  "experiments": {
+    "trainscratch01": {
+      "description": "LLMs trained from scratch on GitHub code",
+      "purpose": "Scaling analysis for paper Figures 6 & 12",
+      "dataset": "codeparrot/github-code (Python subset)",
+      "training_steps": 100000,
+      "learning_rate": "5e-4",
+      "sequence_length": 1024,
+      "hardware": "NVIDIA A100 80GB"
+    }
+  },
+  "models_uploaded": {
+    "trainscratch01/d256_l4_h4": {
+      "parameters": "23M",
+      "hidden_size": 256,
+      "num_layers": 4,
+      "num_heads": 4,
+      "intermediate_size": 1024,
+      "model_size_gb": 0.15,
+      "purpose": "Smallest model for scaling baseline"
+    },
+    "trainscratch01/d768_l12_h12": {
+      "parameters": "338M",
+      "hidden_size": 768,
+      "num_layers": 12,
+      "num_heads": 12,
+      "intermediate_size": 3072,
+      "model_size_gb": 0.65,
+      "purpose": "Representative medium model for key scaling point"
+    },
+    "trainscratch01/d1024_l16_h16": {
+      "parameters": "~500M",
+      "hidden_size": 1024,
+      "num_layers": 16,
+      "num_heads": 16,
+      "intermediate_size": 4096,
+      "model_size_gb": 1.13,
+      "purpose": "Alternative medium size for scaling comparison"
+    }
+  },
+  "usage": {
+    "loading_models": {
+      "library": "transformers",
+      "example": "AutoModelForCausalLM.from_pretrained('ericjm/narrow-data', subfolder='trainscratch01/d768_l12_h12/final_model')"
+    },
+    "tokenizer": {
+      "compatible": "NousResearch/Meta-Llama-3.1-8B",
+      "note": "Use this tokenizer for compatibility with all models"
+    },
+    "training_curves": {
+      "location": "trainer_state.json within each final_model directory",
+      "description": "Contains step-by-step training history and loss curves"
+    }
+  },
+  "paper_figures": {
+    "Figure 6": "LLM training frontiers - uses scaling analysis from these models",
+    "Figure 12": "Training run comparison - compares training efficiency across model sizes"
+  },
+  "technical_details": {
+    "model_format": "SafeTensors",
+    "precision": "float32",
+    "total_upload_size_gb": 1.93,
+    "files_per_model": ["model.safetensors", "config.json", "tokenizer.json", "trainer_state.json", "training_args.bin"],
+    "excluded_files": ["pruning_mask.pt (5GB each)", "large intermediate checkpoints"],
+    "optimization": "Essential final models only for efficient sharing"
+  },
+  "citation": {
+    "bibtex": "@article{michaud2024narrow, title={On the creation of narrow AI: hierarchy and nonlocality of neural network skills}, author={Michaud, Eric and Parker-Sartori, Asher and Tegmark, Max}, journal={arXiv preprint}, year={2024}}"
+  }
+}
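
A minimal loading sketch based on the "usage" block of the metadata above. The repository id, checkpoint subfolder, and tokenizer name are taken verbatim from that block; the parameter-count check and the assumption that ericjm/narrow-data is hosted as a model-type repository (as the from_pretrained example implies) are illustrative rather than confirmed.

    from transformers import AutoModelForCausalLM, AutoTokenizer

    # Repo id and subfolder come from usage.loading_models.example in the metadata;
    # any of the three paths listed under models_uploaded should work the same way.
    repo_id = "ericjm/narrow-data"
    subfolder = "trainscratch01/d768_l12_h12/final_model"

    model = AutoModelForCausalLM.from_pretrained(repo_id, subfolder=subfolder)

    # The metadata recommends the NousResearch/Meta-Llama-3.1-8B tokenizer for all models.
    tokenizer = AutoTokenizer.from_pretrained("NousResearch/Meta-Llama-3.1-8B")

    # Sanity check against the "parameters": "338M" entry for this checkpoint.
    n_params = sum(p.numel() for p in model.parameters())
    print(f"loaded {n_params / 1e6:.0f}M parameters, vocab size {len(tokenizer)}")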
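
The training_curves entry points at trainer_state.json inside each final_model directory. Below is a hedged sketch of fetching one of those files and extracting the loss curve, assuming the file follows the standard Hugging Face Trainer layout (a "log_history" list whose entries carry "step" and "loss" keys); pass repo_type="dataset" to hf_hub_download if the repository is hosted as a dataset rather than a model.

    import json

    from huggingface_hub import hf_hub_download

    # Path assembled from the metadata: <experiment>/<model>/final_model/trainer_state.json
    path = hf_hub_download(
        repo_id="ericjm/narrow-data",
        filename="trainscratch01/d768_l12_h12/final_model/trainer_state.json",
    )

    with open(path) as f:
        state = json.load(f)

    # Trainer checkpoints normally store per-logging-step records in "log_history";
    # entries that contain a "loss" key form the training-loss curve.
    curve = [(rec["step"], rec["loss"]) for rec in state.get("log_history", []) if "loss" in rec]
    if curve:
        print(f"{len(curve)} logged points; final training loss {curve[-1][1]:.3f}")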