{
  "repository_info": {
    "name": "ericjm/narrow-data",
    "description": "Experimental model checkpoints from 'On the creation of narrow AI' paper",
    "version": "1.0",
    "paper_title": "On the creation of narrow AI: hierarchy and nonlocality of neural network skills",
    "authors": ["Eric Michaud", "Asher Parker-Sartori", "Max Tegmark"],
    "upload_date": "2024-06-22",
    "upload_method": "HuggingFace CLI"
  },
  "experiments": {
    "trainscratch01": {
      "description": "LLMs trained from scratch on GitHub code",
      "purpose": "Scaling analysis for paper Figures 6 & 12",
      "dataset": "codeparrot/github-code (Python subset)",
      "training_steps": 100000,
      "learning_rate": "5e-4",
      "sequence_length": 1024,
      "hardware": "NVIDIA A100 80GB"
    }
  },
  "models_uploaded": {
    "trainscratch01/d256_l4_h4": {
      "parameters": "23M",
      "hidden_size": 256,
      "num_layers": 4,
      "num_heads": 4,
      "intermediate_size": 1024,
      "model_size_gb": 0.15,
      "purpose": "Smallest model for scaling baseline"
    },
    "trainscratch01/d768_l12_h12": {
      "parameters": "338M",
      "hidden_size": 768,
      "num_layers": 12,
      "num_heads": 12,
      "intermediate_size": 3072,
      "model_size_gb": 0.65,
      "purpose": "Representative medium model for key scaling point"
    },
    "trainscratch01/d1024_l16_h16": {
      "parameters": "~500M",
      "hidden_size": 1024,
      "num_layers": 16,
      "num_heads": 16,
      "intermediate_size": 4096,
      "model_size_gb": 1.13,
      "purpose": "Alternative medium size for scaling comparison"
    }
  },
  "usage": {
    "loading_models": {
      "library": "transformers",
      "example": "AutoModelForCausalLM.from_pretrained('ericjm/narrow-data', subfolder='trainscratch01/d768_l12_h12/final_model')"
    },
    "tokenizer": {
      "compatible": "NousResearch/Meta-Llama-3.1-8B",
      "note": "Use this tokenizer for compatibility with all models"
    },
    "training_curves": {
      "location": "trainer_state.json within each final_model directory",
      "description": "Contains step-by-step training history and loss curves"
    }
  },
  "paper_figures": {
    "Figure 6": "LLM training frontiers - uses scaling analysis from these models",
    "Figure 12": "Training run comparison - compares training efficiency across model sizes"
  },
  "technical_details": {
    "model_format": "SafeTensors",
    "precision": "float32",
    "total_upload_size_gb": 1.93,
    "files_per_model": ["model.safetensors", "config.json", "tokenizer.json", "trainer_state.json", "training_args.bin"],
    "excluded_files": ["pruning_mask.pt (5GB each)", "large intermediate checkpoints"],
    "optimization": "Essential final models only for efficient sharing"
  },
  "citation": {
    "bibtex": "@article{michaud2024narrow, title={On the creation of narrow AI: hierarchy and nonlocality of neural network skills}, author={Michaud, Eric and Parker-Sartori, Asher and Tegmark, Max}, journal={arXiv preprint}, year={2024}}"
  }
}
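
A minimal loading sketch based on the "usage" section above. The repo id, subfolder path, and tokenizer id come straight from the metadata; the prompt and generation settings are illustrative only.

# Load one of the from-scratch checkpoints plus the recommended Llama 3.1 tokenizer.
# The subfolder follows the metadata's example; swap in another model directory
# (e.g. trainscratch01/d256_l4_h4/final_model) for a different size.
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "ericjm/narrow-data"
subfolder = "trainscratch01/d768_l12_h12/final_model"  # 338M model listed above

model = AutoModelForCausalLM.from_pretrained(repo_id, subfolder=subfolder)
tokenizer = AutoTokenizer.from_pretrained("NousResearch/Meta-Llama-3.1-8B")

prompt = "def fibonacci(n):"  # models were trained on Python code
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))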
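
Similarly, a sketch for pulling a loss curve out of trainer_state.json, assuming the file follows the standard Hugging Face Trainer format (a "log_history" list whose training entries carry "step" and "loss").

# Fetch trainer_state.json for one model and read its training loss curve.
# Assumption: log_history is a list of dicts and training log entries
# contain "step" and "loss", as written by the Hugging Face Trainer.
import json

from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="ericjm/narrow-data",
    filename="trainscratch01/d768_l12_h12/final_model/trainer_state.json",
)

with open(path) as f:
    state = json.load(f)

train_logs = [entry for entry in state["log_history"] if "loss" in entry]
steps = [entry["step"] for entry in train_logs]
losses = [entry["loss"] for entry in train_logs]
print(f"{len(steps)} logged steps, final training loss {losses[-1]:.3f}")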