```yaml
model_name: google/gemma-2-2b
out_dir: pretrained_model/models
precision: bf16-mixed
initial_checkpoint_dir: google/gemma-2-2b
resume: false

data:
  class_path: litgpt.data.LitData
  init_args:
    data_path: data
    seed: 42
    num_workers: 8

train:
  save_interval: 1000
  log_interval: 1
  global_batch_size: 4
  micro_batch_size: 1
  lr_warmup_steps: 2000
  max_tokens: 156800708
  max_seq_length: 2048
  tie_embeddings: false
  max_norm: 1.0
  min_lr: 4.0e-05

eval:
  interval: 1000
  max_iters: 100
  initial_validation: false
  final_validation: true

optimizer: AdamW
devices: auto
num_nodes: 1
tokenizer_dir: google/gemma-2-2b
logger_name: tensorboard
seed: 42
```
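Because both `initial_checkpoint_dir` and `tokenizer_dir` point at `google/gemma-2-2b`, this run continues pretraining from the released checkpoint rather than training from scratch. Once the config is saved to a file (e.g. `pretrain-gemma.yaml`, a name chosen here for illustration), it can be passed to the LitGPT CLI, which accepts a YAML config via `--config`: `litgpt pretrain --config pretrain-gemma.yaml`.

A few of the numbers in the `train:` block are easier to judge together than apart. The following minimal Python sketch works out the arithmetic they imply, assuming `devices: auto` resolves to a single GPU (the config itself does not pin a device count):

```python
# Sanity-check arithmetic for the train: block above.
# Assumption: devices: auto resolves to a single GPU; adjust `devices` otherwise.

global_batch_size = 4        # sequences per optimizer step (train.global_batch_size)
micro_batch_size = 1         # sequences per device per forward pass (train.micro_batch_size)
max_seq_length = 2048        # tokens per sequence (train.max_seq_length)
max_tokens = 156_800_708     # total training tokens (train.max_tokens)
devices = 1                  # assumed resolution of devices: auto

# Gradient accumulation makes each optimizer step see the full global batch.
grad_accum_steps = global_batch_size // (micro_batch_size * devices)

# Tokens consumed per optimizer step, and the resulting total step count.
tokens_per_step = global_batch_size * max_seq_length
approx_steps = max_tokens // tokens_per_step

print(f"gradient accumulation steps: {grad_accum_steps}")  # 4
print(f"approx. optimizer steps: {approx_steps}")          # 19140
```

Under these assumptions, each optimizer step accumulates four micro-batches and the run lasts roughly 19k optimizer steps, so `lr_warmup_steps: 2000` warms the learning rate up over about the first tenth of training.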