nathanael-fijalkow committed
Commit c93f9b0 · verified · 1 parent: ff6121a

Chess Challenge submission by nathanael-fijalkow

README.md CHANGED
@@ -26,10 +26,6 @@ model = AutoModelForCausalLM.from_pretrained("LLM-course/my-chess-model", trust_
 tokenizer = AutoTokenizer.from_pretrained("LLM-course/my-chess-model", trust_remote_code=True)
 ```
 
-## Model Details
-
-- **Architecture**: Chess Transformer (GPT-style)
-- **Vocab size**: 1682
-- **Embedding dim**: 128
-- **Layers**: 4
-- **Heads**: 4
+## Evaluation
+
+This model is evaluated at the [Chess Challenge Arena](https://huggingface.co/spaces/LLM-course/Chess1MChallenge).
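For reference, a minimal sketch of what the README's loading snippet enables, mirroring the [BOS]-prepending done in data.py during training; the opening moves and sampling settings here are illustrative, and `generate_move` is the helper defined in model.py below:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model = AutoModelForCausalLM.from_pretrained("LLM-course/my-chess-model", trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained("LLM-course/my-chess-model", trust_remote_code=True)

# Encode a game prefix in the dataset's extended UCI notation,
# prepending [BOS] just as ChessDataset does during training.
prefix = tokenizer.bos_token + " " + "WPe2e4 BPe7e5"
input_ids = torch.tensor([tokenizer.encode(prefix)])

# generate_move is the sampling helper defined in model.py.
next_id = model.generate_move(input_ids, temperature=0.8, top_k=10)
print(tokenizer.convert_ids_to_tokens([next_id]))
```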
 
 
 
 
__pycache__/data.cpython-314.pyc ADDED
Binary file (9.7 kB)

__pycache__/model.cpython-314.pyc ADDED
Binary file (21.8 kB)

__pycache__/tokenizer.cpython-314.pyc ADDED
Binary file (13.5 kB)
 
config.json CHANGED
@@ -2,6 +2,10 @@
   "architectures": [
     "ChessForCausalLM"
   ],
+  "auto_map": {
+    "AutoConfig": "model.ChessConfig",
+    "AutoModelForCausalLM": "model.ChessForCausalLM"
+  },
   "bos_token_id": 1,
   "dropout": 0.1,
   "dtype": "float32",
data.py ADDED
@@ -0,0 +1,253 @@
"""
Data loading utilities for the Chess Challenge.

This module provides functions to load and process chess game data
from the Lichess dataset on Hugging Face.
"""

from __future__ import annotations

from typing import Dict, Iterator, List, Optional

import torch
from torch.utils.data import Dataset


class ChessDataset(Dataset):
    """
    PyTorch Dataset for chess games.

    This dataset loads games from a Hugging Face dataset and prepares
    them for language modeling training.

    Each game is tokenized and truncated/padded to max_length.
    Labels are a copy of input_ids (with padding set to -100); the model
    shifts them internally for next-token prediction.

    Example:
        >>> from tokenizer import ChessTokenizer
        >>> tokenizer = ChessTokenizer.build_vocab_from_dataset()
        >>> dataset = ChessDataset(tokenizer, max_length=256)
        >>> sample = dataset[0]
        >>> print(sample["input_ids"].shape)  # (256,)
    """

    def __init__(
        self,
        tokenizer,
        dataset_name: str = "dlouapre/lichess_2025-01_1M",
        split: str = "train",
        column: str = "text",
        max_length: int = 256,
        max_samples: Optional[int] = None,
    ):
        """
        Initialize the chess dataset.

        Args:
            tokenizer: The chess tokenizer to use.
            dataset_name: Name of the dataset on Hugging Face Hub.
            split: Dataset split to use.
            column: Column containing the game strings.
            max_length: Maximum sequence length.
            max_samples: Maximum number of samples to load.
        """
        from datasets import load_dataset

        self.tokenizer = tokenizer
        self.max_length = max_length
        self.column = column

        # Load dataset
        dataset = load_dataset(dataset_name, split=split)

        if max_samples is not None:
            dataset = dataset.select(range(min(max_samples, len(dataset))))

        self.data = dataset

    def __len__(self) -> int:
        return len(self.data)

    def __getitem__(self, idx: int) -> Dict[str, torch.Tensor]:
        game = self.data[idx][self.column]

        # Prepend BOS token for proper language modeling
        game_with_bos = self.tokenizer.bos_token + " " + game

        # Tokenize
        encoding = self.tokenizer(
            game_with_bos,
            truncation=True,
            max_length=self.max_length,
            padding="max_length",
            return_tensors="pt",
        )

        # Squeeze batch dimension
        input_ids = encoding["input_ids"].squeeze(0)
        attention_mask = encoding["attention_mask"].squeeze(0)

        # Labels are the same as input_ids (the model shifts internally)
        labels = input_ids.clone()

        # Set padding tokens to -100 to ignore them in the loss
        labels[attention_mask == 0] = -100

        return {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "labels": labels,
        }


class ChessDataCollator:
    """
    Data collator for chess games.

    Since ChessDataset already pads every sample to max_length, this
    collator simply stacks the per-sample tensors into a batch.
    """

    def __init__(self, tokenizer, max_length: int = 256):
        self.tokenizer = tokenizer
        self.max_length = max_length

    def __call__(self, features: List[Dict]) -> Dict[str, torch.Tensor]:
        # Stack tensors
        input_ids = torch.stack([f["input_ids"] for f in features])
        attention_mask = torch.stack([f["attention_mask"] for f in features])
        labels = torch.stack([f["labels"] for f in features])

        return {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "labels": labels,
        }


def create_train_val_datasets(
    tokenizer,
    dataset_name: str = "dlouapre/lichess_2025-01_1M",
    max_length: int = 256,
    train_samples: Optional[int] = None,
    val_samples: int = 5000,
    val_ratio: float = 0.05,
):
    """
    Create training and validation datasets.

    Args:
        tokenizer: The chess tokenizer.
        dataset_name: Name of the dataset.
        max_length: Maximum sequence length.
        train_samples: Maximum training samples (None for all).
        val_samples: Number of validation samples.
        val_ratio: Ratio of validation samples (used if train_samples is None).

    Returns:
        Tuple of (train_dataset, val_dataset).
    """
    from datasets import load_dataset

    # Load full dataset
    full_dataset = load_dataset(dataset_name, split="train")

    # Determine split sizes
    total = len(full_dataset)

    if train_samples is not None:
        n_train = min(train_samples, total - val_samples)
    else:
        n_train = int(total * (1 - val_ratio))

    n_val = min(val_samples, total - n_train)

    # Split dataset
    train_data = full_dataset.select(range(n_train))
    val_data = full_dataset.select(range(n_train, n_train + n_val))

    # Create dataset objects (each loads the dataset again internally;
    # the selected split is swapped in just below)
    train_dataset = ChessDataset(
        tokenizer=tokenizer,
        dataset_name=dataset_name,
        max_length=max_length,
    )
    train_dataset.data = train_data

    val_dataset = ChessDataset(
        tokenizer=tokenizer,
        dataset_name=dataset_name,
        max_length=max_length,
    )
    val_dataset.data = val_data

    return train_dataset, val_dataset


def stream_games(
    dataset_name: str = "dlouapre/lichess_2025-01_1M",
    split: str = "train",
    column: str = "text",
) -> Iterator[str]:
    """
    Stream games from the dataset for memory-efficient processing.

    Args:
        dataset_name: Name of the dataset on Hugging Face Hub.
        split: Dataset split to use.
        column: Column containing the game strings.

    Yields:
        Game strings one at a time.
    """
    from datasets import load_dataset

    dataset = load_dataset(dataset_name, split=split, streaming=True)

    for example in dataset:
        yield example[column]


def analyze_dataset_statistics(
    dataset_name: str = "dlouapre/lichess_2025-01_1M",
    max_samples: int = 10000,
) -> Dict:
    """
    Analyze statistics of the chess dataset.

    Args:
        dataset_name: Name of the dataset.
        max_samples: Maximum number of samples to analyze.

    Returns:
        Dictionary containing dataset statistics.
    """
    from collections import Counter
    from datasets import load_dataset

    dataset = load_dataset(dataset_name, split="train")
    dataset = dataset.select(range(min(max_samples, len(dataset))))

    game_lengths = []
    move_counts = Counter()
    opening_moves = Counter()

    for example in dataset:
        moves = example["text"].strip().split()
        game_lengths.append(len(moves))
        move_counts.update(moves)

        # Track common openings (first 4 moves)
        if len(moves) >= 4:
            opening = " ".join(moves[:4])
            opening_moves[opening] += 1

    return {
        "total_games": len(dataset),
        "avg_game_length": sum(game_lengths) / len(game_lengths),
        "min_game_length": min(game_lengths),
        "max_game_length": max(game_lengths),
        "unique_moves": len(move_counts),
        "most_common_moves": move_counts.most_common(20),
        "most_common_openings": opening_moves.most_common(10),
    }
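A short usage sketch of the pieces above; the sample counts and batch size are illustrative, and min_frequency is lowered so the smoke test still sees a reasonable vocabulary:

```python
from torch.utils.data import DataLoader

from data import ChessDataCollator, ChessDataset
from tokenizer import ChessTokenizer

# Build a small vocabulary and dataset for a quick smoke test.
tokenizer = ChessTokenizer.build_vocab_from_dataset(max_samples=10_000, min_frequency=50)
dataset = ChessDataset(tokenizer, max_length=256, max_samples=1_000)

loader = DataLoader(dataset, batch_size=8, collate_fn=ChessDataCollator(tokenizer))
batch = next(iter(loader))
print(batch["input_ids"].shape)         # torch.Size([8, 256])
print((batch["labels"] == -100).any())  # padding positions are masked out of the loss
```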
model.py ADDED
@@ -0,0 +1,438 @@
"""
Chess Transformer Model for the Chess Challenge.

This module provides a simple GPT-style transformer architecture
designed to fit within the 1M parameter constraint.

Key components:
- ChessConfig: Configuration class for model hyperparameters
- ChessForCausalLM: The main model class for next-move prediction
"""

from __future__ import annotations

import math
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import PretrainedConfig, PreTrainedModel
from transformers.modeling_outputs import CausalLMOutputWithPast


class ChessConfig(PretrainedConfig):
    """
    Configuration class for the Chess Transformer model.

    This configuration is designed for a ~1M parameter model.
    Students can adjust these values to explore different architectures.

    Parameter budget breakdown (with default values):
    - Token embeddings: 1200 x 128 = 153,600
    - Position embeddings: 256 x 128 = 32,768
    - Transformer layers: 6 x ~165,000 = ~992,000
    - LM head (with weight tying): 0 (shared with token embeddings)
    - Total: ~1,179,000 parameters. Note that the 6-layer default slightly
      exceeds the 1M budget; train.py defaults to n_layer=4 (~848,000).

    Attributes:
        vocab_size: Size of the vocabulary (number of unique moves).
        n_embd: Embedding dimension (d_model).
        n_layer: Number of transformer layers.
        n_head: Number of attention heads.
        n_ctx: Maximum sequence length (context window).
        n_inner: Feed-forward inner dimension (default: 3 * n_embd).
        dropout: Dropout probability.
        layer_norm_epsilon: Epsilon for layer normalization.
        tie_weights: Whether to tie embedding and output weights.
    """

    model_type = "chess_transformer"

    def __init__(
        self,
        vocab_size: int = 1200,
        n_embd: int = 128,
        n_layer: int = 6,
        n_head: int = 4,
        n_ctx: int = 256,
        n_inner: Optional[int] = None,
        dropout: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        tie_weights: bool = True,
        pad_token_id: int = 0,
        bos_token_id: int = 1,
        eos_token_id: int = 2,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_ctx = n_ctx
        self.n_inner = n_inner if n_inner is not None else 3 * n_embd  # Reduced from 4x to 3x
        self.dropout = dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.tie_weights = tie_weights
        # Inform the HF base class about tying behavior
        self.tie_word_embeddings = bool(tie_weights)


class MultiHeadAttention(nn.Module):
    """
    Multi-head self-attention module.

    This is a standard scaled dot-product attention implementation
    with causal masking for autoregressive generation.
    """

    def __init__(self, config: ChessConfig):
        super().__init__()

        assert config.n_embd % config.n_head == 0, \
            f"n_embd ({config.n_embd}) must be divisible by n_head ({config.n_head})"

        self.n_head = config.n_head
        self.n_embd = config.n_embd
        self.head_dim = config.n_embd // config.n_head

        # Combined QKV projection for efficiency
        self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd)
        self.c_proj = nn.Linear(config.n_embd, config.n_embd)

        self.dropout = nn.Dropout(config.dropout)

        # Causal mask, registered once here (persistent=False keeps it
        # out of checkpoints)
        self.register_buffer(
            "bias",
            torch.tril(torch.ones(config.n_ctx, config.n_ctx)).view(
                1, 1, config.n_ctx, config.n_ctx
            ),
            persistent=False,
        )

    def forward(
        self,
        x: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        batch_size, seq_len, _ = x.size()

        # Compute Q, K, V
        qkv = self.c_attn(x)
        q, k, v = qkv.split(self.n_embd, dim=2)

        # Reshape for multi-head attention
        q = q.view(batch_size, seq_len, self.n_head, self.head_dim).transpose(1, 2)
        k = k.view(batch_size, seq_len, self.n_head, self.head_dim).transpose(1, 2)
        v = v.view(batch_size, seq_len, self.n_head, self.head_dim).transpose(1, 2)

        # Scaled dot-product attention
        attn_weights = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.head_dim)

        # Apply causal mask
        causal_mask = self.bias[:, :, :seq_len, :seq_len]
        attn_weights = attn_weights.masked_fill(causal_mask == 0, float("-inf"))

        # Apply attention mask (for padding)
        if attention_mask is not None:
            # attention_mask shape: (batch_size, seq_len) -> (batch_size, 1, 1, seq_len)
            attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
            attn_weights = attn_weights.masked_fill(attention_mask == 0, float("-inf"))

        attn_weights = F.softmax(attn_weights, dim=-1)
        attn_weights = self.dropout(attn_weights)

        # Apply attention to values
        attn_output = torch.matmul(attn_weights, v)

        # Reshape back
        attn_output = attn_output.transpose(1, 2).contiguous().view(
            batch_size, seq_len, self.n_embd
        )

        # Output projection
        attn_output = self.c_proj(attn_output)

        return attn_output


class FeedForward(nn.Module):
    """
    Feed-forward network (MLP) module.

    Standard two-layer MLP with GELU activation.
    """

    def __init__(self, config: ChessConfig):
        super().__init__()

        self.c_fc = nn.Linear(config.n_embd, config.n_inner)
        self.c_proj = nn.Linear(config.n_inner, config.n_embd)
        self.dropout = nn.Dropout(config.dropout)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.c_fc(x)
        x = F.gelu(x)
        x = self.c_proj(x)
        x = self.dropout(x)
        return x


class TransformerBlock(nn.Module):
    """
    A single transformer block with attention and feed-forward layers.

    Uses pre-normalization (LayerNorm before attention/FFN) for better
    training stability.
    """

    def __init__(self, config: ChessConfig):
        super().__init__()

        self.ln_1 = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
        self.attn = MultiHeadAttention(config)
        self.ln_2 = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
        self.mlp = FeedForward(config)

    def forward(
        self,
        x: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        # Pre-norm attention
        x = x + self.attn(self.ln_1(x), attention_mask=attention_mask)
        # Pre-norm FFN
        x = x + self.mlp(self.ln_2(x))
        return x


class ChessForCausalLM(PreTrainedModel):
    """
    Chess Transformer for Causal Language Modeling (next-move prediction).

    This model is designed to predict the next chess move given a sequence
    of previous moves. It uses a GPT-style architecture with:
    - Token embeddings for chess moves
    - Learned positional embeddings
    - Stacked transformer blocks
    - Linear head for next-token prediction

    The model supports weight tying between the embedding layer and the
    output projection to save parameters.

    Example:
        >>> config = ChessConfig(vocab_size=1200, n_embd=128, n_layer=6)
        >>> model = ChessForCausalLM(config)
        >>> inputs = {"input_ids": torch.tensor([[1, 42, 87]])}
        >>> outputs = model(**inputs)
        >>> next_move_logits = outputs.logits[:, -1, :]
    """

    config_class = ChessConfig
    base_model_prefix = "transformer"
    supports_gradient_checkpointing = True
    # Suppress missing-key warning for the tied lm_head when loading
    # (note the leading underscore: this is the attribute HF reads)
    _keys_to_ignore_on_load_missing = ["lm_head.weight"]

    def __init__(self, config: ChessConfig):
        super().__init__(config)

        # Token and position embeddings
        self.wte = nn.Embedding(config.vocab_size, config.n_embd)
        self.wpe = nn.Embedding(config.n_ctx, config.n_embd)

        self.drop = nn.Dropout(config.dropout)

        # Transformer blocks
        self.h = nn.ModuleList([
            TransformerBlock(config) for _ in range(config.n_layer)
        ])

        # Final layer norm
        self.ln_f = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)

        # Output head
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)

        # Declare tied weights for proper serialization
        if config.tie_weights:
            self._tied_weights_keys = ["lm_head.weight"]

        # Initialize weights
        self.post_init()

        # Tie weights if configured
        if config.tie_weights:
            self.tie_weights()

    def get_input_embeddings(self) -> nn.Module:
        return self.wte

    def set_input_embeddings(self, new_embeddings: nn.Module):
        self.wte = new_embeddings
        if getattr(self.config, "tie_weights", False):
            self.tie_weights()

    def get_output_embeddings(self) -> nn.Module:
        return self.lm_head

    def set_output_embeddings(self, new_embeddings: nn.Module):
        self.lm_head = new_embeddings

    def tie_weights(self):
        # Use the HF helper to tie or clone depending on config
        if getattr(self.config, "tie_weights", False) or getattr(self.config, "tie_word_embeddings", False):
            self._tie_or_clone_weights(self.lm_head, self.wte)

    def _init_weights(self, module: nn.Module):
        """Initialize weights following GPT-2 style."""
        if isinstance(module, nn.Linear):
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
            if module.bias is not None:
                torch.nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
        elif isinstance(module, nn.LayerNorm):
            torch.nn.init.ones_(module.weight)
            torch.nn.init.zeros_(module.bias)

    def forward(
        self,
        input_ids: torch.LongTensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        return_dict: Optional[bool] = None,
        **kwargs,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        """
        Forward pass of the model.

        Args:
            input_ids: Token IDs of shape (batch_size, seq_len).
            attention_mask: Attention mask of shape (batch_size, seq_len).
            position_ids: Position IDs of shape (batch_size, seq_len).
            labels: Labels for language modeling loss.
            return_dict: Whether to return a ModelOutput object.

        Returns:
            CausalLMOutputWithPast containing loss (if labels provided) and logits.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        batch_size, seq_len = input_ids.size()
        device = input_ids.device

        # Create position IDs if not provided
        if position_ids is None:
            position_ids = torch.arange(seq_len, device=device).unsqueeze(0).expand(batch_size, -1)

        # Get embeddings
        token_embeds = self.wte(input_ids)
        position_embeds = self.wpe(position_ids)
        hidden_states = self.drop(token_embeds + position_embeds)

        # Pass through transformer blocks
        for block in self.h:
            hidden_states = block(hidden_states, attention_mask=attention_mask)

        # Final layer norm
        hidden_states = self.ln_f(hidden_states)

        # Get logits
        logits = self.lm_head(hidden_states)

        # Compute loss if labels are provided
        loss = None
        if labels is not None:
            # Shift logits and labels for next-token prediction
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()

            # Flatten for cross-entropy; padding positions carry -100 labels
            loss_fct = nn.CrossEntropyLoss(ignore_index=-100)
            loss = loss_fct(
                shift_logits.view(-1, shift_logits.size(-1)),
                shift_labels.view(-1),
            )

        if not return_dict:
            output = (logits,)
            return ((loss,) + output) if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=None,
            hidden_states=None,
            attentions=None,
        )

    @torch.no_grad()
    def generate_move(
        self,
        input_ids: torch.LongTensor,
        temperature: float = 1.0,
        top_k: Optional[int] = None,
        top_p: Optional[float] = None,
    ) -> int:
        """
        Generate the next move given a sequence of moves.

        Args:
            input_ids: Token IDs of shape (1, seq_len).
            temperature: Sampling temperature (1.0 = no change).
            top_k: If set, only sample from top k tokens.
            top_p: If set, use nucleus sampling with this threshold.

        Returns:
            The token ID of the predicted next move.
        """
        self.eval()

        # Get logits for the last position
        outputs = self(input_ids)
        logits = outputs.logits[:, -1, :] / temperature

        # Apply top-k filtering
        if top_k is not None:
            indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
            logits[indices_to_remove] = float("-inf")

        # Apply top-p (nucleus) filtering
        if top_p is not None:
            sorted_logits, sorted_indices = torch.sort(logits, descending=True)
            cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)

            # Remove tokens with cumulative probability above the threshold
            sorted_indices_to_remove = cumulative_probs > top_p
            sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
            sorted_indices_to_remove[..., 0] = 0

            indices_to_remove = sorted_indices_to_remove.scatter(
                dim=-1, index=sorted_indices, src=sorted_indices_to_remove
            )
            logits[indices_to_remove] = float("-inf")

        # Sample from the distribution
        probs = F.softmax(logits, dim=-1)
        next_token = torch.multinomial(probs, num_samples=1)

        return next_token.item()


# Register the model with Auto classes for easy loading
from transformers import AutoConfig, AutoModelForCausalLM

AutoConfig.register("chess_transformer", ChessConfig)
AutoModelForCausalLM.register(ChessConfig, ChessForCausalLM)
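To double-check the budget arithmetic in ChessConfig's docstring, a quick parameter count (a sketch; the approximate totals follow from the defaults above, and PyTorch's parameters() counts tied weights once):

```python
from model import ChessConfig, ChessForCausalLM

def n_params(model) -> int:
    # parameters() deduplicates shared tensors, so the tied lm_head
    # weight is not double-counted.
    return sum(p.numel() for p in model.parameters())

print(n_params(ChessForCausalLM(ChessConfig())))           # ~1.18M with the 6-layer default
print(n_params(ChessForCausalLM(ChessConfig(n_layer=4))))  # ~848k, under the 1M limit
```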
tokenizer.py ADDED
@@ -0,0 +1,278 @@
"""
Custom Chess Tokenizer for the Chess Challenge.

This tokenizer treats each move as a single token using the extended UCI notation
from the Lichess dataset (e.g., WPe2e4, BNg8f6).

The dataset format uses:
- W/B prefix for White/Black
- Piece letter: P=Pawn, N=Knight, B=Bishop, R=Rook, Q=Queen, K=King
- Source and destination squares (e.g., e2e4)
- Special suffixes: (x)=capture, (+)=check, (+*)=checkmate, (o)/(O)=castling
"""

from __future__ import annotations

import json
import os
from typing import Dict, List, Optional

from transformers import PreTrainedTokenizer


class ChessTokenizer(PreTrainedTokenizer):
    """
    A custom tokenizer for chess moves using extended UCI notation.

    This tokenizer maps each possible chess move to a unique token ID.
    The vocabulary is built from the training dataset to ensure all moves
    encountered during training have a corresponding token.

    Example:
        >>> tokenizer = ChessTokenizer()
        >>> tokenizer.encode("WPe2e4 BPe7e5")
        [42, 87]  # illustrative IDs, one per move; BOS/EOS are added
                  # by the caller (see the BOS-prepending in data.py)
    """

    model_input_names = ["input_ids", "attention_mask"]
    vocab_files_names = {"vocab_file": "vocab.json"}

    # Special tokens
    PAD_TOKEN = "[PAD]"
    BOS_TOKEN = "[BOS]"
    EOS_TOKEN = "[EOS]"
    UNK_TOKEN = "[UNK]"

    def __init__(
        self,
        vocab_file: Optional[str] = None,
        vocab: Optional[Dict[str, int]] = None,
        **kwargs,
    ):
        """
        Initialize the chess tokenizer.

        Args:
            vocab_file: Path to a JSON file containing the vocabulary mapping.
            vocab: Dictionary mapping tokens to IDs (alternative to vocab_file).
            **kwargs: Additional arguments passed to PreTrainedTokenizer.
        """
        # Initialize special tokens
        self._pad_token = self.PAD_TOKEN
        self._bos_token = self.BOS_TOKEN
        self._eos_token = self.EOS_TOKEN
        self._unk_token = self.UNK_TOKEN

        # Remove any duplicate special-token entries passed through kwargs
        # to avoid "multiple values for keyword" errors when loading from disk.
        kwargs.pop("pad_token", None)
        kwargs.pop("bos_token", None)
        kwargs.pop("eos_token", None)
        kwargs.pop("unk_token", None)

        # Load or create vocabulary
        if vocab is not None:
            self._vocab = vocab
        elif vocab_file is not None and os.path.exists(vocab_file):
            with open(vocab_file, "r", encoding="utf-8") as f:
                self._vocab = json.load(f)
        else:
            # Create a minimal vocabulary with just special tokens.
            # The full vocabulary should be built from the dataset.
            self._vocab = self._create_default_vocab()

        # Create reverse mapping
        self._ids_to_tokens = {v: k for k, v in self._vocab.items()}

        # Call parent init AFTER setting up the vocab
        super().__init__(
            pad_token=self._pad_token,
            bos_token=self._bos_token,
            eos_token=self._eos_token,
            unk_token=self._unk_token,
            **kwargs,
        )

    def _create_default_vocab(self) -> Dict[str, int]:
        """
        Create a minimal default vocabulary with just special tokens.

        For the full vocabulary, use `build_vocab_from_dataset()`.
        This minimal vocab is just a placeholder; you should build from data.
        """
        special_tokens = [self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN]
        vocab = {token: idx for idx, token in enumerate(special_tokens)}
        return vocab

    @classmethod
    def build_vocab_from_iterator(
        cls,
        iterator,
        min_frequency: int = 1,
    ) -> "ChessTokenizer":
        """
        Build a tokenizer vocabulary from an iterator of game strings.

        Args:
            iterator: An iterator yielding game strings (space-separated moves).
            min_frequency: Minimum frequency for a token to be included.

        Returns:
            A ChessTokenizer with the built vocabulary.
        """
        from collections import Counter

        token_counts = Counter()

        for game in iterator:
            moves = game.strip().split()
            token_counts.update(moves)

        # Filter by frequency
        tokens = [
            token for token, count in token_counts.items()
            if count >= min_frequency
        ]

        # Sort for reproducibility
        tokens = sorted(tokens)

        # Build vocabulary
        special_tokens = [cls.PAD_TOKEN, cls.BOS_TOKEN, cls.EOS_TOKEN, cls.UNK_TOKEN]
        vocab = {token: idx for idx, token in enumerate(special_tokens + tokens)}

        return cls(vocab=vocab)

    @classmethod
    def build_vocab_from_dataset(
        cls,
        dataset_name: str = "dlouapre/lichess_2025-01_1M",
        split: str = "train",
        column: str = "text",
        min_frequency: int = 500,
        max_samples: Optional[int] = 100000,
    ) -> "ChessTokenizer":
        """
        Build a tokenizer vocabulary from a Hugging Face dataset.

        Args:
            dataset_name: Name of the dataset on Hugging Face Hub.
            split: Dataset split to use.
            column: Column containing the game strings.
            min_frequency: Minimum frequency for a token to be included (default: 500).
            max_samples: Maximum number of samples to process (default: 100k).

        Returns:
            A ChessTokenizer with the built vocabulary.
        """
        from datasets import load_dataset

        dataset = load_dataset(dataset_name, split=split)

        if max_samples is not None:
            dataset = dataset.select(range(min(max_samples, len(dataset))))

        def game_iterator():
            for example in dataset:
                yield example[column]

        return cls.build_vocab_from_iterator(game_iterator(), min_frequency=min_frequency)

    @property
    def vocab_size(self) -> int:
        """Return the size of the vocabulary."""
        return len(self._vocab)

    def get_vocab(self) -> Dict[str, int]:
        """Return the vocabulary as a dictionary."""
        return dict(self._vocab)

    def _tokenize(self, text: str) -> List[str]:
        """
        Tokenize a string of moves into a list of tokens.

        Args:
            text: A string of space-separated moves.

        Returns:
            List of move tokens.
        """
        return text.strip().split()

    def _convert_token_to_id(self, token: str) -> int:
        """Convert a token to its ID."""
        return self._vocab.get(token, self._vocab.get(self.UNK_TOKEN, 0))

    def _convert_id_to_token(self, index: int) -> str:
        """Convert an ID to its token."""
        return self._ids_to_tokens.get(index, self.UNK_TOKEN)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Convert a list of tokens back to a string."""
        # Filter out special tokens for cleaner output
        special = {self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN}
        return " ".join(t for t in tokens if t not in special)

    def save_vocabulary(
        self,
        save_directory: str,
        filename_prefix: Optional[str] = None,
    ) -> tuple:
        """
        Save the vocabulary to a JSON file.

        Args:
            save_directory: Directory to save the vocabulary.
            filename_prefix: Optional prefix for the filename.

        Returns:
            Tuple containing the path to the saved vocabulary file.
        """
        if not os.path.isdir(save_directory):
            os.makedirs(save_directory, exist_ok=True)

        vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + "vocab.json",
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            json.dump(self._vocab, f, ensure_ascii=False, indent=2)

        return (vocab_file,)


def count_vocab_from_dataset(
    dataset_name: str = "dlouapre/lichess_2025-01_1M",
    split: str = "train",
    column: str = "text",
    max_samples: Optional[int] = 10000,
) -> Dict[str, int]:
    """
    Count token frequencies in a dataset (useful for vocabulary analysis).

    Args:
        dataset_name: Name of the dataset on Hugging Face Hub.
        split: Dataset split to use.
        column: Column containing the game strings.
        max_samples: Maximum number of samples to process.

    Returns:
        Dictionary mapping tokens to their frequencies.
    """
    from collections import Counter
    from datasets import load_dataset

    dataset = load_dataset(dataset_name, split=split)

    if max_samples is not None:
        dataset = dataset.select(range(min(max_samples, len(dataset))))

    token_counts = Counter()

    for example in dataset:
        moves = example[column].strip().split()
        token_counts.update(moves)

    return dict(token_counts)
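A small round-trip sketch of the tokenizer on a toy corpus (the games are illustrative):

```python
from tokenizer import ChessTokenizer

games = [
    "WPe2e4 BPe7e5 WNg1f3",
    "WPd2d4 BNg8f6",
]

tok = ChessTokenizer.build_vocab_from_iterator(games)
print(tok.vocab_size)  # 9: 4 special tokens + 5 distinct moves

ids = tok.encode("WPe2e4 BNg8f6")            # one ID per move
tokens = tok.convert_ids_to_tokens(ids)
print(tok.convert_tokens_to_string(tokens))  # "WPe2e4 BNg8f6"
```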
tokenizer_config.json CHANGED
@@ -33,6 +33,12 @@
       "special": true
     }
   },
+  "auto_map": {
+    "AutoTokenizer": [
+      "tokenizer.ChessTokenizer",
+      "tokenizer.ChessTokenizer"
+    ]
+  },
   "bos_token": "[BOS]",
   "clean_up_tokenization_spaces": false,
   "eos_token": "[EOS]",
@@ -41,4 +47,4 @@
   "pad_token": "[PAD]",
   "tokenizer_class": "ChessTokenizer",
   "unk_token": "[UNK]"
-}
+}
train.py ADDED
@@ -0,0 +1,290 @@
"""
Training script for the Chess Challenge.

This script provides a complete training pipeline using the Hugging Face Trainer.
Students can modify this script to experiment with different training strategies.
"""

from __future__ import annotations

import argparse
import os
import warnings
from pathlib import Path

# Suppress warnings from third-party libraries (multiprocess has Python 3.14 compat issues)
warnings.filterwarnings("ignore", message="'return' in a 'finally' block")

import torch
from transformers import (
    Trainer,
    TrainingArguments,
    set_seed,
)

from data import ChessDataCollator, create_train_val_datasets
from model import ChessConfig, ChessForCausalLM
from tokenizer import ChessTokenizer


def count_parameters(model, trainable_only=True):
    """Count the number of parameters in a model."""
    if trainable_only:
        return sum(p.numel() for p in model.parameters() if p.requires_grad)
    return sum(p.numel() for p in model.parameters())


def parse_args():
    """Parse command line arguments."""
    parser = argparse.ArgumentParser(
        description="Train a chess-playing language model"
    )

    # Model arguments
    parser.add_argument(
        "--n_embd", type=int, default=128,
        help="Embedding dimension"
    )
    parser.add_argument(
        "--n_layer", type=int, default=4,
        help="Number of transformer layers"
    )
    parser.add_argument(
        "--n_head", type=int, default=4,
        help="Number of attention heads"
    )
    parser.add_argument(
        "--n_ctx", type=int, default=256,
        help="Maximum context length"
    )
    parser.add_argument(
        "--n_inner", type=int, default=None,
        help="Feed-forward inner dimension (default: 3 * n_embd)"
    )
    parser.add_argument(
        "--dropout", type=float, default=0.1,
        help="Dropout probability"
    )
    parser.add_argument(
        "--no_tie_weights", action="store_true",
        help="Disable weight tying between embedding and output layers"
    )

    # Data arguments
    parser.add_argument(
        "--dataset_name", type=str, default="dlouapre/lichess_2025-01_1M",
        help="Name of the dataset on Hugging Face Hub"
    )
    parser.add_argument(
        "--max_train_samples", type=int, default=None,
        help="Maximum number of training samples"
    )
    parser.add_argument(
        "--val_samples", type=int, default=5000,
        help="Number of validation samples"
    )

    # Training arguments
    parser.add_argument(
        "--output_dir", type=str, default="./output",
        help="Output directory for model and logs"
    )
    parser.add_argument(
        "--num_train_epochs", type=int, default=3,
        help="Number of training epochs"
    )
    parser.add_argument(
        "--per_device_train_batch_size", type=int, default=32,
        help="Training batch size per device"
    )
    parser.add_argument(
        "--per_device_eval_batch_size", type=int, default=64,
        help="Evaluation batch size per device"
    )
    parser.add_argument(
        "--learning_rate", type=float, default=5e-4,
        help="Learning rate"
    )
    parser.add_argument(
        "--weight_decay", type=float, default=0.01,
        help="Weight decay"
    )
    parser.add_argument(
        "--warmup_ratio", type=float, default=0.1,
        help="Warmup ratio"
    )
    parser.add_argument(
        "--seed", type=int, default=42,
        help="Random seed"
    )

    # Logging arguments
    parser.add_argument(
        "--logging_steps", type=int, default=100,
        help="Logging frequency"
    )
    parser.add_argument(
        "--eval_steps", type=int, default=500,
        help="Evaluation frequency (unused with eval_strategy='epoch')"
    )
    parser.add_argument(
        "--save_steps", type=int, default=1000,
        help="Checkpoint saving frequency (unused with save_strategy='epoch')"
    )

    return parser.parse_args()


def main():
    """Main training function."""
    args = parse_args()

    # Set seed for reproducibility
    set_seed(args.seed)

    print("=" * 60)
    print("CHESS CHALLENGE - TRAINING")
    print("=" * 60)

    # Build tokenizer from dataset
    print("\nBuilding tokenizer from dataset...")
    tokenizer = ChessTokenizer.build_vocab_from_dataset(
        dataset_name=args.dataset_name,
        min_frequency=500,   # Only keep moves that appear at least 500 times
        max_samples=100000,  # Use 100k games to build the vocabulary
    )
    print(f"  Vocabulary size: {tokenizer.vocab_size}")

    # Use the vocab size from the tokenizer
    actual_vocab_size = tokenizer.vocab_size

    # Create model configuration
    print("\nCreating model configuration...")
    config = ChessConfig(
        vocab_size=actual_vocab_size,
        n_embd=args.n_embd,
        n_layer=args.n_layer,
        n_head=args.n_head,
        n_ctx=args.n_ctx,
        n_inner=args.n_inner,
        dropout=args.dropout,
        tie_weights=not args.no_tie_weights,
        pad_token_id=tokenizer.pad_token_id,
        bos_token_id=tokenizer.bos_token_id,
        eos_token_id=tokenizer.eos_token_id,
    )

    # Print configuration
    print("\nModel configuration:")
    print(f"  vocab_size: {config.vocab_size}")
    print(f"  n_embd: {config.n_embd}")
    print(f"  n_layer: {config.n_layer}")
    print(f"  n_head: {config.n_head}")
    print(f"  tie_weights: {config.tie_weights}")

    # Create model
    print("\nCreating model...")
    model = ChessForCausalLM(config)
    n_params = count_parameters(model)
    print(f"  Total parameters: {n_params:,}")

    if n_params > 1_000_000:
        print("WARNING: Model exceeds 1M parameter limit!")
    else:
        print("OK: Model is within 1M parameter limit")

    # Load datasets
    print("\nLoading datasets...")
    train_dataset, val_dataset = create_train_val_datasets(
        tokenizer=tokenizer,
        dataset_name=args.dataset_name,
        max_length=args.n_ctx,
        train_samples=args.max_train_samples,
        val_samples=args.val_samples,
    )
    print(f"  Training samples: {len(train_dataset):,}")
    print(f"  Validation samples: {len(val_dataset):,}")

    # Create data collator
    data_collator = ChessDataCollator(tokenizer, max_length=args.n_ctx)

    # Training arguments
    training_args = TrainingArguments(
        output_dir=args.output_dir,
        num_train_epochs=args.num_train_epochs,
        per_device_train_batch_size=args.per_device_train_batch_size,
        per_device_eval_batch_size=args.per_device_eval_batch_size,
        learning_rate=args.learning_rate,
        weight_decay=args.weight_decay,
        warmup_ratio=args.warmup_ratio,
        logging_dir=os.path.join(args.output_dir, "logs"),
        logging_steps=args.logging_steps,
        eval_strategy="epoch",
        save_strategy="epoch",
        save_total_limit=3,
        load_best_model_at_end=True,
        metric_for_best_model="eval_loss",
        greater_is_better=False,
        seed=args.seed,
        bf16=torch.cuda.is_available() and torch.cuda.is_bf16_supported(),
        report_to=["none"],
    )

    # Create trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=val_dataset,
        data_collator=data_collator,
        tokenizer=tokenizer,
    )

    # Train
    print("\nStarting training...")
    trainer.train()

    # Save final model
    print("\nSaving final model...")
    final_model_dir = os.path.join(args.output_dir, "final_model")
    trainer.save_model(final_model_dir)
    tokenizer.save_pretrained(final_model_dir)

    # Copy model.py and tokenizer.py for trust_remote_code loading
    import json
    import shutil
    script_dir = Path(__file__).parent
    shutil.copy(script_dir / "model.py", final_model_dir)
    shutil.copy(script_dir / "tokenizer.py", final_model_dir)
    print("  Copied model.py and tokenizer.py")

    # Add auto_map to config.json for AutoModelForCausalLM
    config_path = os.path.join(final_model_dir, "config.json")
    with open(config_path) as f:
        config_dict = json.load(f)
    config_dict["auto_map"] = {
        "AutoConfig": "model.ChessConfig",
        "AutoModelForCausalLM": "model.ChessForCausalLM",
    }
    with open(config_path, "w") as f:
        json.dump(config_dict, f, indent=2)
    print("  Added auto_map to config.json")

    # Add auto_map to tokenizer_config.json for AutoTokenizer
    tokenizer_config_path = os.path.join(final_model_dir, "tokenizer_config.json")
    with open(tokenizer_config_path) as f:
        tokenizer_dict = json.load(f)
    tokenizer_dict["auto_map"] = {
        "AutoTokenizer": ["tokenizer.ChessTokenizer", None],
    }
    with open(tokenizer_config_path, "w") as f:
        json.dump(tokenizer_dict, f, indent=2)
    print("  Added auto_map to tokenizer_config.json")

    print("\nTraining complete!")
    print(f"  Model saved to: {final_model_dir}")
    print("  Ready for submission with: python submit.py --model_path " + final_model_dir)


if __name__ == "__main__":
    main()
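Once training finishes, the saved directory can be reloaded through the Auto classes thanks to the auto_map entries written above; a sketch, assuming the default --output_dir:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_dir = "./output/final_model"
model = AutoModelForCausalLM.from_pretrained(model_dir, trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)

# Sample an opening move from just the [BOS] token.
input_ids = torch.tensor([tokenizer.encode(tokenizer.bos_token)])
next_id = model.generate_move(input_ids, temperature=0.5, top_k=5)
print(tokenizer.convert_ids_to_tokens([next_id]))
```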