21 changes: 16 additions & 5 deletions src/modeling/transformer.py
@@ -85,6 +85,13 @@ def __init__(self, vocab_size, embed_size, num_layers, num_heads, ff_dim, block_
         self.norm = nn.LayerNorm(embed_size)  # Final layer normalization
         self.head = nn.Linear(embed_size, vocab_size)  # Linear layer for output logits
         self.block_size = block_size  # Maximum sequence length
+
+        # Causal mask to ensure autoregressive property (tokens don't attend to future tokens)
+        self.register_buffer(
+            "causal_mask",
+            torch.tril(torch.ones(block_size, block_size)).view(1, 1, block_size, block_size)
+        )
+
         self.apply(self._init_weights)  # Initialize weights
 
     # Initialize weights for Linear and Embedding layers
@@ -98,15 +105,19 @@ def _init_weights(self, module):

     def forward(self, x, targets=None):
         B, T = x.shape  # Batch size and sequence length
-        T = min(T, self.block_size)  # Ensure sequence length doesn't exceed block_size
-        pos = torch.arange(0, T, dtype=torch.long, device=x.device).unsqueeze(0)  # Positional indices
-
-        tok_emb = self.embed(x)  # Token embeddings
+        T = min(T, self.block_size)
+        x = x[:, :T]
+        pos = torch.arange(0, T, dtype=torch.long, device=x.device).unsqueeze(0)
+
+        tok_emb = self.embed(x)
         pos_emb = self.pos_embed(pos)  # Positional embeddings
         x = tok_emb + pos_emb  # Combine token and positional embeddings
 
+        # Apply causal mask
+        mask = self.causal_mask[:, :, :T, :T]
+
         for layer in self.layers:
-            x = layer(x)  # Pass through each transformer block
+            x = layer(x, mask=mask)  # Pass through each transformer block with mask
 
         x = self.norm(x)  # Apply final layer normalization
         logits = self.head(x)  # Compute output logits
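Note on how the new buffer is meant to be consumed: the registered causal_mask has shape (1, 1, block_size, block_size), so it broadcasts over the batch and head dimensions of the attention scores. The block/attention implementation is not part of this diff, so the following is only a minimal sketch of the usual pattern (masked_fill before the softmax), under the assumption that layer(x, mask=mask) forwards the mask to scaled dot-product attention; the name masked_attention is illustrative, not the repository's code.

import torch
import torch.nn.functional as F

def masked_attention(q, k, v, mask):
    # q, k, v: (B, num_heads, T, head_dim); mask: (1, 1, T, T), 1 = may attend, 0 = future position
    att = (q @ k.transpose(-2, -1)) / (k.size(-1) ** 0.5)  # raw attention scores, (B, num_heads, T, T)
    att = att.masked_fill(mask == 0, float("-inf"))        # hide future positions from each query
    att = F.softmax(att, dim=-1)                           # normalize over the visible (past) positions
    return att @ v                                          # weighted sum of values, (B, num_heads, T, head_dim)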
48 changes: 48 additions & 0 deletions src/tests/test_causality.py
@@ -0,0 +1,48 @@
import torch
import unittest
from src.modeling.transformer import TransformerModel

class TestCausality(unittest.TestCase):
    def setUp(self):
        self.vocab_size = 100
        self.embed_size = 64
        self.num_layers = 1
        self.num_heads = 2
        self.ff_dim = 128
        self.block_size = 32
        self.dropout = 0.0

        self.model = TransformerModel(
            vocab_size=self.vocab_size,
            embed_size=self.embed_size,
            num_layers=self.num_layers,
            num_heads=self.num_heads,
            ff_dim=self.ff_dim,
            block_size=self.block_size,
            dropout=self.dropout
        ).eval()

    def test_causality(self):
        """
        Changing a token at position t should NOT affect the logits at positions < t.
        """
        seq_len = 10
        x1 = torch.randint(0, self.vocab_size, (1, seq_len))

        # Create x2 identical to x1 up to position 4, but different at position 5
        x2 = x1.clone()
        x2[0, 5] = (x1[0, 5] + 1) % self.vocab_size

        with torch.no_grad():
            logits1, _ = self.model(x1)
            logits2, _ = self.model(x2)

        # Logits at positions 0 to 4 should be identical
        torch.testing.assert_close(logits1[:, :5, :], logits2[:, :5, :], rtol=1e-5, atol=1e-5)

        # Logits from position 5 onwards can (and likely will) differ: position 5 itself
        # sees a different input token, so any non-trivial model should change there
        self.assertFalse(torch.allclose(logits1[:, 5:, :], logits2[:, 5:, :]))

if __name__ == "__main__":
    unittest.main()
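Assuming the repository root is on the Python path (and the src packages have __init__.py files), this test can be run with python -m unittest src.tests.test_causality.

The test perturbs a single position (index 5). A natural generalization is to sweep every position t and assert that logits strictly before t never change. The helper below is only a sketch of that idea, reusing the (logits, loss) return convention the test assumes; it is not part of this PR.

import torch

def check_causality_sweep(model, vocab_size, seq_len=10, seed=0):
    torch.manual_seed(seed)
    x = torch.randint(0, vocab_size, (1, seq_len))
    with torch.no_grad():
        base, _ = model(x)
        for t in range(seq_len):
            x_pert = x.clone()
            x_pert[0, t] = (x[0, t] + 1) % vocab_size  # change only the token at position t
            pert, _ = model(x_pert)
            # everything strictly before the perturbed position must be unchanged
            torch.testing.assert_close(base[:, :t, :], pert[:, :t, :], rtol=1e-5, atol=1e-5)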
2 changes: 1 addition & 1 deletion src/training/utils/checkpoint.py
@@ -2,7 +2,7 @@
 import os
 from datetime import datetime
 
-def save_checkpoint(model, optimizer, step, config):
+def save_checkpoint(model, optimizer, step, config, path=None):
     checkpoint_dir = f"models/checkpoints/{config['model']['name']}"
     os.makedirs(checkpoint_dir, exist_ok=True)
 
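The only change shown for save_checkpoint is the new optional path keyword; the part of the body that uses it is collapsed above, so how it interacts with the default checkpoint_dir is not visible here. The call below is purely an illustration of the new signature under assumptions: the constructor arguments mirror the test's setUp, the optimizer choice is arbitrary, and the config dict contains only the single key visible in the shown body (a real config presumably has more).

import os
import tempfile
import torch

from src.modeling.transformer import TransformerModel
from src.training.utils.checkpoint import save_checkpoint

model = TransformerModel(vocab_size=100, embed_size=64, num_layers=1, num_heads=2,
                         ff_dim=128, block_size=32, dropout=0.0)
optimizer = torch.optim.AdamW(model.parameters(), lr=3e-4)
config = {"model": {"name": "tiny-debug"}}  # hypothetical; only the key used by the visible body

# Explicit, hypothetical destination instead of the default checkpoints directory
save_checkpoint(model, optimizer, step=1000, config=config,
                path=os.path.join(tempfile.mkdtemp(), "step_1000.pt"))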