Rename variable to context_length to make it easier on readers (rasbt#106)

* rename to context length

* fix spacing
rasbt authored Apr 4, 2024
1 parent a940373 commit 2de60d1
Showing 25 changed files with 242 additions and 242 deletions.
24 changes: 12 additions & 12 deletions appendix-D/01_main-chapter-code/appendix-D.ipynb
@@ -61,13 +61,13 @@
"from previous_chapters import GPTModel\n",
"\n",
"GPT_CONFIG_124M = {\n",
" \"vocab_size\": 50257, # Vocabulary size\n",
" \"ctx_len\": 256, # Shortened context length (orig: 1024)\n",
" \"emb_dim\": 768, # Embedding dimension\n",
" \"n_heads\": 12, # Number of attention heads\n",
" \"n_layers\": 12, # Number of layers\n",
" \"drop_rate\": 0.1, # Dropout rate\n",
" \"qkv_bias\": False # Query-key-value bias\n",
" \"vocab_size\": 50257, # Vocabulary size\n",
" \"context_length\": 256, # Shortened context length (orig: 1024)\n",
" \"emb_dim\": 768, # Embedding dimension\n",
" \"n_heads\": 12, # Number of attention heads\n",
" \"n_layers\": 12, # Number of layers\n",
" \"drop_rate\": 0.1, # Dropout rate\n",
" \"qkv_bias\": False # Query-key-value bias\n",
"}\n",
"\n",
"device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
@@ -127,17 +127,17 @@
"train_loader = create_dataloader_v1(\n",
" text_data[:split_idx],\n",
" batch_size=2,\n",
" max_length=GPT_CONFIG_124M[\"ctx_len\"],\n",
" stride=GPT_CONFIG_124M[\"ctx_len\"],\n",
" max_length=GPT_CONFIG_124M[\"context_length\"],\n",
" stride=GPT_CONFIG_124M[\"context_length\"],\n",
" drop_last=True,\n",
" shuffle=True\n",
")\n",
"\n",
"val_loader = create_dataloader_v1(\n",
" text_data[split_idx:],\n",
" batch_size=2,\n",
" max_length=GPT_CONFIG_124M[\"ctx_len\"],\n",
" stride=GPT_CONFIG_124M[\"ctx_len\"],\n",
" max_length=GPT_CONFIG_124M[\"context_length\"],\n",
" stride=GPT_CONFIG_124M[\"context_length\"],\n",
" drop_last=False,\n",
" shuffle=False\n",
")"
@@ -755,7 +755,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.4"
"version": "3.10.6"
}
},
"nbformat": 4,
6 changes: 3 additions & 3 deletions appendix-D/01_main-chapter-code/previous_chapters.py
@@ -61,7 +61,7 @@ def create_dataloader_v1(txt, batch_size=4, max_length=256,
#####################################

class MultiHeadAttention(nn.Module):
def __init__(self, d_in, d_out, block_size, dropout, num_heads, qkv_bias=False):
def __init__(self, d_in, d_out, context_length, dropout, num_heads, qkv_bias=False):
super().__init__()
assert d_out % num_heads == 0, "d_out must be divisible by n_heads"

@@ -74,7 +74,7 @@ def __init__(self, d_in, d_out, block_size, dropout, num_heads, qkv_bias=False):
self.W_value = nn.Linear(d_in, d_out, bias=qkv_bias)
self.out_proj = nn.Linear(d_out, d_out) # Linear layer to combine head outputs
self.dropout = nn.Dropout(dropout)
self.register_buffer('mask', torch.triu(torch.ones(block_size, block_size), diagonal=1))
self.register_buffer('mask', torch.triu(torch.ones(context_length, context_length), diagonal=1))

def forward(self, x):
b, num_tokens, d_in = x.shape
@@ -164,7 +164,7 @@ def __init__(self, cfg):
self.att = MultiHeadAttention(
d_in=cfg["emb_dim"],
d_out=cfg["emb_dim"],
block_size=cfg["ctx_len"],
context_length=cfg["context_length"],
num_heads=cfg["n_heads"],
dropout=cfg["drop_rate"],
qkv_bias=cfg["qkv_bias"])
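For callers of previous_chapters.py, a hedged instantiation sketch with the renamed keyword (illustrative shapes; assumes the forward pass follows the book's implementation):

```python
# Editorial sketch: the only signature change is the keyword name
# (block_size -> context_length); behavior is unchanged.
import torch
from previous_chapters import MultiHeadAttention

torch.manual_seed(123)
x = torch.rand(2, 6, 768)                      # (batch, num_tokens, d_in)
mha = MultiHeadAttention(d_in=768, d_out=768,
                         context_length=1024,  # formerly block_size=1024
                         dropout=0.0, num_heads=12)
print(mha(x).shape)                            # expected: torch.Size([2, 6, 768])
```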
6 changes: 3 additions & 3 deletions ch02/01_main-chapter-code/ch02.ipynb
@@ -1772,8 +1772,8 @@
"metadata": {},
"outputs": [],
"source": [
"block_size = max_length\n",
"pos_embedding_layer = torch.nn.Embedding(block_size, output_dim)"
"context_length = max_length\n",
"pos_embedding_layer = torch.nn.Embedding(context_length, output_dim)"
]
},
{
@@ -1874,7 +1874,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.12"
"version": "3.10.6"
}
},
"nbformat": 4,
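A small editorial sketch for the renamed cell above (values as used in the chapter, max_length = 4 and output_dim = 256): feeding the position indices 0 .. context_length - 1 through the layer yields one embedding vector per position.

```python
# Editorial sketch: one positional embedding vector per position index.
import torch

max_length, output_dim = 4, 256
context_length = max_length
pos_embedding_layer = torch.nn.Embedding(context_length, output_dim)
pos_embeddings = pos_embedding_layer(torch.arange(context_length))
print(pos_embeddings.shape)  # torch.Size([4, 256])
```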
6 changes: 3 additions & 3 deletions ch02/01_main-chapter-code/dataloader.ipynb
@@ -87,11 +87,11 @@
"\n",
"vocab_size = 50257\n",
"output_dim = 256\n",
"block_size = 1024\n",
"context_length = 1024\n",
"\n",
"\n",
"token_embedding_layer = torch.nn.Embedding(vocab_size, output_dim)\n",
"pos_embedding_layer = torch.nn.Embedding(block_size, output_dim)\n",
"pos_embedding_layer = torch.nn.Embedding(context_length, output_dim)\n",
"\n",
"max_length = 4\n",
"dataloader = create_dataloader_v1(raw_text, batch_size=8, max_length=max_length, stride=max_length)"
@@ -150,7 +150,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.4"
"version": "3.10.6"
}
},
"nbformat": 4,
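A hedged sketch of how the two layers defined above are typically combined (assumes the dataloader yields (inputs, targets) with inputs of shape (8, 4)):

```python
# Editorial sketch: token embeddings plus broadcast positional embeddings.
inputs, targets = next(iter(dataloader))                        # inputs: (8, 4) token IDs
token_embeddings = token_embedding_layer(inputs)                # (8, 4, 256)
pos_embeddings = pos_embedding_layer(torch.arange(max_length))  # (4, 256)
input_embeddings = token_embeddings + pos_embeddings            # broadcasts to (8, 4, 256)
print(input_embeddings.shape)
```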
4 changes: 2 additions & 2 deletions ch02/01_main-chapter-code/exercise-solutions.ipynb
@@ -294,9 +294,9 @@
"vocab_size = 50257\n",
"output_dim = 256\n",
"max_len = 4\n",
"block_size = max_len\n",
"context_length = max_len\n",
"\n",
"token_embedding_layer = torch.nn.Embedding(block_size, output_dim)\n",
"token_embedding_layer = torch.nn.Embedding(context_length, output_dim)\n",
"pos_embedding_layer = torch.nn.Embedding(vocab_size, output_dim)"
]
},
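For comparison, a minimal editorial sketch of the conventional pairing of these two layers (token embeddings sized by vocab_size, positional embeddings by context_length); this is an illustration, not part of the commit:

```python
# Editorial sketch: token embeddings are indexed by token ID (vocab_size rows),
# positional embeddings by position (context_length rows).
import torch

vocab_size, output_dim = 50257, 256
context_length = 4
token_embedding_layer = torch.nn.Embedding(vocab_size, output_dim)
pos_embedding_layer = torch.nn.Embedding(context_length, output_dim)
print(token_embedding_layer.weight.shape)  # torch.Size([50257, 256])
print(pos_embedding_layer.weight.shape)    # torch.Size([4, 256])
```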
30 changes: 15 additions & 15 deletions ch03/01_main-chapter-code/ch03.ipynb
@@ -1275,8 +1275,8 @@
}
],
"source": [
"block_size = attn_scores.shape[0]\n",
"mask_simple = torch.tril(torch.ones(block_size, block_size))\n",
"context_length = attn_scores.shape[0]\n",
"mask_simple = torch.tril(torch.ones(context_length, context_length))\n",
"print(mask_simple)"
]
},
@@ -1395,7 +1395,7 @@
}
],
"source": [
"mask = torch.triu(torch.ones(block_size, block_size), diagonal=1)\n",
"mask = torch.triu(torch.ones(context_length, context_length), diagonal=1)\n",
"masked = attn_scores.masked_fill(mask.bool(), -torch.inf)\n",
"print(masked)"
]
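A tiny worked example (editorial) of the masking pattern built above, applied to a 3x3 score matrix and followed by softmax; the 1/sqrt(d_k) scaling used in the chapter is omitted to keep the example minimal.

```python
# Editorial worked example: causal masking of a small attention-score matrix.
import torch

attn_scores = torch.tensor([[0.1, 0.2, 0.3],
                            [0.4, 0.5, 0.6],
                            [0.7, 0.8, 0.9]])
context_length = attn_scores.shape[0]
mask = torch.triu(torch.ones(context_length, context_length), diagonal=1)
masked = attn_scores.masked_fill(mask.bool(), -torch.inf)
attn_weights = torch.softmax(masked, dim=-1)
print(attn_weights)  # zeros above the diagonal; each row sums to 1
```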
@@ -1598,14 +1598,14 @@
"source": [
"class CausalAttention(nn.Module):\n",
"\n",
" def __init__(self, d_in, d_out, block_size, dropout, qkv_bias=False):\n",
" def __init__(self, d_in, d_out, context_length, dropout, qkv_bias=False):\n",
" super().__init__()\n",
" self.d_out = d_out\n",
" self.W_query = nn.Linear(d_in, d_out, bias=qkv_bias)\n",
" self.W_key = nn.Linear(d_in, d_out, bias=qkv_bias)\n",
" self.W_value = nn.Linear(d_in, d_out, bias=qkv_bias)\n",
" self.dropout = nn.Dropout(dropout) # New\n",
" self.register_buffer('mask', torch.triu(torch.ones(block_size, block_size), diagonal=1)) # New\n",
" self.register_buffer('mask', torch.triu(torch.ones(context_length, context_length), diagonal=1)) # New\n",
"\n",
" def forward(self, x):\n",
" b, num_tokens, d_in = x.shape # New batch dimension b\n",
@@ -1624,8 +1624,8 @@
"\n",
"torch.manual_seed(123)\n",
"\n",
"block_size = batch.shape[1]\n",
"ca = CausalAttention(d_in, d_out, block_size, 0.0)\n",
"context_length = batch.shape[1]\n",
"ca = CausalAttention(d_in, d_out, context_length, 0.0)\n",
"\n",
"context_vecs = ca(batch)\n",
"\n",
@@ -1713,10 +1713,10 @@
"source": [
"class MultiHeadAttentionWrapper(nn.Module):\n",
"\n",
" def __init__(self, d_in, d_out, block_size, dropout, num_heads, qkv_bias=False):\n",
" def __init__(self, d_in, d_out, context_length, dropout, num_heads, qkv_bias=False):\n",
" super().__init__()\n",
" self.heads = nn.ModuleList(\n",
" [CausalAttention(d_in, d_out, block_size, dropout, qkv_bias) \n",
" [CausalAttention(d_in, d_out, context_length, dropout, qkv_bias) \n",
" for _ in range(num_heads)]\n",
" )\n",
"\n",
@@ -1726,9 +1726,9 @@
"\n",
"torch.manual_seed(123)\n",
"\n",
"block_size = batch.shape[1] # This is the number of tokens\n",
"context_length = batch.shape[1] # This is the number of tokens\n",
"d_in, d_out = 3, 2\n",
"mha = MultiHeadAttentionWrapper(d_in, d_out, block_size, 0.0, num_heads=2)\n",
"mha = MultiHeadAttentionWrapper(d_in, d_out, context_length, 0.0, num_heads=2)\n",
"\n",
"context_vecs = mha(batch)\n",
"\n",
@@ -1792,7 +1792,7 @@
],
"source": [
"class MultiHeadAttention(nn.Module):\n",
" def __init__(self, d_in, d_out, block_size, dropout, num_heads, qkv_bias=False):\n",
" def __init__(self, d_in, d_out, context_length, dropout, num_heads, qkv_bias=False):\n",
" super().__init__()\n",
" assert d_out % num_heads == 0, \"d_out must be divisible by num_heads\"\n",
"\n",
@@ -1805,7 +1805,7 @@
" self.W_value = nn.Linear(d_in, d_out, bias=qkv_bias)\n",
" self.out_proj = nn.Linear(d_out, d_out) # Linear layer to combine head outputs\n",
" self.dropout = nn.Dropout(dropout)\n",
" self.register_buffer('mask', torch.triu(torch.ones(block_size, block_size), diagonal=1))\n",
" self.register_buffer('mask', torch.triu(torch.ones(context_length, context_length), diagonal=1))\n",
"\n",
" def forward(self, x):\n",
" b, num_tokens, d_in = x.shape\n",
@@ -1848,9 +1848,9 @@
"\n",
"torch.manual_seed(123)\n",
"\n",
"batch_size, block_size, d_in = batch.shape\n",
"batch_size, context_length, d_in = batch.shape\n",
"d_out = 2\n",
"mha = MultiHeadAttention(d_in, d_out, block_size, 0.0, num_heads=2)\n",
"mha = MultiHeadAttention(d_in, d_out, context_length, 0.0, num_heads=2)\n",
"\n",
"context_vecs = mha(batch)\n",
"\n",
6 changes: 3 additions & 3 deletions ch03/01_main-chapter-code/exercise-solutions.ipynb
@@ -201,7 +201,7 @@
"torch.manual_seed(123)\n",
"\n",
"d_out = 1\n",
"mha = MultiHeadAttentionWrapper(d_in, d_out, block_size, 0.0, num_heads=2)\n",
"mha = MultiHeadAttentionWrapper(d_in, d_out, context_length, 0.0, num_heads=2)\n",
"\n",
"context_vecs = mha(batch)\n",
"\n",
@@ -247,11 +247,11 @@
"metadata": {},
"source": [
"```python\n",
"block_size = 1024\n",
"context_length = 1024\n",
"d_in, d_out = 768, 768\n",
"num_heads = 12\n",
"\n",
"mha = MultiHeadAttention(d_in, d_out, block_size, 0.0, num_heads)\n",
"mha = MultiHeadAttention(d_in, d_out, context_length, 0.0, num_heads)\n",
"```"
]
},
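A back-of-the-envelope editorial check for the exercise configuration above (d_in = d_out = 768, 12 heads, qkv_bias left at False), assuming the MultiHeadAttention class defined in this chapter:

```python
# Editorial parameter count: three bias-free 768x768 projections plus out_proj.
mha = MultiHeadAttention(d_in=768, d_out=768, context_length=1024,
                         dropout=0.0, num_heads=12)
n_params = sum(p.numel() for p in mha.parameters())
print(n_params)  # 3*768*768 + (768*768 + 768) = 2,360,064
```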
26 changes: 13 additions & 13 deletions ch03/01_main-chapter-code/multihead-attention.ipynb
@@ -116,11 +116,11 @@
"vocab_size = 50257\n",
"output_dim = 256\n",
"max_len = 1024\n",
"block_size = max_len\n",
"context_length = max_len\n",
"\n",
"\n",
"token_embedding_layer = nn.Embedding(vocab_size, output_dim)\n",
"pos_embedding_layer = torch.nn.Embedding(block_size, output_dim)\n",
"pos_embedding_layer = torch.nn.Embedding(context_length, output_dim)\n",
"\n",
"max_length = 4\n",
"dataloader = create_dataloader(raw_text, batch_size=8, max_length=max_length, stride=max_length)"
@@ -187,14 +187,14 @@
"source": [
"class CausalSelfAttention(nn.Module):\n",
"\n",
" def __init__(self, d_in, d_out, block_size, dropout, qkv_bias=False):\n",
" def __init__(self, d_in, d_out, context_length, dropout, qkv_bias=False):\n",
" super().__init__()\n",
" self.d_out = d_out\n",
" self.W_query = nn.Linear(d_in, d_out, bias=qkv_bias)\n",
" self.W_key = nn.Linear(d_in, d_out, bias=qkv_bias)\n",
" self.W_value = nn.Linear(d_in, d_out, bias=qkv_bias)\n",
" self.dropout = nn.Dropout(dropout) # New\n",
" self.register_buffer('mask', torch.triu(torch.ones(block_size, block_size), diagonal=1)) # New\n",
" self.register_buffer('mask', torch.triu(torch.ones(context_length, context_length), diagonal=1)) # New\n",
"\n",
" def forward(self, x):\n",
" b, n_tokens, d_in = x.shape # New batch dimension b\n",
@@ -213,10 +213,10 @@
"\n",
"\n",
"class MultiHeadAttentionWrapper(nn.Module):\n",
" def __init__(self, d_in, d_out, block_size, dropout, num_heads, qkv_bias=False):\n",
" def __init__(self, d_in, d_out, context_length, dropout, num_heads, qkv_bias=False):\n",
" super().__init__()\n",
" self.heads = nn.ModuleList(\n",
" [CausalSelfAttention(d_in, d_out, block_size, dropout, qkv_bias) \n",
" [CausalSelfAttention(d_in, d_out, context_length, dropout, qkv_bias) \n",
" for _ in range(num_heads)]\n",
" )\n",
" self.out_proj = nn.Linear(d_out*num_heads, d_out*num_heads)\n",
@@ -243,13 +243,13 @@
"source": [
"torch.manual_seed(123)\n",
"\n",
"block_size = max_length\n",
"context_length = max_length\n",
"d_in = output_dim\n",
"\n",
"num_heads=2\n",
"d_out = d_in // num_heads\n",
"\n",
"mha = MultiHeadAttentionWrapper(d_in, d_out, block_size, 0.0, num_heads)\n",
"mha = MultiHeadAttentionWrapper(d_in, d_out, context_length, 0.0, num_heads)\n",
"\n",
"batch = input_embeddings\n",
"context_vecs = mha(batch)\n",
Expand All @@ -273,7 +273,7 @@
"outputs": [],
"source": [
"class MultiHeadAttention(nn.Module):\n",
" def __init__(self, d_in, d_out, block_size, dropout, num_heads, qkv_bias=False):\n",
" def __init__(self, d_in, d_out, context_length, dropout, num_heads, qkv_bias=False):\n",
" super().__init__()\n",
" assert d_out % num_heads == 0, \"d_out must be divisible by num_heads\"\n",
"\n",
@@ -286,7 +286,7 @@
" self.W_value = nn.Linear(d_in, d_out, bias=qkv_bias)\n",
" self.out_proj = nn.Linear(d_out, d_out) # Linear layer to combine head outputs\n",
" self.dropout = nn.Dropout(dropout)\n",
" self.register_buffer('mask', torch.triu(torch.ones(block_size, block_size), diagonal=1))\n",
" self.register_buffer('mask', torch.triu(torch.ones(context_length, context_length), diagonal=1))\n",
"\n",
" def forward(self, x):\n",
" b, num_tokens, d_in = x.shape\n",
@@ -345,11 +345,11 @@
"source": [
"torch.manual_seed(123)\n",
"\n",
"block_size = max_length\n",
"context_length = max_length\n",
"d_in = output_dim\n",
"d_out = d_in\n",
"\n",
"mha = MultiHeadAttention(d_in, d_out, block_size, 0.0, num_heads=2)\n",
"mha = MultiHeadAttention(d_in, d_out, context_length, 0.0, num_heads=2)\n",
"\n",
"batch = input_embeddings\n",
"context_vecs = mha(batch)\n",
@@ -374,7 +374,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.12"
"version": "3.10.6"
}
},
"nbformat": 4,
