Coverage for transformer_lens/components/bert_block.py: 89%
45 statements
coverage.py v7.4.4, created at 2024-11-19 14:42 +0000
1"""Hooked Transformer Bert Block Component.
3This module contains all the component :class:`BertBlock`.
4"""
5from typing import Optional
7import torch
8import torch.nn as nn
9from jaxtyping import Float
11from transformer_lens.components import Attention, LayerNorm
12from transformer_lens.factories.mlp_factory import MLPFactory
13from transformer_lens.hook_points import HookPoint
14from transformer_lens.HookedTransformerConfig import HookedTransformerConfig
15from transformer_lens.utils import repeat_along_head_dimension


class BertBlock(nn.Module):
    """
    BERT Block. Similar to the TransformerBlock, except that the LayerNorms are
    applied after the attention and MLP, rather than before.
    """

    def __init__(self, cfg: HookedTransformerConfig):
        super().__init__()
        self.cfg = cfg

        self.attn = Attention(cfg)
        self.ln1 = LayerNorm(cfg)
        self.mlp = MLPFactory.create_mlp(self.cfg)
        self.ln2 = LayerNorm(cfg)

        self.hook_q_input = HookPoint()  # [batch, pos, n_heads, d_model]
        self.hook_k_input = HookPoint()  # [batch, pos, n_heads, d_model]
        self.hook_v_input = HookPoint()  # [batch, pos, n_heads, d_model]

        self.hook_attn_out = HookPoint()  # [batch, pos, d_model]
        self.hook_mlp_in = HookPoint()  # [batch, pos, d_model]
        self.hook_mlp_out = HookPoint()  # [batch, pos, d_model]
        self.hook_resid_pre = HookPoint()  # [batch, pos, d_model]
        self.hook_resid_mid = HookPoint()  # [batch, pos, d_model]
        self.hook_resid_post = HookPoint()  # [batch, pos, d_model]
        self.hook_normalized_resid_post = HookPoint()  # [batch, pos, d_model]

    def forward(
        self,
        resid_pre: Float[torch.Tensor, "batch pos d_model"],
        additive_attention_mask: Optional[Float[torch.Tensor, "batch 1 1 pos"]] = None,
    ):
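        """Forward pass of the post-LayerNorm BERT block.

        Runs self-attention over ``resid_pre`` (with an optional additive attention
        mask), adds the residual, applies ``ln1``, runs the MLP, adds the residual
        again, and returns the output of the final ``ln2``, firing the block's hook
        points along the way.
        """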
        resid_pre = self.hook_resid_pre(resid_pre)

        query_input = resid_pre
        key_input = resid_pre
        value_input = resid_pre

        if self.cfg.use_split_qkv_input:  # coverage: condition never true in the recorded run, so this branch was not exercised
            n_heads = self.cfg.n_heads
            query_input = self.hook_q_input(repeat_along_head_dimension(query_input, n_heads))
            key_input = self.hook_k_input(repeat_along_head_dimension(key_input, n_heads))
            value_input = self.hook_v_input(repeat_along_head_dimension(value_input, n_heads))
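
        # Self-attention over the (optionally per-head duplicated) query/key/value inputs.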
        attn_out = self.hook_attn_out(
            self.attn(
                query_input,
                key_input,
                value_input,
                additive_attention_mask=additive_attention_mask,
            )
        )
        resid_mid = self.hook_resid_mid(resid_pre + attn_out)
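
        # Post-attention LayerNorm feeds the MLP; the MLP's residual is taken from the
        # normalized stream, matching the post-LN BERT layout.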
        mlp_in = resid_mid if not self.cfg.use_hook_mlp_in else self.hook_mlp_in(resid_mid.clone())
        normalized_resid_mid = self.ln1(mlp_in)
        mlp_out = self.hook_mlp_out(self.mlp(normalized_resid_mid))
        resid_post = self.hook_resid_post(normalized_resid_mid + mlp_out)
        normalized_resid_post = self.hook_normalized_resid_post(self.ln2(resid_post))

        return normalized_resid_post
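

# Usage sketch (illustrative): run a single BertBlock on random activations.
# The config values below are assumptions chosen for demonstration only, not
# taken from any real BERT checkpoint.
if __name__ == "__main__":
    demo_cfg = HookedTransformerConfig(
        n_layers=1,
        d_model=64,
        n_ctx=16,
        d_head=16,
        n_heads=4,
        d_mlp=256,
        act_fn="gelu",
        attention_dir="bidirectional",
    )
    demo_block = BertBlock(demo_cfg)
    demo_resid = torch.randn(2, 16, demo_cfg.d_model)  # [batch, pos, d_model]
    demo_out = demo_block(demo_resid, additive_attention_mask=None)
    print(demo_out.shape)  # expected: torch.Size([2, 16, 64])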