Coverage for transformer_lens/components/t5_block.py: 20%
64 statements
coverage.py v7.10.1, created at 2026-04-30 01:33 +0000
from typing import Optional

import torch
import torch.nn as nn
from jaxtyping import Float

from transformer_lens.cache.key_value_cache_entry import (
    TransformerLensKeyValueCacheEntry,
)
from transformer_lens.components import RMSNorm, T5Attention
from transformer_lens.config.HookedTransformerConfig import HookedTransformerConfig
from transformer_lens.factories.mlp_factory import MLPFactory
from transformer_lens.hook_points import HookPoint
from transformer_lens.utilities import repeat_along_head_dimension


class T5Block(nn.Module):
    """
    T5 block. Uses RMSNorm (T5's LayerNorm) and T5Attention instead of the usual components,
    and adds a cross-attention sublayer when is_decoder is True.
    """

    def __init__(self, cfg: HookedTransformerConfig, block_index: int, is_decoder: bool):
        super().__init__()
        self.cfg = cfg
        self.is_decoder = is_decoder

        self.ln1 = RMSNorm(cfg)
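        # Only the first block (block_index == 0) learns the relative attention bias;
        # later blocks reuse it via the position_bias argument to forward().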
        self.attn = T5Attention(cfg, has_relative_attention_bias=block_index == 0)
        self.ln2 = RMSNorm(cfg)
        if self.is_decoder:
            self.cross_attn = T5Attention(cfg)
            self.ln3 = RMSNorm(cfg)
        self.mlp = MLPFactory.create_mlp(self.cfg)  # [batch, pos, d_model]

        self.hook_q_input = HookPoint()  # [batch, pos, n_heads, d_model]
        self.hook_k_input = HookPoint()  # [batch, pos, n_heads, d_model]
        self.hook_v_input = HookPoint()  # [batch, pos, n_heads, d_model]

        self.hook_attn_in = HookPoint()  # [batch, pos, d_model]
        self.hook_attn_out = HookPoint()  # [batch, pos, d_model]
        if self.is_decoder:
            self.hook_cross_attn_in = HookPoint()  # [batch, pos, d_model]
            self.hook_cross_attn_out = HookPoint()  # [batch, pos, d_model]
            self.hook_resid_mid_cross = HookPoint()  # [batch, pos, d_model]

        self.hook_mlp_in = HookPoint()  # [batch, pos, d_model]
        self.hook_mlp_out = HookPoint()  # [batch, pos, d_model]
        self.hook_resid_pre = HookPoint()  # [batch, pos, d_model]
        self.hook_resid_mid = HookPoint()  # [batch, pos, d_model]
        self.hook_resid_post = HookPoint()  # [batch, pos, d_model]

    def forward(
        self,
        resid_pre: Float[torch.Tensor, "batch pos d_model"],
        additive_attention_mask: Optional[Float[torch.Tensor, "batch 1 1 pos"]] = None,
        encoder_additive_attention_mask: Optional[
            Float[torch.Tensor, "batch 1 1 encoder_pos"]
        ] = None,
        position_bias: Optional[Float[torch.Tensor, "1 head_index pos kv_pos"]] = None,
        encoder_hidden_states: Optional[Float[torch.Tensor, "batch encoder_pos d_model"]] = None,
        past_kv_cache_entry: Optional[TransformerLensKeyValueCacheEntry] = None,
    ) -> Float[torch.Tensor, "batch pos d_model"]:
        """A single T5 transformer block.

        Args:
            resid_pre (torch.Tensor): The residual stream - shape [batch, pos, d_model].
            additive_attention_mask (torch.Tensor, optional): Additive mask for padded tokens in self-attention. Defaults to None.
            encoder_additive_attention_mask (torch.Tensor, optional): Additive mask for padded tokens in the encoder sequence, used by cross attention. Defaults to None.
            position_bias (torch.Tensor, optional): Relative position bias, computed by the first block and reused by later blocks. Defaults to None.
            encoder_hidden_states (torch.Tensor): The hidden states of the encoder for cross attention - shape [batch, encoder_pos, d_model]. Required when is_decoder is True.
            past_kv_cache_entry (TransformerLensKeyValueCacheEntry, optional): A cache of previous keys and values, used only when generating text. Defaults to None.

        Returns:
            torch.Tensor: The updated residual stream - shape [batch, pos, d_model].
        """
        resid_pre = self.hook_resid_pre(resid_pre)  # [batch, pos, d_model]

        attn_in = resid_pre

        if self.cfg.use_attn_in:
            attn_in = self.hook_attn_in(
                repeat_along_head_dimension(resid_pre, n_heads=self.cfg.n_heads)
            )

        if self.cfg.use_split_qkv_input:
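            # With grouped-query attention (n_key_value_heads set), the key and value
            # hook inputs are repeated per KV head rather than per query head.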
            n_kv_heads = (
                self.cfg.n_key_value_heads
                if self.cfg.n_key_value_heads is not None
                else self.cfg.n_heads
            )
            query_input = self.hook_q_input(
                repeat_along_head_dimension(resid_pre, n_heads=self.cfg.n_heads)
            )
            key_input = self.hook_k_input(
                repeat_along_head_dimension(resid_pre, n_heads=n_kv_heads)
            )
            value_input = self.hook_v_input(
                repeat_along_head_dimension(resid_pre, n_heads=n_kv_heads)
            )
        else:
            query_input = attn_in
            key_input = attn_in
            value_input = attn_in

        attn_out = self.hook_attn_out(
            # Hook the residual stream states that are used to calculate the
            # queries, keys and values, independently.
            # Then take the layer norm of these inputs, and pass these to the attention module.
            self.attn(
                query_input=self.ln1(query_input),
                key_input=self.ln1(key_input),
                value_input=self.ln1(value_input),
                past_kv_cache_entry=past_kv_cache_entry,
                additive_attention_mask=additive_attention_mask,
                position_bias=position_bias,
            )
        )  # [batch, pos, d_model]

        resid_mid = self.hook_resid_mid(resid_pre + attn_out)  # [batch, pos, d_model]

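        # Decoder blocks add a cross-attention sublayer between self-attention and the MLP,
        # attending over the encoder's hidden states.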
        if self.is_decoder:
            cross_attn_in = (
                resid_mid
                if not self.cfg.use_attn_in
                else self.hook_cross_attn_in(resid_mid.clone())
            )

            if encoder_hidden_states is None:
                raise ValueError("Encoder hidden states must be provided for cross attention!")

            cross_attn_out = self.hook_cross_attn_out(
                self.cross_attn(
                    query_input=self.ln2(cross_attn_in),
                    key_input=encoder_hidden_states,
                    value_input=encoder_hidden_states,
                    additive_attention_mask=encoder_additive_attention_mask,
                )
            )
            resid_mid_cross = self.hook_resid_mid_cross(resid_mid + cross_attn_out)

            mlp_in = (
                resid_mid_cross
                if not self.cfg.use_hook_mlp_in
                else self.hook_mlp_in(resid_mid_cross.clone())
            )

            normalized_resid_mid = self.ln3(mlp_in)
        else:
            mlp_in = (
                resid_mid if not self.cfg.use_hook_mlp_in else self.hook_mlp_in(resid_mid.clone())
            )
            normalized_resid_mid = self.ln2(mlp_in)

        mlp_out = self.hook_mlp_out(self.mlp(normalized_resid_mid))  # [batch, pos, d_model]
        resid_post = self.hook_resid_post(mlp_in + mlp_out)  # [batch, pos, d_model]

        return resid_post
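
Usage sketch (not part of the module): the snippet below builds a small encoder-style T5Block and runs one forward pass. The specific config values, and the relative_attention_* field names, are assumptions based on what this file and T5Attention reference; adjust them to your transformer_lens version.

import torch

from transformer_lens.components.t5_block import T5Block
from transformer_lens.config.HookedTransformerConfig import HookedTransformerConfig

# Minimal config; the relative_attention_* fields are assumed to be what
# T5Attention reads for its learned relative position bias.
cfg = HookedTransformerConfig(
    n_layers=2,
    d_model=64,
    n_ctx=32,
    d_head=16,
    n_heads=4,
    act_fn="relu",
    relative_attention_num_buckets=32,
    relative_attention_max_distance=128,
)

# block_index=0 so this block owns the relative attention bias;
# is_decoder=False gives an encoder-style block (no cross-attention sublayer).
block = T5Block(cfg, block_index=0, is_decoder=False)

resid_pre = torch.randn(1, 10, cfg.d_model)  # [batch, pos, d_model]
resid_post = block(resid_pre)
print(resid_post.shape)  # torch.Size([1, 10, 64])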