Coverage for transformer_lens/components/unembed.py: 100%
14 statements
coverage.py v7.4.4, created at 2024-12-14 00:54 +0000
1"""Hooked Transformer Unembed Component.
3This module contains all the component :class:`Unembed`.
4"""
from typing import Dict, Union

import torch
import torch.nn as nn
from jaxtyping import Float

from transformer_lens.HookedTransformerConfig import HookedTransformerConfig
from transformer_lens.utilities.addmm import batch_addmm


class Unembed(nn.Module):
    def __init__(self, cfg: Union[Dict, HookedTransformerConfig]):
        super().__init__()
        self.cfg = HookedTransformerConfig.unwrap(cfg)
        # Note that there's a separate variable for d_vocab_out and d_vocab (the
        # input vocab size). For language tasks these are always the same, but for
        # algorithmic tasks we may want them to be different.
        self.W_U: Float[torch.Tensor, "d_model d_vocab_out"] = nn.Parameter(
            torch.empty(self.cfg.d_model, self.cfg.d_vocab_out, dtype=self.cfg.dtype)
        )
        self.b_U: Float[torch.Tensor, "d_vocab_out"] = nn.Parameter(
            torch.zeros(self.cfg.d_vocab_out, dtype=self.cfg.dtype)
        )

    def forward(
        self, residual: Float[torch.Tensor, "batch pos d_model"]
    ) -> Float[torch.Tensor, "batch pos d_vocab_out"]:
        return batch_addmm(self.b_U, self.W_U, residual)
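
For context, the block below is a minimal usage sketch, not part of the covered module. It assumes Unembed is importable from transformer_lens.components, that HookedTransformerConfig.unwrap accepts a plain dict with the fields shown (n_layers, d_model, n_ctx, d_head, act_fn, d_vocab, d_vocab_out), and that batch_addmm(b_U, W_U, residual) is an addmm-fused form of residual @ W_U + b_U; the concrete sizes are invented for illustration.

import torch
from transformer_lens.components import Unembed

# Illustrative config: d_vocab_out deliberately differs from d_vocab, as it might
# for an algorithmic task (field values here are assumptions for this sketch).
cfg = {
    "n_layers": 1,
    "d_model": 16,
    "n_ctx": 32,
    "d_head": 4,
    "act_fn": "relu",
    "d_vocab": 50,      # input vocab size
    "d_vocab_out": 10,  # smaller output vocab
}

unembed = Unembed(cfg)  # the dict is unwrapped into a HookedTransformerConfig

# W_U is allocated with torch.empty (weights normally come from a checkpoint or
# an init scheme), so fill it with concrete values for this demo.
torch.nn.init.normal_(unembed.W_U, std=0.02)

residual = torch.randn(2, 5, 16)  # [batch, pos, d_model]
logits = unembed(residual)        # [batch, pos, d_vocab_out]
assert logits.shape == (2, 5, 10)

# forward() is the affine map residual @ W_U + b_U, computed via batch_addmm.
expected = residual @ unembed.W_U + unembed.b_U
assert torch.allclose(logits, expected, atol=1e-5)

Keeping d_vocab_out separate from d_vocab is what lets the same component map the residual stream to a task-specific output vocabulary without touching the input embedding.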