Coverage for transformer_lens/pretrained/weight_conversions/gptj.py: 13%
36 statements
coverage.py v7.4.4, created at 2024-11-19 14:42 +0000
import einops
import torch

from transformer_lens.HookedTransformerConfig import HookedTransformerConfig


def convert_gptj_weights(gptj, cfg: HookedTransformerConfig):
    state_dict = {}

    state_dict["embed.W_E"] = gptj.transformer.wte.weight

    for l in range(cfg.n_layers):
        state_dict[f"blocks.{l}.ln1.w"] = gptj.transformer.h[l].ln_1.weight
        state_dict[f"blocks.{l}.ln1.b"] = gptj.transformer.h[l].ln_1.bias

        W_Q = gptj.transformer.h[l].attn.q_proj.weight
        W_K = gptj.transformer.h[l].attn.k_proj.weight
        W_V = gptj.transformer.h[l].attn.v_proj.weight
        # HF stores each fused projection as [n_heads * d_head, d_model];
        # split it into per-head [n_heads, d_model, d_head] matrices.
        W_Q = einops.rearrange(W_Q, "(i h) m->i m h", i=cfg.n_heads)
        W_K = einops.rearrange(W_K, "(i h) m->i m h", i=cfg.n_heads)
        W_V = einops.rearrange(W_V, "(i h) m->i m h", i=cfg.n_heads)
        state_dict[f"blocks.{l}.attn.W_Q"] = W_Q
        state_dict[f"blocks.{l}.attn.W_K"] = W_K
        state_dict[f"blocks.{l}.attn.W_V"] = W_V

        # GPT-J's attention projections have no biases, so fill in zeros.
        state_dict[f"blocks.{l}.attn.b_Q"] = torch.zeros(cfg.n_heads, cfg.d_head, dtype=cfg.dtype)
        state_dict[f"blocks.{l}.attn.b_K"] = torch.zeros(cfg.n_heads, cfg.d_head, dtype=cfg.dtype)
        state_dict[f"blocks.{l}.attn.b_V"] = torch.zeros(cfg.n_heads, cfg.d_head, dtype=cfg.dtype)

        W_O = gptj.transformer.h[l].attn.out_proj.weight
        W_O = einops.rearrange(W_O, "m (i h)->i h m", i=cfg.n_heads)
        state_dict[f"blocks.{l}.attn.W_O"] = W_O
        state_dict[f"blocks.{l}.attn.b_O"] = torch.zeros(cfg.d_model, dtype=cfg.dtype)

        # GPT-J applies one LayerNorm before the parallel attention and MLP
        # branches, so ln1 and ln2 are tied.
        state_dict[f"blocks.{l}.ln2.w"] = state_dict[f"blocks.{l}.ln1.w"]
        state_dict[f"blocks.{l}.ln2.b"] = state_dict[f"blocks.{l}.ln1.b"]

        # Transpose the MLP weights from HF's [out, in] layout to [in, out].
        state_dict[f"blocks.{l}.mlp.W_in"] = gptj.transformer.h[l].mlp.fc_in.weight.T
        state_dict[f"blocks.{l}.mlp.b_in"] = gptj.transformer.h[l].mlp.fc_in.bias

        state_dict[f"blocks.{l}.mlp.W_out"] = gptj.transformer.h[l].mlp.fc_out.weight.T
        state_dict[f"blocks.{l}.mlp.b_out"] = gptj.transformer.h[l].mlp.fc_out.bias

    state_dict["ln_final.w"] = gptj.transformer.ln_f.weight
    state_dict["ln_final.b"] = gptj.transformer.ln_f.bias

    state_dict["unembed.W_U"] = gptj.lm_head.weight.T
    # Unusually for a GPT-style model, GPT-J's lm_head has a bias.
    state_dict["unembed.b_U"] = gptj.lm_head.bias
    return state_dict
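
For context, a minimal sketch of how this converter might be exercised, assuming the HuggingFace GPTJForCausalLM class and TransformerLens's get_pretrained_model_config helper; the model id and helper names outside this file are assumptions, not part of the module above.

# Hedged usage sketch: load a HF GPT-J checkpoint, build a matching
# TransformerLens config, and convert the weights into the TransformerLens
# state dict layout. GPTJForCausalLM and get_pretrained_model_config are
# assumed to be importable from transformers and
# transformer_lens.loading_from_pretrained respectively.
from transformers import GPTJForCausalLM

from transformer_lens.loading_from_pretrained import get_pretrained_model_config
from transformer_lens.pretrained.weight_conversions.gptj import convert_gptj_weights

gptj = GPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
cfg = get_pretrained_model_config("EleutherAI/gpt-j-6B")

state_dict = convert_gptj_weights(gptj, cfg)

# Spot-check one converted tensor: W_Q should be [n_heads, d_model, d_head].
assert state_dict["blocks.0.attn.W_Q"].shape == (cfg.n_heads, cfg.d_model, cfg.d_head)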