Coverage for transformer_lens/pretrained/weight_conversions/opt.py: 100% (40 statements)
import einops
import torch

from transformer_lens.HookedTransformerConfig import HookedTransformerConfig
def convert_opt_weights(opt, cfg: HookedTransformerConfig):
    state_dict = {}

    state_dict["embed.W_E"] = opt.model.decoder.embed_tokens.weight
    # OPT's learned positional embedding table is offset by 2 in the Hugging Face
    # implementation, so the first two rows are skipped.
    state_dict["pos_embed.W_pos"] = opt.model.decoder.embed_positions.weight[2:, :]
    for l in range(cfg.n_layers):
        state_dict[f"blocks.{l}.ln1.w"] = opt.model.decoder.layers[l].self_attn_layer_norm.weight
        state_dict[f"blocks.{l}.ln1.b"] = opt.model.decoder.layers[l].self_attn_layer_norm.bias

        # Split each fused [n_heads * d_head, d_model] projection weight into
        # per-head [n_heads, d_model, d_head] tensors, the TransformerLens layout.
        W_Q = opt.model.decoder.layers[l].self_attn.q_proj.weight
        W_K = opt.model.decoder.layers[l].self_attn.k_proj.weight
        W_V = opt.model.decoder.layers[l].self_attn.v_proj.weight
        W_Q = einops.rearrange(
            W_Q,
            "(index d_head) d_model -> index d_model d_head",
            index=cfg.n_heads,
        )
        W_K = einops.rearrange(
            W_K,
            "(index d_head) d_model -> index d_model d_head",
            index=cfg.n_heads,
        )
        W_V = einops.rearrange(
            W_V,
            "(index d_head) d_model -> index d_model d_head",
            index=cfg.n_heads,
        )

        state_dict[f"blocks.{l}.attn.W_Q"] = W_Q
        state_dict[f"blocks.{l}.attn.W_K"] = W_K
        state_dict[f"blocks.{l}.attn.W_V"] = W_V
        # Reshape the flat [n_heads * d_head] biases to [n_heads, d_head].
        q_bias = einops.rearrange(
            opt.model.decoder.layers[l].self_attn.q_proj.bias,
            "(head_index d_head) -> head_index d_head",
            head_index=cfg.n_heads,
            d_head=cfg.d_head,
        )
        k_bias = einops.rearrange(
            opt.model.decoder.layers[l].self_attn.k_proj.bias,
            "(head_index d_head) -> head_index d_head",
            head_index=cfg.n_heads,
            d_head=cfg.d_head,
        )
        v_bias = einops.rearrange(
            opt.model.decoder.layers[l].self_attn.v_proj.bias,
            "(head_index d_head) -> head_index d_head",
            head_index=cfg.n_heads,
            d_head=cfg.d_head,
        )

        state_dict[f"blocks.{l}.attn.b_Q"] = q_bias
        state_dict[f"blocks.{l}.attn.b_K"] = k_bias
        state_dict[f"blocks.{l}.attn.b_V"] = v_bias
        # The output projection weight is [d_model, n_heads * d_head]; split it
        # into per-head [n_heads, d_head, d_model] tensors.
        W_O = opt.model.decoder.layers[l].self_attn.out_proj.weight
        W_O = einops.rearrange(
            W_O,
            "d_model (index d_head) -> index d_head d_model",
            index=cfg.n_heads,
        )
        state_dict[f"blocks.{l}.attn.W_O"] = W_O
        state_dict[f"blocks.{l}.attn.b_O"] = opt.model.decoder.layers[l].self_attn.out_proj.bias

        # OPT names the pre-MLP LayerNorm "final_layer_norm"; it maps to ln2.
        state_dict[f"blocks.{l}.ln2.w"] = opt.model.decoder.layers[l].final_layer_norm.weight
        state_dict[f"blocks.{l}.ln2.b"] = opt.model.decoder.layers[l].final_layer_norm.bias

        # nn.Linear stores weights as [out_features, in_features]; transpose to
        # the TransformerLens [in, out] convention.
        state_dict[f"blocks.{l}.mlp.W_in"] = opt.model.decoder.layers[l].fc1.weight.T
        state_dict[f"blocks.{l}.mlp.W_out"] = opt.model.decoder.layers[l].fc2.weight.T

        state_dict[f"blocks.{l}.mlp.b_in"] = opt.model.decoder.layers[l].fc1.bias
        state_dict[f"blocks.{l}.mlp.b_out"] = opt.model.decoder.layers[l].fc2.bias
    state_dict["ln_final.w"] = opt.model.decoder.final_layer_norm.weight
    state_dict["ln_final.b"] = opt.model.decoder.final_layer_norm.bias
    state_dict["unembed.W_U"] = opt.lm_head.weight.T
    # OPT ties no bias to the unembedding, so fill b_U with zeros.
    state_dict["unembed.b_U"] = torch.zeros(cfg.d_vocab, dtype=cfg.dtype)
    return state_dict
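
For reference, a minimal usage sketch follows. It assumes the Hugging Face transformers OPTForCausalLM loader and hand-fills a HookedTransformerConfig with the published facebook/opt-125m hyperparameters; those config values are assumptions for illustration and are not taken from this module.

from transformers import OPTForCausalLM

from transformer_lens.HookedTransformerConfig import HookedTransformerConfig

# Assumed hyperparameters for facebook/opt-125m (12 layers, d_model 768,
# 12 heads, 2048-token context, 50272-token vocab, ReLU MLPs).
cfg = HookedTransformerConfig(
    n_layers=12,
    d_model=768,
    n_ctx=2048,
    d_head=64,
    n_heads=12,
    d_vocab=50272,
    act_fn="relu",
)

opt = OPTForCausalLM.from_pretrained("facebook/opt-125m")
state_dict = convert_opt_weights(opt, cfg)

# Spot-check one converted tensor: per-head layout [n_heads, d_model, d_head].
assert state_dict["blocks.0.attn.W_Q"].shape == (12, 768, 64)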