Coverage for transformer_lens/pretrained/weight_conversions/phi.py: 9% of 41 statements
import einops

from transformer_lens.HookedTransformerConfig import HookedTransformerConfig


def convert_phi_weights(phi, cfg: HookedTransformerConfig):
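    """Convert a HuggingFace Phi model's weights into a TransformerLens state dict."""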
    state_dict = {}

    state_dict["embed.W_E"] = phi.model.embed_tokens.weight
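
    # Convert each transformer block's weights into TransformerLens format.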
    for l in range(cfg.n_layers):
        state_dict[f"blocks.{l}.ln1.w"] = phi.model.layers[l].input_layernorm.weight
        state_dict[f"blocks.{l}.ln1.b"] = phi.model.layers[l].input_layernorm.bias
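
        # HuggingFace stores each Q/K/V projection as (n_head * d_head, d_model); split out
        # the head dimension to get TransformerLens' (n_head, d_model, d_head) layout.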
        W_Q = phi.model.layers[l].self_attn.q_proj.weight
        W_K = phi.model.layers[l].self_attn.k_proj.weight
        W_V = phi.model.layers[l].self_attn.v_proj.weight
        W_Q = einops.rearrange(
            W_Q, "(n_head d_head) d_model -> n_head d_model d_head", n_head=cfg.n_heads
        )
        W_K = einops.rearrange(
            W_K, "(n_head d_head) d_model -> n_head d_model d_head", n_head=cfg.n_heads
        )
        W_V = einops.rearrange(
            W_V, "(n_head d_head) d_model -> n_head d_model d_head", n_head=cfg.n_heads
        )
        state_dict[f"blocks.{l}.attn.W_Q"] = W_Q
        state_dict[f"blocks.{l}.attn.W_K"] = W_K
        state_dict[f"blocks.{l}.attn.W_V"] = W_V
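
        # The Q/K/V biases are reshaped similarly, from (n_head * d_head,) to (n_head, d_head).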
        b_Q = phi.model.layers[l].self_attn.q_proj.bias
        b_K = phi.model.layers[l].self_attn.k_proj.bias
        b_V = phi.model.layers[l].self_attn.v_proj.bias
        b_Q = einops.rearrange(b_Q, "(n_head d_head) -> n_head d_head", n_head=cfg.n_heads)
        b_K = einops.rearrange(b_K, "(n_head d_head) -> n_head d_head", n_head=cfg.n_heads)
        b_V = einops.rearrange(b_V, "(n_head d_head) -> n_head d_head", n_head=cfg.n_heads)
        state_dict[f"blocks.{l}.attn.b_Q"] = b_Q
        state_dict[f"blocks.{l}.attn.b_K"] = b_K
        state_dict[f"blocks.{l}.attn.b_V"] = b_V
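
        # The output projection maps concatenated heads back to the residual stream;
        # rearrange (d_model, n_head * d_head) into (n_head, d_head, d_model).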
        W_O = phi.model.layers[l].self_attn.dense.weight
        W_O = einops.rearrange(
            W_O, "d_model (n_head d_head) -> n_head d_head d_model", n_head=cfg.n_heads
        )

        state_dict[f"blocks.{l}.attn.W_O"] = W_O
        state_dict[f"blocks.{l}.attn.b_O"] = phi.model.layers[l].self_attn.dense.bias

        # Layer Norm 1 and 2 are tied.
        state_dict[f"blocks.{l}.ln2.w"] = state_dict[f"blocks.{l}.ln1.w"]
        state_dict[f"blocks.{l}.ln2.b"] = state_dict[f"blocks.{l}.ln1.b"]
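
        # Phi's MLP is fc1 -> activation -> fc2. HuggingFace Linear weights are stored as
        # (out_features, in_features), so transpose to TransformerLens' (in, out) layout.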
        state_dict[f"blocks.{l}.mlp.W_in"] = phi.model.layers[l].mlp.fc1.weight.T
        state_dict[f"blocks.{l}.mlp.b_in"] = phi.model.layers[l].mlp.fc1.bias
        state_dict[f"blocks.{l}.mlp.W_out"] = phi.model.layers[l].mlp.fc2.weight.T
        state_dict[f"blocks.{l}.mlp.b_out"] = phi.model.layers[l].mlp.fc2.bias
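
    # Final layer norm and unembedding; lm_head.weight is (d_vocab, d_model), so transpose
    # to get W_U with shape (d_model, d_vocab).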
    state_dict["ln_final.w"] = phi.model.final_layernorm.weight
    state_dict["ln_final.b"] = phi.model.final_layernorm.bias

    state_dict["unembed.W_U"] = phi.lm_head.weight.T
    state_dict["unembed.b_U"] = phi.lm_head.bias

    return state_dict
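
For orientation, here is a minimal sketch of calling the converter directly. It is not part of phi.py: the checkpoint name, the cfg field mapping, and the act_fn value are illustrative assumptions, and in practice HookedTransformer.from_pretrained builds the config and invokes this conversion for you.

# Illustrative usage sketch (assumptions: "microsoft/phi-1_5" checkpoint, gelu_new activation).
from transformers import AutoModelForCausalLM

from transformer_lens.HookedTransformerConfig import HookedTransformerConfig
from transformer_lens.pretrained.weight_conversions.phi import convert_phi_weights

phi = AutoModelForCausalLM.from_pretrained("microsoft/phi-1_5")
hf_cfg = phi.config

cfg = HookedTransformerConfig(
    n_layers=hf_cfg.num_hidden_layers,
    d_model=hf_cfg.hidden_size,
    n_heads=hf_cfg.num_attention_heads,
    d_head=hf_cfg.hidden_size // hf_cfg.num_attention_heads,
    n_ctx=hf_cfg.max_position_embeddings,
    d_vocab=hf_cfg.vocab_size,
    act_fn="gelu_new",  # assumption: a GELU-family activation, as Phi uses
)

state_dict = convert_phi_weights(phi, cfg)
print(state_dict["blocks.0.attn.W_Q"].shape)  # expected: (n_heads, d_model, d_head)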