Coverage for transformer_lens/pretrained/weight_conversions/neox.py: 100%

34 statements  

coverage.py v7.4.4, created at 2024-10-04 23:19 +0000

import einops
import torch

from transformer_lens.HookedTransformerConfig import HookedTransformerConfig


def convert_neox_weights(neox, cfg: HookedTransformerConfig):
    state_dict = {}

    state_dict["embed.W_E"] = neox.gpt_neox.embed_in.weight

    for l in range(cfg.n_layers):
        state_dict[f"blocks.{l}.ln1.w"] = neox.gpt_neox.layers[l].input_layernorm.weight
        state_dict[f"blocks.{l}.ln1.b"] = neox.gpt_neox.layers[l].input_layernorm.bias

        # For some inexplicable reason, NeoX both uses the concatenated QKV
        # matmul of GPT-2 (afaict this has a negligible performance impact) AND
        # has the flattened axis in the DIFFERENT order of (head_index qkv
        # d_head) - this took me an hour to debug...
        W = neox.gpt_neox.layers[l].attention.query_key_value.weight
        W = einops.rearrange(W, "(i qkv h) m->qkv i m h", i=cfg.n_heads, qkv=3)

        # Split the fused QKV weight into separate per-head Q, K and V matrices
        state_dict[f"blocks.{l}.attn.W_Q"] = W[0]
        state_dict[f"blocks.{l}.attn.W_K"] = W[1]
        state_dict[f"blocks.{l}.attn.W_V"] = W[2]

        qkv_bias = neox.gpt_neox.layers[l].attention.query_key_value.bias
        qkv_bias = einops.rearrange(
            qkv_bias,
            "(index qkv head)->qkv index head",
            qkv=3,
            index=cfg.n_heads,
            head=cfg.d_head,
        )
        # Split the fused QKV bias the same way
        state_dict[f"blocks.{l}.attn.b_Q"] = qkv_bias[0]
        state_dict[f"blocks.{l}.attn.b_K"] = qkv_bias[1]
        state_dict[f"blocks.{l}.attn.b_V"] = qkv_bias[2]

        W_O = neox.gpt_neox.layers[l].attention.dense.weight
        W_O = einops.rearrange(W_O, "m (i h)->i h m", i=cfg.n_heads)
        state_dict[f"blocks.{l}.attn.W_O"] = W_O
        state_dict[f"blocks.{l}.attn.b_O"] = neox.gpt_neox.layers[l].attention.dense.bias

        state_dict[f"blocks.{l}.ln2.w"] = neox.gpt_neox.layers[l].post_attention_layernorm.weight
        state_dict[f"blocks.{l}.ln2.b"] = neox.gpt_neox.layers[l].post_attention_layernorm.bias

        state_dict[f"blocks.{l}.mlp.W_in"] = neox.gpt_neox.layers[l].mlp.dense_h_to_4h.weight.T
        state_dict[f"blocks.{l}.mlp.b_in"] = neox.gpt_neox.layers[l].mlp.dense_h_to_4h.bias

        state_dict[f"blocks.{l}.mlp.W_out"] = neox.gpt_neox.layers[l].mlp.dense_4h_to_h.weight.T
        state_dict[f"blocks.{l}.mlp.b_out"] = neox.gpt_neox.layers[l].mlp.dense_4h_to_h.bias

    state_dict["ln_final.w"] = neox.gpt_neox.final_layer_norm.weight
    state_dict["ln_final.b"] = neox.gpt_neox.final_layer_norm.bias

    state_dict["unembed.W_U"] = neox.embed_out.weight.T
    state_dict["unembed.b_U"] = torch.zeros(cfg.d_vocab, dtype=cfg.dtype)
    return state_dict
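
As a quick sanity check on the (head_index qkv d_head) ordering described in the comment above, the sketch below (not part of the covered module) builds a toy fused QKV weight and confirms that the rearrange pattern used in convert_neox_weights recovers the per-head query slice. The dimensions n_heads=2, d_head=3, d_model=4 are illustrative only, not taken from any real NeoX config.

    import einops
    import torch

    # Toy dimensions (illustrative only, not a real NeoX config).
    n_heads, d_head, d_model = 2, 3, 4

    # NeoX stores the fused QKV weight with the flattened axis ordered as
    # (head_index qkv d_head), giving shape [n_heads * 3 * d_head, d_model].
    W = torch.arange(n_heads * 3 * d_head * d_model, dtype=torch.float32).reshape(
        n_heads * 3 * d_head, d_model
    )

    # Same pattern as convert_neox_weights: split into [qkv, head_index, d_model, d_head].
    W_split = einops.rearrange(W, "(i qkv h) m->qkv i m h", i=n_heads, qkv=3)

    # The query weight for head 0 is the first d_head rows of the fused matrix,
    # transposed to [d_model, d_head].
    assert torch.equal(W_split[0, 0], W[:d_head].T)
    print(W_split.shape)  # torch.Size([3, 2, 4, 3])

In TransformerLens itself this conversion is normally driven by the pretrained-loading machinery (e.g. HookedTransformer.from_pretrained) rather than called directly, with neox being the Hugging Face GPT-NeoX model and cfg the matching HookedTransformerConfig.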