Coverage for transformer_lens/pretrained/weight_conversions/qwen2.py: 12%

41 statements  

coverage.py v7.4.4, created at 2024-11-19 14:42 +0000

import einops
import torch

from transformer_lens.HookedTransformerConfig import HookedTransformerConfig


def convert_qwen2_weights(qwen, cfg: HookedTransformerConfig):
    # Note that this method is also applied for Qwen1.5 models, since they
    # have architecture type Qwen2ForCausalLM.

    state_dict = {}

    state_dict["embed.W_E"] = qwen.model.embed_tokens.weight

    assert cfg.d_mlp is not None  # keep mypy happy

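    # Per-layer conversion. HF stores each projection weight as an
    # (out_features, in_features) matrix; the rearranges below split the
    # fused head dimension out so each parameter can be indexed per head.
    # K and V are split over cfg.n_key_value_heads rather than cfg.n_heads
    # because Qwen2 uses grouped-query attention; the leading underscore in
    # _W_K/_W_V (and _b_K/_b_V) marks these per-KV-head parameters.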
    for l in range(cfg.n_layers):
        state_dict[f"blocks.{l}.ln1.w"] = qwen.model.layers[l].input_layernorm.weight

        W_Q = qwen.model.layers[l].self_attn.q_proj.weight
        W_K = qwen.model.layers[l].self_attn.k_proj.weight
        W_V = qwen.model.layers[l].self_attn.v_proj.weight
        W_Q = einops.rearrange(W_Q, "(n h) m->n m h", n=cfg.n_heads)
        W_K = einops.rearrange(W_K, "(n h) m->n m h", n=cfg.n_key_value_heads)
        W_V = einops.rearrange(W_V, "(n h) m->n m h", n=cfg.n_key_value_heads)

        state_dict[f"blocks.{l}.attn.W_Q"] = W_Q
        state_dict[f"blocks.{l}.attn._W_K"] = W_K
        state_dict[f"blocks.{l}.attn._W_V"] = W_V

        b_Q = qwen.model.layers[l].self_attn.q_proj.bias
        b_Q = einops.rearrange(
            b_Q,
            "(n_head d_head) -> n_head d_head",
            n_head=cfg.n_heads,
        )

        b_K = qwen.model.layers[l].self_attn.k_proj.bias
        b_K = einops.rearrange(
            b_K,
            "(n_head d_head) -> n_head d_head",
            n_head=cfg.n_key_value_heads,
        )

        b_V = qwen.model.layers[l].self_attn.v_proj.bias
        b_V = einops.rearrange(
            b_V,
            "(n_head d_head) -> n_head d_head",
            n_head=cfg.n_key_value_heads,
        )

        state_dict[f"blocks.{l}.attn.b_Q"] = b_Q
        state_dict[f"blocks.{l}.attn._b_K"] = b_K
        state_dict[f"blocks.{l}.attn._b_V"] = b_V

        W_O = qwen.model.layers[l].self_attn.o_proj.weight
        W_O = einops.rearrange(W_O, "m (n h)->n h m", n=cfg.n_heads)
        state_dict[f"blocks.{l}.attn.W_O"] = W_O

        state_dict[f"blocks.{l}.attn.b_O"] = torch.zeros(cfg.d_model, dtype=cfg.dtype)

        state_dict[f"blocks.{l}.ln2.w"] = qwen.model.layers[l].post_attention_layernorm.weight

        state_dict[f"blocks.{l}.mlp.W_in"] = qwen.model.layers[l].mlp.up_proj.weight.T
        state_dict[f"blocks.{l}.mlp.W_gate"] = qwen.model.layers[l].mlp.gate_proj.weight.T
        state_dict[f"blocks.{l}.mlp.b_in"] = torch.zeros(cfg.d_mlp, dtype=cfg.dtype)

        state_dict[f"blocks.{l}.mlp.W_out"] = qwen.model.layers[l].mlp.down_proj.weight.T
        state_dict[f"blocks.{l}.mlp.b_out"] = torch.zeros(cfg.d_model, dtype=cfg.dtype)

    state_dict["ln_final.w"] = qwen.model.norm.weight

    state_dict["unembed.W_U"] = qwen.lm_head.weight.T
    state_dict["unembed.b_U"] = torch.zeros(cfg.d_vocab, dtype=cfg.dtype)

    return state_dict
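
As a quick sanity check on the head-splitting above, here is a minimal sketch of the rearrange the converter applies. The dimensions are assumptions chosen to resemble Qwen2-0.5B (14 query heads, 2 key/value heads, d_head 64, d_model 896), not values read from a checkpoint; in normal use this converter is invoked internally by HookedTransformer.from_pretrained rather than called directly.

import einops
import torch

# Assumed Qwen2-0.5B-like dimensions, for illustration only.
n_heads, n_kv_heads, d_head, d_model = 14, 2, 64, 896

# HF stores q_proj.weight as (n_heads * d_head, d_model); the converter
# splits out the head dimension and moves it last.
W_Q_hf = torch.randn(n_heads * d_head, d_model)
W_Q_tl = einops.rearrange(W_Q_hf, "(n h) m->n m h", n=n_heads)
assert W_Q_tl.shape == (n_heads, d_model, d_head)

# Under grouped-query attention, k_proj/v_proj have fewer output rows,
# so they are split over n_key_value_heads instead.
W_K_hf = torch.randn(n_kv_heads * d_head, d_model)
W_K_tl = einops.rearrange(W_K_hf, "(n h) m->n m h", n=n_kv_heads)
assert W_K_tl.shape == (n_kv_heads, d_model, d_head)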