Coverage for transformer_lens/pretrained/weight_conversions/qwen.py: 12%

38 statements  


import einops
import torch

from transformer_lens.HookedTransformerConfig import HookedTransformerConfig

def convert_qwen_weights(qwen, cfg: HookedTransformerConfig):
    """Convert a HuggingFace Qwen model's weights into a TransformerLens state dict."""
    state_dict = {}
    model = qwen.transformer
    state_dict["embed.W_E"] = model.wte.weight

    assert cfg.d_mlp is not None  # keep mypy happy

    for l in range(cfg.n_layers):
        state_dict[f"blocks.{l}.ln1.w"] = model.h[l].ln_1.weight

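        # Qwen fuses the query, key, and value projections into a single
        # c_attn matrix of shape [3 * d_model, d_model]; split it into three
        # [d_model, d_model] blocks and reshape each to TransformerLens's
        # [n_heads, d_model, d_head] layout.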
        W_Q, W_K, W_V = model.h[l].attn.c_attn.weight.split(split_size=cfg.d_model, dim=0)
        W_Q = einops.rearrange(W_Q, "(n h) m -> n m h", n=cfg.n_heads)
        W_K = einops.rearrange(W_K, "(n h) m -> n m h", n=cfg.n_heads)
        W_V = einops.rearrange(W_V, "(n h) m -> n m h", n=cfg.n_heads)
        state_dict[f"blocks.{l}.attn.W_Q"] = W_Q
        state_dict[f"blocks.{l}.attn.W_K"] = W_K
        state_dict[f"blocks.{l}.attn.W_V"] = W_V

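        # The fused QKV bias is split the same way, reshaping each
        # [d_model] slice to [n_heads, d_head].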
24 

25 b_Q, b_K, b_V = model.h[l].attn.c_attn.bias.split(split_size=cfg.d_model, dim=0) 

26 b_Q = einops.rearrange( 

27 b_Q, 

28 "(n_head d_head) -> n_head d_head", 

29 n_head=cfg.n_heads, 

30 ) 

31 b_K = einops.rearrange( 

32 b_K, 

33 "(n_head d_head) -> n_head d_head", 

34 n_head=cfg.n_heads, 

35 ) 

36 b_V = einops.rearrange( 

37 b_V, 

38 "(n_head d_head) -> n_head d_head", 

39 n_head=cfg.n_heads, 

40 ) 

41 state_dict[f"blocks.{l}.attn.b_Q"] = b_Q 

42 state_dict[f"blocks.{l}.attn.b_K"] = b_K 

43 state_dict[f"blocks.{l}.attn.b_V"] = b_V 

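        # The output projection is stored as [d_model, n_heads * d_head];
        # rearrange it to per-head [n_heads, d_head, d_model]. The checkpoint
        # carries no output-projection bias, so b_O is zero-filled.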
        W_O = model.h[l].attn.c_proj.weight
        W_O = einops.rearrange(W_O, "m (n h) -> n h m", n=cfg.n_heads)
        state_dict[f"blocks.{l}.attn.W_O"] = W_O

        state_dict[f"blocks.{l}.attn.b_O"] = torch.zeros(cfg.d_model, dtype=cfg.dtype)

        state_dict[f"blocks.{l}.ln2.w"] = model.h[l].ln_2.weight

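        # Qwen's MLP is gated: w1 and w2 map to TransformerLens's W_in and
        # W_gate, transposed from HuggingFace's [d_mlp, d_model] layout to
        # [d_model, d_mlp]. The MLP biases do not exist in the checkpoint,
        # so they are zero-filled.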
        state_dict[f"blocks.{l}.mlp.W_in"] = model.h[l].mlp.w1.weight.T
        state_dict[f"blocks.{l}.mlp.W_gate"] = model.h[l].mlp.w2.weight.T
        state_dict[f"blocks.{l}.mlp.b_in"] = torch.zeros(cfg.d_mlp, dtype=cfg.dtype)

        state_dict[f"blocks.{l}.mlp.W_out"] = model.h[l].mlp.c_proj.weight.T
        state_dict[f"blocks.{l}.mlp.b_out"] = torch.zeros(cfg.d_model, dtype=cfg.dtype)

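    # Final norm weight, then the unembedding: lm_head.weight is
    # [d_vocab, d_model] in HuggingFace, transposed to TransformerLens's
    # [d_model, d_vocab], with a zero bias.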
    state_dict["ln_final.w"] = model.ln_f.weight

    state_dict["unembed.W_U"] = qwen.lm_head.weight.T
    state_dict["unembed.b_U"] = torch.zeros(cfg.d_vocab, dtype=cfg.dtype)

    return state_dict
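
# Usage sketch (illustrative; not part of the module under test). The
# TransformerLens loading utilities normally call this function internally,
# but it can be exercised directly on a HuggingFace Qwen checkpoint. The
# model name and the pre-built `cfg` below are assumptions for illustration:
#
#     from transformers import AutoModelForCausalLM
#
#     qwen = AutoModelForCausalLM.from_pretrained("Qwen/Qwen-1_8B", trust_remote_code=True)
#     state_dict = convert_qwen_weights(qwen, cfg)  # cfg: a matching HookedTransformerConfig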