Coverage for transformer_lens/pretrained/weight_conversions/mixtral.py: 12%

37 statements  

coverage.py v7.4.4, created at 2024-12-14 00:54 +0000

import einops
import torch

from transformer_lens.HookedTransformerConfig import HookedTransformerConfig


def convert_mixtral_weights(mixtral, cfg: HookedTransformerConfig):
    # The same as Mistral, but with the MLP replaced with MoE
    # As with Mistral, Mixtral has no biases

    state_dict = {}

    assert cfg.n_key_value_heads is not None  # keep mypy happy
    assert cfg.d_mlp is not None
    assert cfg.num_experts is not None

    state_dict["embed.W_E"] = mixtral.model.embed_tokens.weight

    for l in range(cfg.n_layers):
        state_dict[f"blocks.{l}.ln1.w"] = mixtral.model.layers[l].input_layernorm.weight

        W_Q = mixtral.model.layers[l].self_attn.q_proj.weight
        W_K = mixtral.model.layers[l].self_attn.k_proj.weight
        W_V = mixtral.model.layers[l].self_attn.v_proj.weight
        W_Q = einops.rearrange(W_Q, "(n h) m->n m h", n=cfg.n_heads)
        W_K = einops.rearrange(W_K, "(n h) m->n m h", n=cfg.n_key_value_heads)
        W_V = einops.rearrange(W_V, "(n h) m->n m h", n=cfg.n_key_value_heads)
        state_dict[f"blocks.{l}.attn.W_Q"] = W_Q
        state_dict[f"blocks.{l}.attn._W_K"] = W_K
        state_dict[f"blocks.{l}.attn._W_V"] = W_V

        state_dict[f"blocks.{l}.attn.b_Q"] = torch.zeros(cfg.n_heads, cfg.d_head, dtype=cfg.dtype)
        state_dict[f"blocks.{l}.attn._b_K"] = torch.zeros(
            cfg.n_key_value_heads, cfg.d_head, dtype=cfg.dtype
        )
        state_dict[f"blocks.{l}.attn._b_V"] = torch.zeros(
            cfg.n_key_value_heads, cfg.d_head, dtype=cfg.dtype
        )

        W_O = mixtral.model.layers[l].self_attn.o_proj.weight
        W_O = einops.rearrange(W_O, "m (n h)->n h m", n=cfg.n_heads)
        state_dict[f"blocks.{l}.attn.W_O"] = W_O

        state_dict[f"blocks.{l}.attn.b_O"] = torch.zeros(cfg.d_model, dtype=cfg.dtype)

        state_dict[f"blocks.{l}.ln2.w"] = mixtral.model.layers[l].post_attention_layernorm.weight

        state_dict[f"blocks.{l}.mlp.W_gate.weight"] = mixtral.model.layers[
            l
        ].block_sparse_moe.gate.weight

        # The mapping here from wn to W_{in/out/gate} is a bit confusing:
        # w1 -> W_gate
        # w2 -> W_out
        # w3 -> W_in
        # See https://github.com/mistralai/mistral-inference/blob/8598cf582091a596671be31990448e0620017851/mistral/model.py#L128 for reference
        for e in range(cfg.num_experts):
            state_dict[f"blocks.{l}.mlp.experts.{e}.W_in.weight"] = (
                mixtral.model.layers[l].block_sparse_moe.experts[e].w3.weight
            )
            state_dict[f"blocks.{l}.mlp.experts.{e}.W_gate.weight"] = (
                mixtral.model.layers[l].block_sparse_moe.experts[e].w1.weight
            )
            state_dict[f"blocks.{l}.mlp.experts.{e}.W_out.weight"] = (
                mixtral.model.layers[l].block_sparse_moe.experts[e].w2.weight
            )

    state_dict["ln_final.w"] = mixtral.model.norm.weight.data

    state_dict["unembed.W_U"] = mixtral.lm_head.weight.T
    state_dict["unembed.b_U"] = torch.zeros(cfg.d_vocab, dtype=cfg.dtype)

    return state_dict
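
The w1/w2/w3 naming in the expert loop comes from the Mistral reference implementation, where each expert is a SwiGLU-style MLP. A minimal sketch of one expert's forward pass (illustrative only: random weights and toy dimensions, not Mixtral's real ones) shows why w1 plays the role of W_gate, w3 the role of W_in, and w2 the role of W_out:

import torch
import torch.nn.functional as F

# Toy dimensions for illustration; Mixtral's real d_model/d_mlp are much larger.
d_model, d_mlp = 8, 32
w1 = torch.randn(d_mlp, d_model)   # gate projection  -> W_gate
w3 = torch.randn(d_mlp, d_model)   # input projection -> W_in
w2 = torch.randn(d_model, d_mlp)   # output projection -> W_out

x = torch.randn(d_model)

# One expert computes out = w2(silu(w1(x)) * w3(x)), so the silu'd branch (w1)
# gates the linear branch (w3) before the down-projection (w2).
hidden = F.silu(w1 @ x) * (w3 @ x)
out = w2 @ hidden
print(out.shape)  # torch.Size([8])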
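
For context, a hedged usage sketch of the converter itself, assuming the HuggingFace transformers Mixtral implementation and a hand-built HookedTransformerConfig. The config values below are illustrative placeholders rather than the exact Mixtral-8x7B hyperparameters, and loading the checkpoint requires substantial memory:

import torch
from transformers import MixtralForCausalLM

from transformer_lens.HookedTransformerConfig import HookedTransformerConfig
from transformer_lens.pretrained.weight_conversions.mixtral import convert_mixtral_weights

# Load the HuggingFace model whose weights will be remapped.
hf_model = MixtralForCausalLM.from_pretrained(
    "mistralai/Mixtral-8x7B-v0.1", torch_dtype=torch.bfloat16
)

# Only the fields read by convert_mixtral_weights are essential here;
# the concrete numbers are placeholders for illustration.
cfg = HookedTransformerConfig(
    n_layers=32,
    d_model=4096,
    n_ctx=2048,
    d_head=128,
    n_heads=32,
    d_mlp=14336,
    d_vocab=32000,
    n_key_value_heads=8,
    num_experts=8,
    act_fn="silu",
    dtype=torch.bfloat16,
)

state_dict = convert_mixtral_weights(hf_model, cfg)
print(sorted(state_dict.keys())[:5])  # e.g. embed.W_E, blocks.0.attn.W_Q, ...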