Coverage for transformer_lens/pretrained/weight_conversions/mistral.py: 13%

36 statements  

coverage.py v7.4.4, created at 2024-12-14 00:54 +0000

import einops
import torch

from transformer_lens.HookedTransformerConfig import HookedTransformerConfig


def convert_mistral_weights(mistral, cfg: HookedTransformerConfig):
    state_dict = {}

    state_dict["embed.W_E"] = mistral.model.embed_tokens.weight

    assert cfg.n_key_value_heads is not None  # keep mypy happy
    assert cfg.d_mlp is not None  # keep mypy happy

    # Mistral has no biases anywhere
    for l in range(cfg.n_layers):
        state_dict[f"blocks.{l}.ln1.w"] = mistral.model.layers[l].input_layernorm.weight

        W_Q = mistral.model.layers[l].self_attn.q_proj.weight
        W_K = mistral.model.layers[l].self_attn.k_proj.weight
        W_V = mistral.model.layers[l].self_attn.v_proj.weight
        W_Q = einops.rearrange(W_Q, "(n h) m->n m h", n=cfg.n_heads)
        W_K = einops.rearrange(W_K, "(n h) m->n m h", n=cfg.n_key_value_heads)
        W_V = einops.rearrange(W_V, "(n h) m->n m h", n=cfg.n_key_value_heads)
        state_dict[f"blocks.{l}.attn.W_Q"] = W_Q
        state_dict[f"blocks.{l}.attn._W_K"] = W_K
        state_dict[f"blocks.{l}.attn._W_V"] = W_V

        state_dict[f"blocks.{l}.attn.b_Q"] = torch.zeros(cfg.n_heads, cfg.d_head, dtype=cfg.dtype)
        state_dict[f"blocks.{l}.attn._b_K"] = torch.zeros(
            cfg.n_key_value_heads, cfg.d_head, dtype=cfg.dtype
        )
        state_dict[f"blocks.{l}.attn._b_V"] = torch.zeros(
            cfg.n_key_value_heads, cfg.d_head, dtype=cfg.dtype
        )

        W_O = mistral.model.layers[l].self_attn.o_proj.weight
        W_O = einops.rearrange(W_O, "m (n h)->n h m", n=cfg.n_heads)
        state_dict[f"blocks.{l}.attn.W_O"] = W_O

        state_dict[f"blocks.{l}.attn.b_O"] = torch.zeros(cfg.d_model, dtype=cfg.dtype)

        state_dict[f"blocks.{l}.ln2.w"] = mistral.model.layers[l].post_attention_layernorm.weight

        state_dict[f"blocks.{l}.mlp.W_in"] = mistral.model.layers[l].mlp.up_proj.weight.T
        state_dict[f"blocks.{l}.mlp.W_gate"] = mistral.model.layers[l].mlp.gate_proj.weight.T
        state_dict[f"blocks.{l}.mlp.b_in"] = torch.zeros(cfg.d_mlp, dtype=cfg.dtype)

        state_dict[f"blocks.{l}.mlp.W_out"] = mistral.model.layers[l].mlp.down_proj.weight.T
        state_dict[f"blocks.{l}.mlp.b_out"] = torch.zeros(cfg.d_model, dtype=cfg.dtype)

    state_dict["ln_final.w"] = mistral.model.norm.weight

    state_dict["unembed.W_U"] = mistral.lm_head.weight.T
    state_dict["unembed.b_U"] = torch.zeros(cfg.d_vocab, dtype=cfg.dtype)

    return state_dict
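
Usage sketch (not part of the file above): in normal use, HookedTransformer.from_pretrained derives a matching HookedTransformerConfig and calls this converter internally, so the direct call below is for illustration only. It assumes the "mistral-7b" model name is registered with TransformerLens, that the Hugging Face checkpoint "mistralai/Mistral-7B-v0.1" is accessible, and that enough memory is available to hold the weights.

from transformers import AutoModelForCausalLM

from transformer_lens import HookedTransformer
from transformer_lens.pretrained.weight_conversions.mistral import convert_mistral_weights

# Normal path: from_pretrained builds the config and converts the weights itself.
model = HookedTransformer.from_pretrained("mistral-7b")

# Direct call for illustration, reusing the config TransformerLens built for the
# same checkpoint so the head counts and dtypes line up.
hf_model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1")
state_dict = convert_mistral_weights(hf_model, model.cfg)

# Grouped-query attention: queries keep n_heads heads, while the underscore-prefixed
# _W_K/_W_V entries have only n_key_value_heads heads; all bias entries are zero
# tensors because Mistral has no biases.
print(state_dict["blocks.0.attn.W_Q"].shape)   # (n_heads, d_model, d_head)
print(state_dict["blocks.0.attn._W_K"].shape)  # (n_key_value_heads, d_model, d_head)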