Coverage for transformer_lens/pretrained/weight_conversions/phi3.py: 14%

41 statements  

coverage.py v7.4.4, created at 2025-01-21 00:15 +0000

from typing import cast

import einops
import torch

from transformer_lens.HookedTransformerConfig import HookedTransformerConfig


def convert_phi3_weights(phi, cfg: HookedTransformerConfig):
    state_dict = {}

    state_dict["embed.W_E"] = phi.model.embed_tokens.weight

    # Some models with this architecture use Grouped Query Attention, and so for these we need to
    # modify the state dict keys for the K/V attention weights/biases, prepending "_" to the
    # key names.
    using_gqa = cfg.n_key_value_heads is not None
    gqa_uscore = "_" if using_gqa else ""
    # Need a cast, since MyPy isn't smart enough to realize that using_gqa implies
    # n_key_value_heads is not None.
    n_kv_heads = cast(int, cfg.n_key_value_heads if using_gqa else cfg.n_heads)
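    # For example, with GQA the layer-0 key weight lands under "blocks.0.attn._W_K"
    # rather than "blocks.0.attn.W_K".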

    for l in range(cfg.n_layers):
        state_dict[f"blocks.{l}.ln1.w"] = phi.model.layers[l].input_layernorm.weight
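        # Phi-3's norms are RMSNorm, which has no bias, so fill the bias slot with zeros
        # of width d_model.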
        state_dict[f"blocks.{l}.ln1.b"] = torch.zeros(cfg.d_model, dtype=cfg.dtype)

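        # Phi-3 fuses the Q, K and V projections into a single qkv_proj matrix with
        # (n_heads + 2 * n_kv_heads) * d_head output rows; split it back apart along dim 0.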
        W = phi.model.layers[l].self_attn.qkv_proj.weight
        q_dim = cfg.n_heads * cfg.d_head
        kv_dim = n_kv_heads * cfg.d_head
        W_Q, W_K, W_V = W.split([q_dim, kv_dim, kv_dim], dim=0)

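        # TransformerLens stores attention weights per head as [head_index, d_model, d_head],
        # whereas the HF matrices are laid out as [n_heads * d_head, d_model].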
        W_Q = einops.rearrange(
            W_Q, "(n_head d_head) d_model -> n_head d_model d_head", n_head=cfg.n_heads
        )
        W_K = einops.rearrange(
            W_K, "(n_kv_head d_head) d_model -> n_kv_head d_model d_head", n_kv_head=n_kv_heads
        )
        W_V = einops.rearrange(
            W_V, "(n_kv_head d_head) d_model -> n_kv_head d_model d_head", n_kv_head=n_kv_heads
        )
        state_dict[f"blocks.{l}.attn.W_Q"] = W_Q
        state_dict[f"blocks.{l}.attn.{gqa_uscore}W_K"] = W_K
        state_dict[f"blocks.{l}.attn.{gqa_uscore}W_V"] = W_V

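        # Phi-3's attention projections have no bias terms, so provide zeros of the shapes
        # TransformerLens expects.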
        state_dict[f"blocks.{l}.attn.b_Q"] = torch.zeros(
            cfg.n_heads, cfg.d_head, dtype=cfg.dtype, device=cfg.device
        )
        state_dict[f"blocks.{l}.attn.{gqa_uscore}b_K"] = torch.zeros(
            n_kv_heads, cfg.d_head, dtype=cfg.dtype, device=cfg.device
        )
        state_dict[f"blocks.{l}.attn.{gqa_uscore}b_V"] = torch.zeros(
            n_kv_heads, cfg.d_head, dtype=cfg.dtype, device=cfg.device
        )

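        # The output projection maps concatenated head outputs back to the residual stream;
        # store it per head as [head_index, d_head, d_model].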
        W_O = phi.model.layers[l].self_attn.o_proj.weight
        W_O = einops.rearrange(
            W_O, "d_model (n_head d_head) -> n_head d_head d_model", n_head=cfg.n_heads
        )

        state_dict[f"blocks.{l}.attn.W_O"] = W_O
        state_dict[f"blocks.{l}.attn.b_O"] = torch.zeros(cfg.d_model, dtype=cfg.dtype)

        state_dict[f"blocks.{l}.ln2.w"] = phi.model.layers[l].post_attention_layernorm.weight
        state_dict[f"blocks.{l}.ln2.b"] = torch.zeros(cfg.d_model, dtype=cfg.dtype)

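        # Phi-3 fuses the gated MLP input into gate_up_proj, gate half first; transpose to
        # [d_model, 2 * d_mlp] and split it down the middle.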
        W = phi.model.layers[l].mlp.gate_up_proj.weight.T
        W_gate, W_in = torch.tensor_split(W, 2, dim=1)
        state_dict[f"blocks.{l}.mlp.W_in"] = W_in
        state_dict[f"blocks.{l}.mlp.W_gate"] = W_gate
        state_dict[f"blocks.{l}.mlp.W_out"] = phi.model.layers[l].mlp.down_proj.weight.T

    state_dict["ln_final.w"] = phi.model.norm.weight

    state_dict["unembed.W_U"] = phi.lm_head.weight.T
    state_dict["unembed.b_U"] = torch.zeros(cfg.d_vocab, dtype=cfg.dtype)

    return state_dict
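
For reference, a minimal sketch of driving this converter by hand. Normally HookedTransformer.from_pretrained performs the conversion internally; the checkpoint name below is just one example, and the get_pretrained_model_config helper's exact signature may vary across TransformerLens versions.

from transformers import AutoModelForCausalLM

from transformer_lens.loading_from_pretrained import get_pretrained_model_config
from transformer_lens.pretrained.weight_conversions.phi3 import convert_phi3_weights

# Load the HF checkpoint and the matching TransformerLens config (example model name).
hf_model = AutoModelForCausalLM.from_pretrained("microsoft/Phi-3-mini-4k-instruct")
cfg = get_pretrained_model_config("microsoft/Phi-3-mini-4k-instruct")

# Map the HF weights onto TransformerLens state dict keys.
state_dict = convert_phi3_weights(hf_model, cfg)

# Query weights come out per head as [n_heads, d_model, d_head].
assert state_dict["blocks.0.attn.W_Q"].shape == (cfg.n_heads, cfg.d_model, cfg.d_head)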