Coverage for transformer_lens/pretrained/weight_conversions/llama.py: 11%

45 statements  

coverage.py v7.4.4, created at 2024-10-04 23:19 +0000

from typing import cast

import einops
import torch

from transformer_lens.HookedTransformerConfig import HookedTransformerConfig


def convert_llama_weights(llama, cfg: HookedTransformerConfig):
    state_dict = {}

    state_dict["embed.W_E"] = llama.model.embed_tokens.weight

    # Some models with the Llama architecture use Grouped Query Attention, and so for these we need to
    # modify the state dict keys for the K/V attention weight/biases, prepending "_" to the key names.
    using_gqa = cfg.n_key_value_heads is not None
    gqa_uscore = "_" if using_gqa else ""
    # need a cast since MyPy isn't smart enough to realize that using_gqa implies n_key_value_heads is not None
    n_kv_heads = cast(int, cfg.n_key_value_heads if using_gqa else cfg.n_heads)

    # llama has no biases anywhere and deals with everything else roughly like
    # GPTNeoX with different names

    assert cfg.d_mlp is not None  # keep mypy happy

    for l in range(cfg.n_layers):
        state_dict[f"blocks.{l}.ln1.w"] = llama.model.layers[l].input_layernorm.weight

        W_Q = llama.model.layers[l].self_attn.q_proj.weight
        W_K = llama.model.layers[l].self_attn.k_proj.weight
        W_V = llama.model.layers[l].self_attn.v_proj.weight

        # in case of quantization,
        # parameters should stay as bitsandbytes.nn.modules.Params4bit
        if not cfg.load_in_4bit:
            W_Q = einops.rearrange(W_Q, "(n h) m->n m h", n=cfg.n_heads)
            W_K = einops.rearrange(W_K, "(n h) m->n m h", n=n_kv_heads)
            W_V = einops.rearrange(W_V, "(n h) m->n m h", n=n_kv_heads)

        state_dict[f"blocks.{l}.attn.W_Q"] = W_Q
        state_dict[f"blocks.{l}.attn.{gqa_uscore}W_K"] = W_K
        state_dict[f"blocks.{l}.attn.{gqa_uscore}W_V"] = W_V

        state_dict[f"blocks.{l}.attn.b_Q"] = torch.zeros(
            cfg.n_heads, cfg.d_head, dtype=cfg.dtype, device=cfg.device
        )
        state_dict[f"blocks.{l}.attn.{gqa_uscore}b_K"] = torch.zeros(
            n_kv_heads,
            cfg.d_head,
            dtype=cfg.dtype,
            device=cfg.device,
        )
        state_dict[f"blocks.{l}.attn.{gqa_uscore}b_V"] = torch.zeros(
            n_kv_heads,
            cfg.d_head,
            dtype=cfg.dtype,
            device=cfg.device,
        )

        W_O = llama.model.layers[l].self_attn.o_proj.weight

        if not cfg.load_in_4bit:
            W_O = einops.rearrange(W_O, "m (n h)->n h m", n=cfg.n_heads)

        state_dict[f"blocks.{l}.attn.W_O"] = W_O.to(device=cfg.device)

        state_dict[f"blocks.{l}.attn.b_O"] = torch.zeros(
            cfg.d_model, dtype=cfg.dtype, device=cfg.device
        )

        state_dict[f"blocks.{l}.ln2.w"] = llama.model.layers[l].post_attention_layernorm.weight

        # in case of quantization,
        # parameters should stay as bitsandbytes.nn.modules.Params4bit
        if not cfg.load_in_4bit:
            state_dict[f"blocks.{l}.mlp.W_in"] = llama.model.layers[l].mlp.up_proj.weight.T
            state_dict[f"blocks.{l}.mlp.W_gate"] = llama.model.layers[l].mlp.gate_proj.weight.T
            state_dict[f"blocks.{l}.mlp.W_out"] = llama.model.layers[l].mlp.down_proj.weight.T
        else:
            state_dict[f"blocks.{l}.mlp.W_in"] = llama.model.layers[l].mlp.up_proj.weight
            state_dict[f"blocks.{l}.mlp.W_gate"] = llama.model.layers[l].mlp.gate_proj.weight
            state_dict[f"blocks.{l}.mlp.W_out"] = llama.model.layers[l].mlp.down_proj.weight

        state_dict[f"blocks.{l}.mlp.b_in"] = torch.zeros(
            cfg.d_mlp, dtype=cfg.dtype, device=cfg.device
        )
        state_dict[f"blocks.{l}.mlp.b_out"] = torch.zeros(
            cfg.d_model, dtype=cfg.dtype, device=cfg.device
        )

    state_dict["ln_final.w"] = llama.model.norm.weight

    state_dict["unembed.W_U"] = llama.lm_head.weight.T
    state_dict["unembed.b_U"] = torch.zeros(cfg.d_vocab, dtype=cfg.dtype, device=cfg.device)

    return state_dict
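
The snippet below is not part of the file above; it is a minimal sketch of how the converter could be exercised end to end, assuming a tiny randomly initialised Hugging Face LlamaForCausalLM and a hand-built HookedTransformerConfig. The specific sizes (2 layers, d_model=32, 4 query heads, 2 key/value heads) are purely illustrative; in normal use the config is built for you by HookedTransformer.from_pretrained, which then calls this converter internally. The sketch just shows the key names and tensor shapes the conversion produces, including the "_"-prefixed K/V keys emitted when grouped-query attention is in use.

import torch
from transformers import LlamaConfig, LlamaForCausalLM

from transformer_lens.HookedTransformerConfig import HookedTransformerConfig
from transformer_lens.pretrained.weight_conversions.llama import convert_llama_weights

# Toy, randomly initialised Llama-style model (illustrative sizes, not a real checkpoint).
hf_model = LlamaForCausalLM(
    LlamaConfig(
        vocab_size=128,
        hidden_size=32,
        intermediate_size=64,
        num_hidden_layers=2,
        num_attention_heads=4,
        num_key_value_heads=2,  # GQA: fewer K/V heads than query heads
        max_position_embeddings=32,
    )
)

# Matching TransformerLens config. n_ctx is required by HookedTransformerConfig itself and
# act_fn is set to Llama's SiLU for completeness; the converter only reads n_layers, n_heads,
# n_key_value_heads, d_head, d_model, d_mlp, d_vocab, dtype, device and load_in_4bit.
cfg = HookedTransformerConfig(
    n_layers=2,
    d_model=32,
    n_ctx=32,
    d_head=8,
    n_heads=4,
    d_mlp=64,
    d_vocab=128,
    n_key_value_heads=2,
    act_fn="silu",
    dtype=torch.float32,
)

state_dict = convert_llama_weights(hf_model, cfg)

# Query weights keep one slot per query head...
print(state_dict["blocks.0.attn.W_Q"].shape)   # torch.Size([4, 32, 8])  -> [n_heads, d_model, d_head]
# ...while the grouped K/V weights use the underscore-prefixed keys and n_key_value_heads slots.
print(state_dict["blocks.0.attn._W_K"].shape)  # torch.Size([2, 32, 8])  -> [n_kv_heads, d_model, d_head]
print(state_dict["blocks.0.mlp.W_in"].shape)   # torch.Size([32, 64])    -> [d_model, d_mlp]

Loading the resulting state dict into an actual HookedTransformer would additionally require the Llama-specific config options (rotary positional embeddings, RMS normalization) that HookedTransformer.from_pretrained normally sets up for you.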