Coverage for transformer_lens/pretrained/weight_conversions/olmo2.py: 14%

40 statements  


import einops
import torch
from transformers.models.olmo2.modeling_olmo2 import Olmo2DecoderLayer

from transformer_lens.config.HookedTransformerConfig import HookedTransformerConfig


def convert_olmo2_weights(olmo2, cfg: HookedTransformerConfig):
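    """Convert a Hugging Face OLMo 2 model's weights into a TransformerLens state dict.

    `olmo2` is expected to be a `transformers` OLMo 2 causal LM whose dimensions
    match `cfg`.
    """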

    state_dict = {}

    assert cfg.d_mlp is not None

    state_dict["embed.W_E"] = olmo2.model.embed_tokens.weight

    for l in range(cfg.n_layers):
        olmo2_layer = olmo2.model.layers[l]
        assert isinstance(olmo2_layer, Olmo2DecoderLayer)

        W_Q = olmo2_layer.self_attn.q_proj.weight
        W_K = olmo2_layer.self_attn.k_proj.weight
        W_V = olmo2_layer.self_attn.v_proj.weight
        W_Q = einops.rearrange(W_Q, "(n h) m->n m h", n=cfg.n_heads)
        W_K = einops.rearrange(W_K, "(n h) m->n m h", n=cfg.n_heads)
        W_V = einops.rearrange(W_V, "(n h) m->n m h", n=cfg.n_heads)
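        # HF packs all heads into the rows of each projection, so q_proj.weight is
        # (n_heads * d_head, d_model); after the rearranges above, W_Q/W_K/W_V are
        # (n_heads, d_model, d_head), TransformerLens's per-head layout.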

        state_dict[f"blocks.{l}.attn.W_Q"] = W_Q
        state_dict[f"blocks.{l}.attn.W_K"] = W_K
        state_dict[f"blocks.{l}.attn.W_V"] = W_V
        state_dict[f"blocks.{l}.attn.q_norm.w"] = olmo2_layer.self_attn.q_norm.weight
        state_dict[f"blocks.{l}.attn.k_norm.w"] = olmo2_layer.self_attn.k_norm.weight
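        # q_norm/k_norm are OLMo 2's QK-norm: RMSNorm scales applied to the
        # query and key projections.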

        # OLMo 2's attention projections carry no bias terms, so zero biases are
        # supplied to fill TransformerLens's expected keys.
        state_dict[f"blocks.{l}.attn.b_Q"] = torch.zeros(cfg.n_heads, cfg.d_head, dtype=cfg.dtype)
        state_dict[f"blocks.{l}.attn.b_K"] = torch.zeros(cfg.n_heads, cfg.d_head, dtype=cfg.dtype)
        state_dict[f"blocks.{l}.attn.b_V"] = torch.zeros(cfg.n_heads, cfg.d_head, dtype=cfg.dtype)

        W_O = olmo2_layer.self_attn.o_proj.weight
        W_O = einops.rearrange(W_O, "m (n h)->n h m", n=cfg.n_heads)
        state_dict[f"blocks.{l}.attn.W_O"] = W_O
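        # o_proj.weight is (d_model, n_heads * d_head); rearranged, W_O becomes
        # (n_heads, d_head, d_model), the per-head output projection.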

        state_dict[f"blocks.{l}.attn.b_O"] = torch.zeros(cfg.d_model, dtype=cfg.dtype)

        state_dict[f"blocks.{l}.ln1.w"] = olmo2_layer.post_attention_layernorm.weight
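        # OLMo 2 normalizes sublayer outputs rather than inputs, so ln1 (and ln2
        # below) map to the post-attention and post-feedforward RMSNorms.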

        state_dict[f"blocks.{l}.mlp.W_in"] = olmo2_layer.mlp.up_proj.weight.T
        state_dict[f"blocks.{l}.mlp.W_gate"] = olmo2_layer.mlp.gate_proj.weight.T
        state_dict[f"blocks.{l}.mlp.b_in"] = torch.zeros(cfg.d_mlp, dtype=cfg.dtype)
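        # The MLP is gated (SwiGLU-style): up_proj -> W_in, gate_proj -> W_gate.
        # nn.Linear stores weights as (out_features, in_features), while
        # TransformerLens stores (in, out), hence the transposes.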

        state_dict[f"blocks.{l}.mlp.W_out"] = olmo2_layer.mlp.down_proj.weight.T
        state_dict[f"blocks.{l}.mlp.b_out"] = torch.zeros(cfg.d_model, dtype=cfg.dtype)

        state_dict[f"blocks.{l}.ln2.w"] = olmo2_layer.post_feedforward_layernorm.weight

    state_dict["ln_final.w"] = olmo2.model.norm.weight

    state_dict["unembed.W_U"] = olmo2.lm_head.weight.T
    state_dict["unembed.b_U"] = torch.zeros(cfg.d_vocab, dtype=cfg.dtype)

    return state_dict
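
A minimal usage sketch. The checkpoint name `allenai/OLMo-2-1124-7B` and the cfg field mapping below are illustrative assumptions, not part of this module:

from transformers import AutoModelForCausalLM

from transformer_lens.config.HookedTransformerConfig import HookedTransformerConfig
from transformer_lens.pretrained.weight_conversions.olmo2 import convert_olmo2_weights

# Illustrative checkpoint; any OLMo 2 causal LM whose dims match cfg would do.
hf_model = AutoModelForCausalLM.from_pretrained("allenai/OLMo-2-1124-7B")
hf_cfg = hf_model.config
cfg = HookedTransformerConfig(
    n_layers=hf_cfg.num_hidden_layers,
    d_model=hf_cfg.hidden_size,
    n_ctx=hf_cfg.max_position_embeddings,
    d_head=hf_cfg.hidden_size // hf_cfg.num_attention_heads,
    n_heads=hf_cfg.num_attention_heads,
    d_mlp=hf_cfg.intermediate_size,
    d_vocab=hf_cfg.vocab_size,
    act_fn="silu",
    gated_mlp=True,  # OLMo 2 uses a gated MLP, matching the W_gate key above
)
state_dict = convert_olmo2_weights(hf_model, cfg)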