Coverage for transformer_lens/pretrained/weight_conversions/olmoe.py: 11%

43 statements  

coverage.py v7.10.1, created at 2026-04-30 01:33 +0000

import einops
import torch

from transformer_lens.config.HookedTransformerConfig import HookedTransformerConfig


def convert_olmoe_weights(olmoe, cfg: HookedTransformerConfig):
    state_dict = {}

    assert cfg.n_key_value_heads is not None
    assert cfg.d_mlp is not None
    assert cfg.num_experts is not None

    state_dict["embed.W_E"] = olmoe.model.embed_tokens.weight

    for l in range(cfg.n_layers):
        olmoe_layer = olmoe.model.layers[l]
        state_dict[f"blocks.{l}.ln1.w"] = olmoe_layer.input_layernorm.weight

        W_Q = olmoe_layer.self_attn.q_proj.weight
        W_K = olmoe_layer.self_attn.k_proj.weight
        W_V = olmoe_layer.self_attn.v_proj.weight
        W_Q = einops.rearrange(W_Q, "(n h) m->n m h", n=cfg.n_heads)
        W_K = einops.rearrange(W_K, "(n h) m->n m h", n=cfg.n_key_value_heads)
        W_V = einops.rearrange(W_V, "(n h) m->n m h", n=cfg.n_key_value_heads)
        state_dict[f"blocks.{l}.attn.W_Q"] = W_Q
        state_dict[f"blocks.{l}.attn._W_K"] = W_K
        state_dict[f"blocks.{l}.attn._W_V"] = W_V
        state_dict[f"blocks.{l}.attn.q_norm.w"] = olmoe_layer.self_attn.q_norm.weight
        state_dict[f"blocks.{l}.attn.k_norm.w"] = olmoe_layer.self_attn.k_norm.weight

        state_dict[f"blocks.{l}.attn.b_Q"] = torch.zeros(cfg.n_heads, cfg.d_head, dtype=cfg.dtype)
        state_dict[f"blocks.{l}.attn._b_K"] = torch.zeros(
            cfg.n_key_value_heads, cfg.d_head, dtype=cfg.dtype
        )
        state_dict[f"blocks.{l}.attn._b_V"] = torch.zeros(
            cfg.n_key_value_heads, cfg.d_head, dtype=cfg.dtype
        )

        W_O = olmoe_layer.self_attn.o_proj.weight
        W_O = einops.rearrange(W_O, "m (n h)->n h m", n=cfg.n_heads)
        state_dict[f"blocks.{l}.attn.W_O"] = W_O

        state_dict[f"blocks.{l}.attn.b_O"] = torch.zeros(cfg.d_model, dtype=cfg.dtype)

        state_dict[f"blocks.{l}.ln2.w"] = olmoe_layer.post_attention_layernorm.weight

        state_dict[f"blocks.{l}.mlp.W_gate.weight"] = olmoe_layer.mlp.gate.weight

        # HF OLMoE uses batched expert weights:
        #   gate_up_proj: [num_experts, 2 * intermediate_size, hidden_size]
        #   down_proj:    [num_experts, hidden_size, intermediate_size]
        # The gate_up_proj fuses gate and up projections along dim 1.
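        # Illustrative shapes, assuming the published OLMoE-1B-7B config
        # (d_model = 2048, d_mlp = 1024, num_experts = 64): gate_up_proj[e] is
        # [2048, 2048], with rows [:1024] holding the gate projection and rows
        # [1024:] the up projection, while down_proj[e] is [2048, 1024].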

        experts = olmoe_layer.mlp.experts
        gate_up = experts.gate_up_proj  # [num_experts, 2*d_mlp, d_model]
        down = experts.down_proj  # [num_experts, d_model, d_mlp]

        for e in range(cfg.num_experts):
            # Split fused gate_up into gate and up projections
            state_dict[f"blocks.{l}.mlp.experts.{e}.W_gate.weight"] = gate_up[e, : cfg.d_mlp, :]
            state_dict[f"blocks.{l}.mlp.experts.{e}.W_in.weight"] = gate_up[e, cfg.d_mlp :, :]
            state_dict[f"blocks.{l}.mlp.experts.{e}.W_out.weight"] = down[e]

    state_dict["ln_final.w"] = olmoe.model.norm.weight

    state_dict["unembed.W_U"] = olmoe.lm_head.weight.T
    state_dict["unembed.b_U"] = torch.zeros(cfg.d_vocab, dtype=cfg.dtype)

    return state_dict
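
A minimal sketch of how this converter might be driven by hand, for orientation only. The checkpoint name, config field values, and dtype below are assumptions taken from the published allenai/OLMoE-1B-7B-0924 model card rather than from this file, and the sketch presumes a transformers version whose OLMoE MoE block exposes the batched gate_up_proj / down_proj tensors this converter reads. In normal use, TransformerLens builds the HookedTransformerConfig itself and invokes convert_olmoe_weights as part of its pretrained-loading path.

import torch
from transformers import AutoModelForCausalLM

from transformer_lens.config.HookedTransformerConfig import HookedTransformerConfig
from transformer_lens.pretrained.weight_conversions.olmoe import convert_olmoe_weights

# Load the HF reference model (assumed checkpoint name).
hf_model = AutoModelForCausalLM.from_pretrained(
    "allenai/OLMoE-1B-7B-0924", torch_dtype=torch.bfloat16
)

# Config values are assumptions copied from the published OLMoE-1B-7B config.
cfg = HookedTransformerConfig(
    n_layers=16,
    d_model=2048,
    n_ctx=4096,
    d_head=128,
    n_heads=16,
    n_key_value_heads=16,
    d_mlp=1024,
    d_vocab=50304,
    num_experts=64,
    experts_per_token=8,
    act_fn="silu",
    dtype=torch.bfloat16,
)

state_dict = convert_olmoe_weights(hf_model, cfg)
print(state_dict["blocks.0.attn.W_Q"].shape)                   # [n_heads, d_model, d_head]
print(state_dict["blocks.0.mlp.experts.0.W_in.weight"].shape)  # [d_mlp, d_model]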