Coverage for transformer_lens/pretrained/weight_conversions/qwen3.py: 13%

43 statements  

coverage.py v7.6.1, created at 2025-07-09 19:34 +0000

from typing import Any

import einops
import torch

from transformer_lens.HookedTransformerConfig import HookedTransformerConfig


def convert_qwen3_weights(qwen: Any, cfg: HookedTransformerConfig):
    """Convert Qwen3 weights to TransformerLens format."""
    state_dict = {}

    state_dict["embed.W_E"] = qwen.model.embed_tokens.weight

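    # Grouped-query attention (GQA): when n_key_value_heads is set, the model
    # has fewer key/value heads than query heads, and TransformerLens stores
    # the grouped K/V parameters under underscore-prefixed names (_W_K, _b_K, ...).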
    if cfg.n_key_value_heads is None:
        gqa_uscore = ""
        n_kv_heads = cfg.n_heads
    else:
        gqa_uscore = "_"
        n_kv_heads = cfg.n_key_value_heads

    assert cfg.d_mlp is not None  # keep mypy happy

    for l in range(cfg.n_layers):
        state_dict[f"blocks.{l}.ln1.w"] = qwen.model.layers[l].input_layernorm.weight

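        # Hugging Face nn.Linear weights are laid out as [out_features, in_features];
        # split the fused head axis and permute to TransformerLens's
        # [head_index, d_model, d_head] convention.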
        W_Q = qwen.model.layers[l].self_attn.q_proj.weight
        W_K = qwen.model.layers[l].self_attn.k_proj.weight
        W_V = qwen.model.layers[l].self_attn.v_proj.weight
        W_Q = einops.rearrange(W_Q, "(n h) m->n m h", n=cfg.n_heads)
        W_K = einops.rearrange(W_K, "(n h) m->n m h", n=n_kv_heads)
        W_V = einops.rearrange(W_V, "(n h) m->n m h", n=n_kv_heads)

        state_dict[f"blocks.{l}.attn.W_Q"] = W_Q
        state_dict[f"blocks.{l}.attn.{gqa_uscore}W_K"] = W_K
        state_dict[f"blocks.{l}.attn.{gqa_uscore}W_V"] = W_V

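        # Qwen3 normalizes queries and keys with per-head RMSNorm (QK-norm)
        # in place of the attention biases used by earlier Qwen generations.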
        # Load weights into RMSNorm modules
        state_dict[f"blocks.{l}.attn.q_norm.w"] = qwen.model.layers[l].self_attn.q_norm.weight
        state_dict[f"blocks.{l}.attn.k_norm.w"] = qwen.model.layers[l].self_attn.k_norm.weight

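        # The checkpoint carries no attention biases, so the TransformerLens
        # bias tensors are simply zero-filled.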
        state_dict[f"blocks.{l}.attn.b_Q"] = torch.zeros(cfg.n_heads, cfg.d_head, dtype=cfg.dtype)
        state_dict[f"blocks.{l}.attn.{gqa_uscore}b_K"] = torch.zeros(
            n_kv_heads, cfg.d_head, dtype=cfg.dtype
        )
        state_dict[f"blocks.{l}.attn.{gqa_uscore}b_V"] = torch.zeros(
            n_kv_heads, cfg.d_head, dtype=cfg.dtype
        )

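        # The output projection fuses the head axis on its input side:
        # reshape [d_model, (n_heads d_head)] to [head_index, d_head, d_model].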
        W_O = qwen.model.layers[l].self_attn.o_proj.weight
        W_O = einops.rearrange(W_O, "m (n h)->n h m", n=cfg.n_heads)
        state_dict[f"blocks.{l}.attn.W_O"] = W_O

        state_dict[f"blocks.{l}.attn.b_O"] = torch.zeros(cfg.d_model, dtype=cfg.dtype)

        state_dict[f"blocks.{l}.ln2.w"] = qwen.model.layers[l].post_attention_layernorm.weight

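        # Gated (SwiGLU-style) MLP: up_proj/gate_proj feed the activation and
        # down_proj returns to the residual stream; transpose from Hugging
        # Face's [out, in] layout to TransformerLens's [in, out].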
        state_dict[f"blocks.{l}.mlp.W_in"] = qwen.model.layers[l].mlp.up_proj.weight.T
        state_dict[f"blocks.{l}.mlp.W_gate"] = qwen.model.layers[l].mlp.gate_proj.weight.T
        state_dict[f"blocks.{l}.mlp.b_in"] = torch.zeros(cfg.d_mlp, dtype=cfg.dtype)

        state_dict[f"blocks.{l}.mlp.W_out"] = qwen.model.layers[l].mlp.down_proj.weight.T
        state_dict[f"blocks.{l}.mlp.b_out"] = torch.zeros(cfg.d_model, dtype=cfg.dtype)

    state_dict["ln_final.w"] = qwen.model.norm.weight

    state_dict["unembed.W_U"] = qwen.lm_head.weight.T
    state_dict["unembed.b_U"] = torch.zeros(cfg.d_vocab, dtype=cfg.dtype)

    return state_dict
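
A minimal usage sketch, not part of the covered module: it assumes the illustrative checkpoint id "Qwen/Qwen3-0.6B" is registered with TransformerLens (so that get_pretrained_model_config can resolve it); in normal use, HookedTransformer.from_pretrained performs this conversion internally.

    import torch
    from transformers import AutoModelForCausalLM

    from transformer_lens.loading_from_pretrained import get_pretrained_model_config
    from transformer_lens.pretrained.weight_conversions.qwen3 import convert_qwen3_weights

    model_name = "Qwen/Qwen3-0.6B"  # illustrative checkpoint id (assumption)

    # Load the Hugging Face model and the matching TransformerLens config.
    hf_model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float32)
    cfg = get_pretrained_model_config(model_name)

    # Run the converter and spot-check the TransformerLens shape conventions.
    state_dict = convert_qwen3_weights(hf_model, cfg)
    assert state_dict["blocks.0.attn.W_Q"].shape == (cfg.n_heads, cfg.d_model, cfg.d_head)
    assert state_dict["unembed.W_U"].shape == (cfg.d_model, cfg.d_vocab)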