Commit 0575b92

[Fix] Gemma hidden_activation compatibility (#2614)
This PR fixes the Gemma config compatibility issue by making hidden_activation an optional field (defaulting to None), so Gemma configuration files that omit the key can still be loaded.
1 parent fbb6a48 commit 0575b92

File tree

1 file changed (+1, −1)


python/mlc_llm/model/gemma/gemma_model.py

Lines changed: 1 addition & 1 deletion
@@ -22,7 +22,6 @@ class GemmaConfig(ConfigBase):  # pylint: disable=too-many-instance-attributes
     """Configuration of the Gemma model."""

     hidden_size: int
-    hidden_activation: Optional[str]
     intermediate_size: int
     attention_bias: bool
     num_attention_heads: int
@@ -31,6 +30,7 @@ class GemmaConfig(ConfigBase):  # pylint: disable=too-many-instance-attributes
     num_hidden_layers: int
     rms_norm_eps: float
     vocab_size: int
+    hidden_activation: Optional[str] = None
     position_embedding_base: int = 0
     context_window_size: int = 0
     prefill_chunk_size: int = 0
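Below is a minimal, self-contained sketch (assumed code, not the actual mlc_llm implementation; GemmaConfigSketch and load_config are hypothetical names) of why the field moved as well as gained a default: Python dataclasses require fields without defaults to precede fields with defaults, so giving hidden_activation a default of None also means placing it after the required fields. In a plain dataclass, omitting a required field raises TypeError at construction time; with the default in place, a config dict that lacks the key parses instead of raising.

from dataclasses import dataclass
from typing import Optional

@dataclass
class GemmaConfigSketch:
    # Required fields: the config file must provide these.
    hidden_size: int
    vocab_size: int
    # Optional field: some Gemma config files omit it, so it defaults to None.
    # (Non-default fields must come first, hence its position at the end.)
    hidden_activation: Optional[str] = None

def load_config(raw: dict) -> GemmaConfigSketch:
    """Build the config, tolerating a missing hidden_activation key."""
    return GemmaConfigSketch(
        hidden_size=raw["hidden_size"],
        vocab_size=raw["vocab_size"],
        hidden_activation=raw.get("hidden_activation"),  # None when absent
    )

# A config dict without hidden_activation now loads cleanly.
cfg = load_config({"hidden_size": 3072, "vocab_size": 256000})
assert cfg.hidden_activation is None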
