1 parent fbb6a48 commit 0575b92
python/mlc_llm/model/gemma/gemma_model.py
@@ -22,7 +22,6 @@ class GemmaConfig(ConfigBase):  # pylint: disable=too-many-instance-attributes
     """Configuration of the Gemma model."""
 
     hidden_size: int
-    hidden_activation: Optional[str]
     intermediate_size: int
     attention_bias: bool
     num_attention_heads: int
@@ -31,6 +30,7 @@ class GemmaConfig(ConfigBase):  # pylint: disable=too-many-instance-attributes
     num_hidden_layers: int
     rms_norm_eps: float
     vocab_size: int
+    hidden_activation: Optional[str] = None
     position_embedding_base: int = 0
     context_window_size: int = 0
     prefill_chunk_size: int = 0
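The field moves rather than just gaining a default because Python dataclasses require fields without defaults to precede fields with defaults, so `hidden_activation: Optional[str] = None` must sit below the last required field. A minimal sketch of the effect, using a hypothetical trimmed-down `GemmaConfigSketch` (not the full `GemmaConfig`) with illustrative values:

```python
import dataclasses
from typing import Optional


@dataclasses.dataclass
class GemmaConfigSketch:
    # Required fields (no defaults) must come first.
    hidden_size: int
    vocab_size: int
    # Defaulted field: a config that omits hidden_activation
    # can still be constructed, defaulting to None.
    hidden_activation: Optional[str] = None


# A config dict without "hidden_activation" now loads fine:
cfg = GemmaConfigSketch(**{"hidden_size": 3072, "vocab_size": 256000})
assert cfg.hidden_activation is None
```

With the old ordering, a required `hidden_activation: Optional[str]` made construction fail for any config dict that lacked the key.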