We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 88ea2c4 commit d02fadf — Copy full SHA for d02fadf
tensorrt_llm/_torch/modules/fused_moe/fused_moe_wide_ep.py
@@ -226,6 +226,8 @@ def __init__(
226
f"Not available alltoall method type: {self.alltoall_method_type!r}"
227
)
228
229
+ self.use_fused_finalize = not model_config.moe_disable_finalize_fusion
230
+
231
self._weights_created = False
232
if not model_config.skip_create_weights_in_init:
233
self.create_weights()
@@ -724,7 +726,7 @@ def forward_chunk(
724
726
input_sf=x_sf,
725
727
swizzled_input_sf=False,
728
min_latency_mode=False,
- use_fused_finalize=True,
729
+ use_fused_finalize=self.use_fused_finalize,
730
tuner_num_tokens=tuner_num_tokens,
731
tuner_top_k=tuner_top_k,
732
0 commit comments