vllm/model_executor/layers/fused_moe/layer.py (14 changes: 8 additions, 6 deletions)
@@ -741,12 +741,14 @@ def __init__(
 
         # we padding globally so EP buffer allocation works
         if quant_config and quant_config.get_name() == "mxfp4":
-            if not is_torch_equal_or_newer("2.8.0"):
-                raise RuntimeError("Mxfp4 on hopper requires torch >= 2.8.0")
-            if current_platform.is_device_capability(
-                    90) and not has_triton_kernels():
-                raise NotImplementedError(
-                    "Triton kernels must be installed for mxfp4 on hopper")
+            if not current_platform.is_device_capability(100):
+                if not is_torch_equal_or_newer("2.8.0"):
+                    raise RuntimeError(
+                        "Mxfp4 on non-blackwell requires torch >= 2.8.0")
+                if not has_triton_kernels():
+                    raise NotImplementedError(
+                        "triton_kernels must be installed for "
+                        "mxfp4 on non-blackwell")
             if (current_platform.is_rocm()
                     or envs.VLLM_USE_FLASHINFER_MOE_MXFP4_MXFP8
                     or envs.VLLM_USE_FLASHINFER_MOE_MXFP4_BF16):
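In effect, the hunk widens the Hopper-only gating: the torch >= 2.8.0 check, which previously ran unconditionally, and the triton_kernels check, which previously ran only on compute capability 9.0 (Hopper), are now both applied to every non-Blackwell device (anything other than compute capability 10.0). Below is a minimal standalone sketch of the new gating; the helper functions are hypothetical stand-ins for vLLM's current_platform.is_device_capability, is_torch_equal_or_newer, and has_triton_kernels, not the library's actual implementations.

# Standalone sketch of the new mxfp4 gating; the helpers below are
# hypothetical stand-ins for vLLM's own utilities.
import importlib.util

import torch
from packaging import version


def is_torch_equal_or_newer(target: str) -> bool:
    # Compare the installed torch version (local build suffix stripped)
    # against a minimum version string.
    return version.parse(torch.__version__.split("+")[0]) >= version.parse(target)


def is_device_capability(capability: int) -> bool:
    # Encodes compute capability as major * 10 + minor,
    # e.g. 100 = 10.0 (Blackwell), 90 = 9.0 (Hopper).
    major, minor = torch.cuda.get_device_capability()
    return major * 10 + minor == capability


def has_triton_kernels() -> bool:
    # True if the optional triton_kernels package is importable.
    return importlib.util.find_spec("triton_kernels") is not None


def check_mxfp4_requirements() -> None:
    # Blackwell (10.0) skips both checks; every other device needs a new
    # enough torch and the triton_kernels package.
    if not is_device_capability(100):
        if not is_torch_equal_or_newer("2.8.0"):
            raise RuntimeError("Mxfp4 on non-blackwell requires torch >= 2.8.0")
        if not has_triton_kernels():
            raise NotImplementedError(
                "triton_kernels must be installed for mxfp4 on non-blackwell")

One practical consequence of folding both checks under a single capability test: Blackwell devices, which take a different mxfp4 path, no longer require triton_kernels or torch >= 2.8.0, while all other devices now require both.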