
Commit 1016467

fix multi-graph test
Signed-off-by: Boyuan Feng <[email protected]>
1 parent a2986b3 commit 1016467

3 files changed: +103 -3 lines changed

tests/compile/piecewise/test_multiple_graphs.py

Lines changed: 3 additions & 3 deletions

@@ -20,6 +20,7 @@
     set_current_vllm_config,
 )
 from vllm.forward_context import BatchDescriptor, set_forward_context
+from vllm.utils import is_torch_equal_or_newer
 
 # This import automatically registers `torch.ops.silly.attention`
 from .. import silly_attention  # noqa: F401
@@ -193,9 +194,8 @@ def run_model(
 
 @pytest.mark.parametrize("use_inductor_graph_partition", [False, True])
 def test_multi_graph_piecewise_compile(use_inductor_graph_partition: bool):
-    if use_inductor_graph_partition:
-        # FIXME(luka/boyuan): this currently fails
-        pytest.skip("Inductor graph partition not supported with multi-graph")
+    if use_inductor_graph_partition and not is_torch_equal_or_newer("2.9.0.dev"):
+        pytest.skip("inductor graph partition is only available in PyTorch 2.9+")
 
     outputs = []
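For reference, a minimal sketch (not part of the commit) of the gating pattern the test now uses, assuming only that is_torch_equal_or_newer compares the installed torch version against a target string: the parametrization still generates both cases, and the True case is skipped at runtime on PyTorch builds older than 2.9.

import pytest

from vllm.utils import is_torch_equal_or_newer


@pytest.mark.parametrize("use_inductor_graph_partition", [False, True])
def test_version_gated_example(use_inductor_graph_partition: bool):
    # Hypothetical toy test, illustrative only.
    if use_inductor_graph_partition and not is_torch_equal_or_newer("2.9.0.dev"):
        pytest.skip("inductor graph partition is only available in PyTorch 2.9+")
    # On torch < 2.9 only the False case runs; on torch >= 2.9 both cases run.
    assert use_inductor_graph_partition in (False, True)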

vllm/env_override.py

Lines changed: 73 additions & 0 deletions

@@ -5,6 +5,7 @@
 import torch
 
 from vllm.logger import init_logger
+from vllm.utils import _is_torch_equal
 
 logger = init_logger(__name__)
 
@@ -21,3 +22,75 @@
 os.environ["TORCHINDUCTOR_COMPILE_THREADS"] = "1"
 # see https://github.com/vllm-project/vllm/issues/10619
 torch._inductor.config.compile_threads = 1
+
+# ===================================================
+# torch 2.9 Inductor PythonWrapperCodegen monkeypatch
+# ===================================================
+# This change monkeypatches memory_plan_reuse in pytorch 2.9.0 to work around
+# a test failure for test_multi_graph_piecewise_compile_outputs_equal.
+# For more context, see https://github.com/pytorch/pytorch/pull/165514.
+
+
+def memory_plan_reuse_patched(self):
+    import torch._inductor.ir as ir
+    from torch._inductor.codegen.wrapper import (
+        EnterSubgraphLine,
+        ExitSubgraphLine,
+        MemoryPlanningLine,
+        MemoryPlanningState,
+        SubgraphPythonWrapperCodegen,
+    )
+    from torch._inductor.virtualized import V
+
+    def get_output_names(graph_outputs) -> list[str]:
+        import itertools
+
+        names = []
+        shape_counter = itertools.count(0)
+        none_counter = itertools.count(0)
+        for node in graph_outputs:
+            if isinstance(node, ir.NoneAsConstantBuffer):
+                names.append(f"{V.graph.name}_none{next(none_counter)}")
+            elif isinstance(node, ir.ShapeAsConstantBuffer):
+                names.append(f"{V.graph.name}_shape{next(shape_counter)}")
+            else:
+                names.append(node.get_name())
+        return names
+
+    if (
+        isinstance(V.graph.wrapper_code, SubgraphPythonWrapperCodegen)
+        and V.graph.wrapper_code.partition_signatures is not None
+    ):
+        out_names = get_output_names(
+            V.graph.wrapper_code.partition_signatures.output_nodes
+        )
+    else:
+        out_names = V.graph.get_output_names()
+
+    while (
+        self.lines
+        and isinstance(self.lines[-1], MemoryPlanningLine)
+        and self.lines[-1].node.name not in out_names  # type: ignore[attr-defined]
+    ):
+        # these lines will be pointless
+        self.lines.pop()
+
+    # codegen allocations in two passes
+    planning_states = [MemoryPlanningState()]
+    past_planning_states = []
+    for i in range(len(self.lines)):
+        line = self.lines[i]
+        if isinstance(line, MemoryPlanningLine):
+            self.lines[i] = line.plan(planning_states[-1])
+        elif isinstance(line, EnterSubgraphLine):
+            planning_states.append(MemoryPlanningState())
+        elif isinstance(line, ExitSubgraphLine):
+            past_planning_states.append(planning_states.pop())
+    past_planning_states.append(planning_states.pop())
+    assert len(planning_states) == 0
+
+
+if _is_torch_equal("2.9.0"):
+    from torch._inductor.codegen.wrapper import PythonWrapperCodegen
+
+    PythonWrapperCodegen.memory_plan_reuse = memory_plan_reuse_patched
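The block above is an instance of a version-gated monkeypatch: define the replacement at module scope, then rebind it onto the library class only when the exact affected release is installed. A minimal, self-contained sketch of the same pattern with toy classes (not PyTorch or vLLM APIs):

class Codegen:
    # Stand-in for torch's PythonWrapperCodegen.
    def memory_plan_reuse(self):
        return "original behavior"


def memory_plan_reuse_patched(self):
    # Replacement defined up front, same signature as the original method.
    return "patched behavior"


affected_release_installed = True  # stands in for _is_torch_equal("2.9.0")
if affected_release_installed:
    # Rebinding on the class makes every instance, existing or future,
    # dispatch to the patched method.
    Codegen.memory_plan_reuse = memory_plan_reuse_patched

print(Codegen().memory_plan_reuse())  # -> "patched behavior"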

vllm/utils/__init__.py

Lines changed: 27 additions & 0 deletions

@@ -3265,6 +3265,33 @@ def _is_torch_equal_or_newer(torch_version: str, target: str) -> bool:
     return torch_version >= version.parse(target)
 
 
+def _is_torch_equal(target: str) -> bool:
+    assert target.count(".") == 2
+    torch_version = str(torch.__version__)
+    torch_version = version.parse(torch_version)
+    # torch version is like "2.6.0.dev20240101" or "2.6.0.dev20240101+cpu"
+    # or "2.6.0+cu128" but never "2.6.0.1"
+    return (
+        torch_version >= version.parse(target)
+        and version.parse(target + ".1") >= torch_version
+    )
+
+
+def is_torch_equal(target: str) -> bool:
+    """Check if the installed torch version is == the target version.
+
+    Args:
+        target: a version string, like "2.6.0".
+
+    Returns:
+        Whether the condition meets.
+    """
+    try:
+        return _is_torch_equal(target)
+    except Exception:
+        return Version(importlib.metadata.version("torch")) == Version(target)
+
+
 @cache
 def _has_module(module_name: str) -> bool:
     """Return True if *module_name* can be found in the current environment.
