Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
41 changes: 34 additions & 7 deletions python/tvm/relax/frontend/torch/exported_program_translator.py
Original file line number Diff line number Diff line change
Expand Up @@ -92,7 +92,12 @@ def _group_norm(self, node: fx.Node) -> relax.Var:
)

def _upsample_impl(
self, x: relax.Expr, size, align_corners: bool, scale_factor, method: str
self,
x: relax.Expr,
size,
scale_factor,
method: str,
align_corners: bool,
) -> relax.Var:
coord_trans = "align_corners" if align_corners else "half_pixel"

Expand All @@ -119,17 +124,39 @@ def _upsample_bilinear2d(self, node: fx.Node) -> relax.Var:
align_corners = (
node.args[2] if len(node.args) > 2 else node.kwargs.get("align_corners", True)
)
scale_factor = node.args[3] if len(node.args) > 3 else node.kwargs.get("scale_factor", None)
return self._upsample_impl(x, size, align_corners, scale_factor, "linear")
scale_factor = node.args[3] if len(node.args) > 3 else node.kwargs.get("scale_factor", 1)
return self._upsample_impl(
x, size=size, scale_factor=scale_factor, method="linear", align_corners=align_corners
)

def _upsample_nearest2d(self, node: fx.Node) -> relax.Var:
    """Convert an exported-program ``upsample_nearest2d`` node to Relax.

    ``size`` and ``scale_factor`` are mutually exclusive: the node carries
    either an explicit output ``size`` or a ``scale_factor``, never both.
    The positional index of ``align_corners`` shifts depending on which
    of the two is present, so each branch reads it from a different slot.
    """
    x = self.env[node.args[0]]
    size = node.args[1] if len(node.args) > 1 else node.kwargs.get("size", None)

    if size:
        # An explicit output size was given; only one of size/scale_factor
        # may be passed down, so the scale factor is dropped.
        scale_factor = None
        align_corners = (
            node.args[2] if len(node.args) > 2 else node.kwargs.get("align_corners", None)
        )
    else:
        # TODO(review): figure out why the PyTorch exporter passes a list
        # such as [scale_factor, scale_factor] instead of a single scalar.
        # Using the first element for now.
        scale_factor = (
            node.args[2][0] if len(node.args) > 2 else node.kwargs.get("scale_factor", 1)
        )
        align_corners = (
            node.args[3] if len(node.args) > 3 else node.kwargs.get("align_corners", None)
        )

    return self._upsample_impl(
        x,
        size=size,
        scale_factor=scale_factor,
        method="nearest_neighbor",
        align_corners=align_corners,
    )

########## Manipulation ##########

Expand Down
46 changes: 40 additions & 6 deletions tests/python/relax/test_from_exported_to_cuda.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,14 +15,14 @@
# specific language governing permissions and limitations
# under the License.

import tvm
from tvm import relax
import tvm.testing
import numpy as np
import torch
from torch.export import export

import tvm
import tvm.testing
from tvm import relax
from tvm.relax.frontend.torch import from_exported_program
from torch.nn import Softmax, Upsample


def assert_torch_output_vs_tvm_from_exported_to_cuda(raw_data, torch_module, target, dev):
Expand All @@ -42,8 +42,6 @@ def assert_torch_output_vs_tvm_from_exported_to_cuda(raw_data, torch_module, tar
tvm_mod, tvm_params = relax.frontend.detach_params(mod_from_torch)

relax_pipeline = relax.get_default_pipeline(tvm.target.Target.from_device(tvm.cuda()))
# TODO try pipeline below?
# releax_pipeline = relax.backend.cuda.pipeline.get_default_pipeline(target)
ex = relax.build(tvm_mod, target=target, relax_pipeline=relax_pipeline)
vm = relax.VirtualMachine(ex, dev)

Expand All @@ -57,6 +55,42 @@ def assert_torch_output_vs_tvm_from_exported_to_cuda(raw_data, torch_module, tar
np.testing.assert_allclose(actual=actual, desired=desired, rtol=1e-5, atol=1e-5)


@tvm.testing.parametrize_targets("cuda")
def test_upsample_with_size(target, dev):
    """
    Upsample accepts either the ``size`` argument or the ``scale_factor``
    argument, but never both.  This test exercises the ``size`` path.
    """
    # (batch, channels, height, width)
    input_shape = (1, 3, 8, 8)

    torch_module = Upsample(size=(64, 64), mode="nearest", recompute_scale_factor=None)
    raw_data = np.random.rand(*input_shape).astype("float32")

    assert_torch_output_vs_tvm_from_exported_to_cuda(raw_data, torch_module, target, dev)


@tvm.testing.parametrize_targets("cuda")
def test_upsample_with_scale_factor(target, dev):
    """
    Upsample accepts either the ``size`` argument or the ``scale_factor``
    argument, but never both.  This test exercises the ``scale_factor`` path.
    """
    # (batch, channels, height, width)
    input_shape = (2, 3, 32, 32)

    torch_module = Upsample(
        size=None, scale_factor=7, mode="nearest", align_corners=None, recompute_scale_factor=True
    )
    raw_data = np.random.rand(*input_shape).astype("float32")

    assert_torch_output_vs_tvm_from_exported_to_cuda(raw_data, torch_module, target, dev)


@tvm.testing.parametrize_targets("cuda")
def test_linalg_vector_norm(target, dev):
class VectorNorm0(torch.nn.Module):
Expand Down