
Commit be8e438
Author: Siyuan Feng
[Refactor] Migrate build API to tvm.compile (#17718)
* tvm.build -> tvm.compile
* relax.build -> tvm.compile
* update
1 parent 38b85c9 commit be8e438
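
For context, here is a minimal sketch of what the migration looks like at a call site. tvm.compile is the unified entry point this commit adopts; the TIR module below is an illustrative assumption for the sketch, not code taken from the diff:

import tvm
from tvm.script import ir_module, tir as T

# Illustrative TIR module (assumed for this sketch, not part of the commit).
@ir_module
class Module:
    @T.prim_func
    def add_one(A: T.Buffer((8,), "float32"), B: T.Buffer((8,), "float32")):
        for i in range(8):
            with T.block("add"):
                vi = T.axis.spatial(8, i)
                B[vi] = A[vi] + T.float32(1)

# Before this commit: lib = tvm.build(Module, target="llvm")
lib = tvm.compile(Module, target="llvm")

The same tvm.compile call also accepts Relax IRModules, which is why the commit can retire both tvm.build and relax.build in favor of a single entry point.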

170 files changed: +502, -526 lines


apps/android_rpc/tests/android_rpc_test.py

Lines changed: 1 addition & 1 deletion
@@ -64,7 +64,7 @@ def test_rpc_module():
     sch.bind(xi, "threadIdx.x")

     if test_opencl:
-        f = tvm.build(sch.mod, target=tvm.target.Target("opencl", host=target))
+        f = tvm.compile(sch.mod, target=tvm.target.Target("opencl", host=target))
         path_dso_cl = temp.relpath("dev_lib_cl.so")
         f.export_library(path_dso_cl, fcompile=ndk.create_shared)

apps/ios_rpc/tests/ios_rpc_test.py

Lines changed: 1 addition & 1 deletion
@@ -59,7 +59,7 @@ def test_rpc_module(host, port, key, mode):

    # Build the dynamic lib.
    # If we don't want to do metal and only use cpu, just set target to be target
-    f = tvm.build(sch.mod, target=tvm.target.Target("metal", host=target))
+    f = tvm.compile(sch.mod, target=tvm.target.Target("metal", host=target))
    path_dso1 = temp.relpath("dev_lib.dylib")
    f.export_library(path_dso1, fcompile=xcode.create_dylib, arch=arch, sdk=sdk)

docs/deep_dive/tensor_ir/tutorials/tir_creation.py

Lines changed: 1 addition & 1 deletion
@@ -212,7 +212,7 @@ def evaluate_dynamic_shape(lib: tvm.runtime.Module, m: int, n: int, k: int):


 # Compile lib only once
-dyn_shape_lib = tvm.build(DynamicShapeModule, target="llvm")
+dyn_shape_lib = tvm.compile(DynamicShapeModule, target="llvm")
 # Able to handle different shapes
 print(evaluate_dynamic_shape(dyn_shape_lib, m=4, n=4, k=4))
 print(evaluate_dynamic_shape(dyn_shape_lib, m=64, n=64, k=128))

docs/deep_dive/tensor_ir/tutorials/tir_transformation.py

Lines changed: 1 addition & 1 deletion
@@ -78,7 +78,7 @@ def main(


 def evaluate(mod: tvm.IRModule):
-    lib = tvm.build(mod, target="llvm")
+    lib = tvm.tir.build(mod, target="llvm")
     # check correctness
     lib(a_nd, b_nd, c_nd)
     np.testing.assert_allclose(c_nd.numpy(), c_np, rtol=1e-5)
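
Unlike the other call sites in this commit, this one moves to tvm.tir.build, the TIR-specific builder, rather than the generic tvm.compile. A minimal sketch of the two entry points on a TIR-only module (the module below is an assumption for illustration, not from the diff):

import tvm
from tvm.script import ir_module, tir as T

# Illustrative TIR-only module (assumed, not from the diff).
@ir_module
class TIRMod:
    @T.prim_func
    def main(A: T.Buffer((4,), "float32"), B: T.Buffer((4,), "float32")):
        for i in range(4):
            with T.block("copy"):
                vi = T.axis.spatial(4, i)
                B[vi] = A[vi]

lib_tir = tvm.tir.build(TIRMod, target="llvm")  # TIR-specific build path
lib_gen = tvm.compile(TIRMod, target="llvm")    # unified entry point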

docs/get_started/tutorials/ir_module.py

Lines changed: 2 additions & 2 deletions
@@ -232,7 +232,7 @@ def main(
 # ~~~~~~~~~~~~~
 # We can deploy the IRModule on CPU by specifying the target as ``llvm``.

-exec = relax.build(mod, target="llvm")
+exec = tvm.compile(mod, target="llvm")
 dev = tvm.cpu()
 vm = relax.VirtualMachine(exec, dev)

@@ -263,7 +263,7 @@ def main(
 ######################################################################
 # Now we can compile the IRModule on GPU, the similar way as we did on CPU.

-exec = relax.build(gpu_mod, target="cuda")
+exec = tvm.compile(gpu_mod, target="cuda")
 dev = tvm.device("cuda", 0)
 vm = relax.VirtualMachine(exec, dev)
 # Need to allocate data and params on GPU device
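
The surrounding Relax workflow is unchanged by this migration: tvm.compile produces the same executable that relax.VirtualMachine consumes. A self-contained CPU sketch, assuming a small Relax module named Mod (an illustrative assumption, not the tutorial's module):

import numpy as np
import tvm
from tvm import relax
from tvm.script import ir_module, relax as R

# Illustrative Relax module (assumed for this sketch).
@ir_module
class Mod:
    @R.function
    def main(x: R.Tensor((4,), dtype="float32")) -> R.Tensor((4,), dtype="float32"):
        y = R.add(x, x)
        return y

ex = tvm.compile(Mod, target="llvm")  # formerly relax.build(Mod, target="llvm")
vm = relax.VirtualMachine(ex, tvm.cpu())
out = vm["main"](tvm.nd.array(np.ones((4,), "float32")))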

docs/get_started/tutorials/quick_start.py

Lines changed: 1 addition & 1 deletion
@@ -137,7 +137,7 @@ def forward(self, x):
 import numpy as np

 target = tvm.target.Target("llvm")
-ex = relax.build(mod, target)
+ex = tvm.compile(mod, target)
 device = tvm.cpu()
 vm = relax.VirtualMachine(ex, device)
 data = np.random.rand(1, 784).astype("float32")

docs/how_to/tutorials/cross_compilation_and_rpc.py

Lines changed: 2 additions & 2 deletions
@@ -119,7 +119,7 @@
 else:
     target = "llvm -mtriple=armv7l-linux-gnueabihf"

-func = tvm.build(mod, target=target)
+func = tvm.compile(mod, target=target)
 # save the lib at a local temp folder
 temp = utils.tempdir()
 path = temp.relpath("lib.tar")
@@ -237,7 +237,7 @@ def run_opencl():
     xo, xi = sch.split(i, [None, 32])
     sch.bind(xo, "blockIdx.x")
     sch.bind(xi, "threadIdx.x")
-    func = tvm.build(sch.mod, target=target)
+    func = tvm.compile(sch.mod, target=target)

     remote = rpc.connect(opencl_device_host, opencl_device_port)

docs/how_to/tutorials/customize_opt.py

Lines changed: 1 addition & 1 deletion
@@ -205,7 +205,7 @@ def transform_module(self, mod: IRModule, _ctx: tvm.transform.PassContext) -> IR
 # --------------------------
 # We can build and deploy the optimized model to the TVM runtime.

-ex = relax.build(mod, target="cuda")
+ex = tvm.compile(mod, target="cuda")
 dev = tvm.device("cuda", 0)
 vm = relax.VirtualMachine(ex, dev)
 # Need to allocate data and params on GPU device

docs/how_to/tutorials/e2e_opt_model.py

Lines changed: 1 addition & 1 deletion
@@ -113,7 +113,7 @@
 # We skip this step in the CI environment.

 if not IS_IN_CI:
-    ex = relax.build(mod, target="cuda")
+    ex = tvm.compile(mod, target="cuda")
     dev = tvm.device("cuda", 0)
     vm = relax.VirtualMachine(ex, dev)
     # Need to allocate data and params on GPU device

docs/reference/api/python/driver.rst

Lines changed: 1 addition & 1 deletion
@@ -19,4 +19,4 @@ tvm.driver
 ----------
 .. automodule:: tvm.driver

-.. autofunction:: tvm.build
+.. autofunction:: tvm.compile
