Skip to content

Commit 02e1099

Browse files
Huyuweitqchen
authored and committed
update nnvm.runtime to tvm.contrib.graph_runtime (#41)
1 parent 79a0603 commit 02e1099

File tree

4 files changed

+13
-22
lines changed

4 files changed

+13
-22
lines changed

nnvm/docs/api/python/index.rst

Lines changed: 0 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -10,7 +10,6 @@ For user
1010

1111
compiler
1212
frontend
13-
runtime
1413
symbol
1514
graph
1615
top

nnvm/docs/api/python/runtime.rst

Lines changed: 0 additions & 8 deletions
This file was deleted.

nnvm/python/nnvm/compiler/build_module.py

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -4,9 +4,9 @@
44

55
import logging
66
import tvm
7+
from tvm.contrib import graph_runtime
78
from . import graph_attr, graph_util
89
from .. import graph as _graph
9-
from .. import runtime
1010

1111
OPT_PASS_LEVEL = {
1212
"SimplifyInference": 2,
@@ -220,7 +220,7 @@ def _run_graph(graph, params):
220220
_, oshape = graph_util.infer_shape(graph, **shape)
221221
_, odtype = graph_util.infer_dtype(graph, **dtype)
222222
graph, libmod, _ = build(graph, target, shape, dtype)
223-
m = runtime.create(graph, libmod, ctx)
223+
m = graph_runtime.create(graph, libmod, ctx)
224224
set_input, run, get_output = m["set_input"], m["run"], m["get_output"]
225225
for k, v in params.items():
226226
set_input(k, tvm.nd.array(v))

nnvm/tutorials/mobilenet_inference_gpu.py

Lines changed: 11 additions & 11 deletions
Original file line number | Diff line number | Diff line change
@@ -15,9 +15,9 @@
1515
######################################################################
1616
# Register the NVCC Compiler Option
1717
# ---------------------------------
18-
# NNVM optimizes the graph and relies on TVM to generate fast
19-
# GPU code, to get the maximum performance, we need to enable
20-
# nvcc's compiler hook. This gives better performance than nvrtc mode.
18+
# NNVM optimizes the graph and relies on TVM to generate fast GPU code.
19+
# To get the maximum performance, we need to enable nvcc's compiler hook.
20+
# This gives better performance than nvrtc mode.
2121

2222
@tvm.register_func
2323
def tvm_callback_cuda_compile(code):
@@ -28,7 +28,7 @@ def tvm_callback_cuda_compile(code):
2828
# Prepare the Benchmark
2929
# ---------------------
3030
# We construct a standard imagenet inference benchmark.
31-
# We use nnvm's testing utility to produce the model description and random parameters that so the example does not
31+
# We use nnvm's testing utility to produce the model description and random parameters so that the example does not
3232
# depend on a specific front-end framework.
3333
#
3434
# .. note::
@@ -46,17 +46,17 @@ def tvm_callback_cuda_compile(code):
4646
batch_size=1, image_shape=image_shape)
4747

4848
######################################################################
49-
# Compile The Graph
49+
# Compile the Graph
5050
# -----------------
5151
# NNVM needs two things to compile a deep learning model:
5252
#
53-
# - net which is the graph representation of the computation
54-
# - params a dictionary of str to parameters.
53+
# - net: the graph representation of the computation
54+
# - params: a dictionary of str to parameters
5555
#
5656
# To compile the graph, we call the build function with the graph
5757
# configuration and parameters.
58-
# When parameters are provided, NNVM will pre-compute certain part of the graph if possible,
59-
# the new parameter set returned as the third return value.
58+
# When parameters are provided, NNVM will pre-compute certain part of the graph if possible (e.g. simplify batch normalization to scale shift),
59+
# and return the updated parameters.
6060

6161
graph, lib, params = nnvm.compiler.build(
6262
net, target, shape={"data": data_shape}, params=params)
@@ -65,7 +65,7 @@ def tvm_callback_cuda_compile(code):
6565
# Run the Compiled Module
6666
# -----------------------
6767
#
68-
# To deploy the module, we call :any:`tvm.contrib.graph_runtime.create` passing in the graph the lib and context.
68+
# To deploy the module, we call :any:`tvm.contrib.graph_runtime.create` passing in the graph, the lib, and context.
6969
# Thanks to TVM, we can deploy the compiled module to many platforms and languages.
7070
# The deployment module is designed to contain minimum dependencies.
7171
# This example runs on the same machine.
@@ -79,5 +79,5 @@ def tvm_callback_cuda_compile(code):
7979
module.run()
8080
# get output
8181
out = module.get_output(0, tvm.nd.empty(out_shape))
82-
# Convert to numpy
82+
# convert to numpy
8383
out.asnumpy()

0 commit comments

Comments (0)