37 | 37 | # Define network |
38 | 38 | # -------------- |
39 | 39 | # First we need to define the network in relay frontend API. |
40 | | -# We can load some pre-defined network from :code:`relay.testing`. |
| 40 | +# We can either load a pre-defined network from :code:`relay.testing`
| 41 | +# or build :any:`relay.testing.resnet` with relay.
41 | 42 | # We can also load models from MXNet, ONNX and TensorFlow. |
42 | 43 | # |
43 | 44 | # In this tutorial, we choose resnet-18 as tuning example. |
44 | 45 |
| 46 | + |
45 | 47 | def get_network(name, batch_size): |
46 | 48 | """Get the symbol definition and random weight of a network""" |
47 | 49 | input_shape = (batch_size, 3, 224, 224) |
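The reworded comment in the hunk above points at :code:`relay.testing` for pre-defined networks. As a rough illustration of what `get_network` does for the resnet-18 case, here is a minimal sketch using `tvm.relay.testing`; the batch size and dtype are illustrative choices, not values taken from this diff:

```python
from tvm.relay import testing

# Illustrative values; the tutorial wraps this lookup inside get_network().
batch_size = 1
input_shape = (batch_size, 3, 224, 224)

# Load a pre-defined ResNet-18 workload: returns a relay IRModule and its
# randomly initialized parameters.
mod, params = testing.resnet.get_workload(
    num_layers=18, batch_size=batch_size, dtype="float32"
)
```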
@@ -73,6 +75,7 @@ def get_network(name, batch_size): |
73 | 75 |
74 | 76 | return mod, params, input_shape, output_shape |
75 | 77 |
| 78 | + |
76 | 79 | # Replace "llvm" with the correct target of your CPU. |
77 | 80 | # For example, for AWS EC2 c5 instance with Intel Xeon |
78 | 81 | # Platinum 8000 series, the target should be "llvm -mcpu=skylake-avx512". |
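The comment block above asks the reader to replace "llvm" with a target string that matches the host CPU. A small sketch of that choice; the skylake-avx512 string is the one named in the comment, while the AVX2 variant is shown only as an example of the same pattern:

```python
# Generic target: works everywhere but misses CPU-specific vectorization.
target = "llvm"

# Better: tell LLVM which micro-architecture to generate code for.
target = "llvm -mcpu=skylake-avx512"  # e.g. AWS c5, Intel Xeon Platinum 8000 series
# target = "llvm -mcpu=core-avx2"     # e.g. CPUs with AVX2 but without AVX-512
```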
@@ -121,6 +124,7 @@ def get_network(name, batch_size): |
121 | 124 | ), |
122 | 125 | } |
123 | 126 |
| 127 | + |
124 | 128 | # You can skip the implementation of this function for this tutorial. |
125 | 129 | def tune_kernels(tasks, |
126 | 130 | measure_option, |
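The `)` and `}` rows at the start of this hunk close a tuning-option dictionary whose earlier keys are not visible in the diff, and `tune_kernels` takes a `measure_option` drawn from it. Below is a hedged sketch of how such a dictionary is commonly assembled with `autotvm.measure_option`; the key names and numeric values are assumptions, not lines from this file:

```python
from tvm import autotvm

# Placeholder keys/values: the real dict in the tutorial is not fully shown
# in this hunk, so treat everything here as an assumption.
tuning_option = {
    "log_filename": "resnet-18.log",
    "tuner": "random",
    "early_stopping": None,
    "measure_option": autotvm.measure_option(
        builder=autotvm.LocalBuilder(),
        runner=autotvm.LocalRunner(number=10, repeat=1, min_repeat_ms=1000),
    ),
}
```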
@@ -165,6 +169,7 @@ def tune_kernels(tasks, |
165 | 169 | autotvm.callback.progress_bar(n_trial, prefix=prefix), |
166 | 170 | autotvm.callback.log_to_file(log_filename)]) |
167 | 171 |
| 172 | + |
168 | 173 | # Use graph tuner to achieve graph level optimal schedules |
169 | 174 | # Set use_DP=False if it takes too long to finish. |
170 | 175 | def tune_graph(graph, dshape, records, opt_sch_file, use_DP=True): |
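The comment introducing `tune_graph` mentions the graph tuner and the `use_DP` switch. Since the function body is not part of this diff, here is a hedged sketch of what that switch typically selects, using `DPTuner` and `PBQPTuner` from `tvm.autotvm.graph_tuner`; the call sequence, `input_name`, and `min_exec_num` value are assumptions rather than the tutorial's exact code:

```python
from tvm import relay
from tvm.autotvm.graph_tuner import DPTuner, PBQPTuner


def tune_graph_sketch(graph, dshape, records, opt_sch_file, target,
                      input_name="data", use_DP=True):
    # DPTuner searches layouts with dynamic programming; PBQPTuner is the
    # cheaper alternative the comment suggests when DP takes too long.
    target_ops = [relay.op.get("nn.conv2d")]
    Tuner = DPTuner if use_DP else PBQPTuner
    executor = Tuner(graph, {input_name: dshape}, records, target_ops, target)
    executor.benchmark_layout_transform(min_exec_num=2000)
    executor.run()
    executor.write_opt_sch2record_file(opt_sch_file)
```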