
Commit bd890bf

[docs] Remove empty code blocks
This fixes some of the issues highlighted in #13668: the parser that checks that the hidden import is placed in the right spot was incorrect, and this commit includes fixes to get it working for the cases in that issue. A sketch of the resulting layout follows below.
1 parent 45a8a44 · commit bd890bf
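As a rough, illustrative sketch of the layout these changes converge on (not copied verbatim from any one file; the docstring text and the numpy import are placeholders), the hidden request-hook import sits inside the sphinx-gallery ignore markers, after the docstring and the introductory comment and immediately above the first visible import, so sphinx-gallery no longer renders an empty code block:

"""Tutorial docstring ends here (placeholder)."""

# Introductory comment for the first visible code cell
# sphinx_gallery_start_ignore
from tvm import testing

testing.utils.install_request_hook(depth=3)
# sphinx_gallery_end_ignore
import numpy as np  # first visible import, directly after the hidden block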

File tree

14 files changed: +60 -57 lines changed


gallery/how_to/compile_models/from_darknet.py

Lines changed: 1 addition & 2 deletions
@@ -31,13 +31,12 @@
 pip install opencv-python
 """
 
+# numpy and matplotlib
 # sphinx_gallery_start_ignore
 from tvm import testing
 
 testing.utils.install_request_hook(depth=3)
 # sphinx_gallery_end_ignore
-
-# numpy and matplotlib
 import numpy as np
 import matplotlib.pyplot as plt
 import sys

gallery/how_to/compile_models/from_tensorflow.py

Lines changed: 1 addition & 2 deletions
@@ -24,13 +24,12 @@
 Please refer to https://www.tensorflow.org/install
 """
 
+# tvm, relay
 # sphinx_gallery_start_ignore
 from tvm import testing
 
 testing.utils.install_request_hook(depth=3)
 # sphinx_gallery_end_ignore
-
-# tvm, relay
 import tvm
 from tvm import te
 from tvm import relay

gallery/how_to/deploy_models/deploy_prequantized.py

Lines changed: 3 additions & 2 deletions
@@ -28,14 +28,15 @@
 Once loaded, we can run compiled, quantized models on any hardware TVM supports.
 """
 
+#################################################################################
+# First, necessary imports
+
 # sphinx_gallery_start_ignore
 from tvm import testing
 
 testing.utils.install_request_hook(depth=3)
 # sphinx_gallery_end_ignore
 
-#################################################################################
-# First, necessary imports
 from PIL import Image
 
 import numpy as np

gallery/how_to/deploy_models/deploy_prequantized_tflite.py

Lines changed: 4 additions & 3 deletions
@@ -42,15 +42,16 @@
 
 """
 
+###############################################################################
+# Necessary imports
+# -----------------
+
 # sphinx_gallery_start_ignore
 from tvm import testing
 
 testing.utils.install_request_hook(depth=3)
 # sphinx_gallery_end_ignore
 
-###############################################################################
-# Necessary imports
-# -----------------
 import os
 
 import numpy as np

gallery/how_to/deploy_models/deploy_sparse.py

Lines changed: 6 additions & 5 deletions
@@ -70,17 +70,18 @@
 sparse speed using fake weights to see the benefit of structured sparsity.
 """
 
+###############################################################################
+# Load Required Modules
+# ---------------------
+# Other than TVM, scipy, the latest transformers, and
+# tensorflow 2.2+ are required.
+
 # sphinx_gallery_start_ignore
 from tvm import testing
 
 testing.utils.install_request_hook(depth=3)
 # sphinx_gallery_end_ignore
 
-###############################################################################
-# Load Required Modules
-# ---------------------
-# Other than TVM, scipy, the latest transformers, and
-# tensorflow 2.2+ are required.
 import os
 import tvm
 import time

gallery/how_to/extend_tvm/bring_your_own_datatypes.py

Lines changed: 6 additions & 5 deletions
@@ -52,17 +52,18 @@
 ctypes.CDLL('my-datatype-lib.so', ctypes.RTLD_GLOBAL)
 """
 
+######################
+# A Simple TVM Program
+# --------------------
+#
+# We'll begin by writing a simple program in TVM; afterwards, we will re-write it to use custom datatypes.
+
 # sphinx_gallery_start_ignore
 from tvm import testing
 
 testing.utils.install_request_hook(depth=3)
 # sphinx_gallery_end_ignore
 
-######################
-# A Simple TVM Program
-# --------------------
-#
-# We'll begin by writing a simple program in TVM; afterwards, we will re-write it to use custom datatypes.
 import tvm
 from tvm import relay
 
gallery/how_to/optimize_operators/opt_conv_cuda.py

Lines changed: 6 additions & 7 deletions
@@ -30,12 +30,6 @@
 
 """
 
-# sphinx_gallery_start_ignore
-from tvm import testing
-
-testing.utils.install_request_hook(depth=3)
-# sphinx_gallery_end_ignore
-
 ################################################################
 # Preparation and Algorithm
 # -------------------------
@@ -44,7 +38,12 @@
 # dimensions. The batch size is 256. Convolution filters contain 512 filters
 # of size 3 x 3. We use stride size 1 and padding size 1 for the
 # convolution. The following code defines the convolution algorithm in TVM.
-#
+
+# sphinx_gallery_start_ignore
+from tvm import testing
+
+testing.utils.install_request_hook(depth=3)
+# sphinx_gallery_end_ignore
 
 import numpy as np
 import tvm

gallery/how_to/optimize_operators/opt_gemm.py

Lines changed: 6 additions & 6 deletions
@@ -48,19 +48,19 @@
 Intel i7-4770HQ CPU. The cache line size should be 64 bytes for all the x86 CPUs.
 """
 
-# sphinx_gallery_start_ignore
-from tvm import testing
-
-testing.utils.install_request_hook(depth=3)
-# sphinx_gallery_end_ignore
-
 ################################################################################################
 # Preparation and Baseline
 # ------------------------
 # In this tutorial, we will demo how to use TVM to optimize matrix multiplication.
 # Before actually demonstrating, we first define these variables.
 # Then we write a baseline implementation, the simplest way to write a matrix multiplication in TVM.
 
+# sphinx_gallery_start_ignore
+from tvm import testing
+
+testing.utils.install_request_hook(depth=3)
+# sphinx_gallery_end_ignore
+
 import tvm
 import tvm.testing
 from tvm import te

gallery/how_to/work_with_pytorch/using_as_torch.py

Lines changed: 1 addition & 2 deletions
@@ -24,13 +24,12 @@
 Using the decorator `as_torch`, users can wrap TVMScript code into a PyTorch nn.Module naturally.
 """
 
+# Import PyTorch, as well as necessary libraries
 # sphinx_gallery_start_ignore
 from tvm import testing
 
 testing.utils.install_request_hook(depth=3)
 # sphinx_gallery_end_ignore
-
-# Import PyTorch, as well as necessary libraries
 import torch
 import torch.nn.functional as F
 import torch.utils.benchmark as benchmark

gallery/how_to/work_with_pytorch/using_optimized_torch.py

Lines changed: 1 addition & 2 deletions
@@ -24,13 +24,12 @@
 To follow this tutorial, PyTorch, as well as TorchVision, should be installed.
 """
 
+# Import PyTorch
 # sphinx_gallery_start_ignore
 from tvm import testing
 
 testing.utils.install_request_hook(depth=3)
 # sphinx_gallery_end_ignore
-
-# Import PyTorch
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
