|
| 1 | +# Licensed to the Apache Software Foundation (ASF) under one |
| 2 | +# or more contributor license agreements. See the NOTICE file |
| 3 | +# distributed with this work for additional information |
| 4 | +# regarding copyright ownership. The ASF licenses this file |
| 5 | +# to you under the Apache License, Version 2.0 (the |
| 6 | +# "License"); you may not use this file except in compliance |
| 7 | +# with the License. You may obtain a copy of the License at |
| 8 | +# |
| 9 | +# http://www.apache.org/licenses/LICENSE-2.0 |
| 10 | +# |
| 11 | +# Unless required by applicable law or agreed to in writing, |
| 12 | +# software distributed under the License is distributed on an |
| 13 | +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY |
| 14 | +# KIND, either express or implied. See the License for the |
| 15 | +# specific language governing permissions and limitations |
| 16 | +# under the License. |
| 17 | +# pylint: disable=invalid-name,unused-variable,unused-argument,no-member |
| 18 | +"""Conv2D int8 schedule on ARM""" |
| 19 | + |
| 20 | +import tvm |
| 21 | +from tvm import autotvm |
| 22 | +from .. import generic, tag |
| 23 | +from ..util import get_const_tuple |
| 24 | +from ..nn.conv2d import conv2d_NCHWc_int8 |
| 25 | +from ..generic import conv2d as conv2d_generic |
| 26 | +from .. import nn |
| 27 | +from ..nn.conv2d import _get_workload as _get_conv2d_workload |
| 28 | +from .tensor_intrin import dot_int8_int8_int32 |
| 29 | + |
| 30 | + |
def _get_default_config(cfg, data, kernel, strides, padding, out_dtype):
    """Populate *cfg* with a fallback int8 schedule for this workload.

    Builds the NCHW workload descriptor from the placeholder tensors and
    dispatches to the generic CPU int8 fallback: the 1x1-kernel variant
    when both spatial kernel dims are 1, the common variant otherwise.
    Both fallbacks are invoked with int32_lanes=2 and num_int8_elements=4.
    """
    workload = _get_conv2d_workload(data, kernel, strides, padding, out_dtype)
    if workload.hkernel == 1 and workload.wkernel == 1:
        fallback = conv2d_generic.fallback_schedule_cpu_1x1_int8
    else:
        fallback = conv2d_generic.fallback_schedule_cpu_common_int8
    fallback(cfg, workload, int32_lanes=2, num_int8_elements=4)
| 43 | + |
| 44 | + |
@autotvm.register_topi_compute(conv2d_NCHWc_int8, ['arm_cpu'], 'direct')
def _declaration_conv_NCHWc_int8(cfg, data, kernel, strides,
                                 padding, dilation, layout, out_layout, out_dtype):
    """Compute declaration for int8 conv2d in NCHWc layout on ARM CPU.

    Parameters mirror the generic ``conv2d_NCHWc_int8`` topi contract:
    ``data`` is a 5-D tensor (n, ic_chunk, ih, iw, ic_bn) and ``kernel``
    is a 7-D tensor (oc_chunk, ic_chunk, kh, kw, ic_bn, oc_bn, n_elems).
    ``layout``/``out_layout`` are unused here but kept so the dumped
    autotvm workload stays debuggable.  Returns the tvm tensor produced
    by ``nn.conv2d_NCHWc_int8_compute``.
    """
    n, ic_chunk, ih, iw, ic_bn = get_const_tuple(data.shape)
    in_channel = ic_chunk * ic_bn

    # Only the output-channel factors and spatial kernel size are needed;
    # use "_" for the rest to avoid rebinding ic_chunk/ic_bn from above.
    oc_chunk, _, kh, kw, _, oc_bn, _ = get_const_tuple(kernel.shape)
    num_filter = oc_chunk * oc_bn

    # If no config was set, fall back to a default derived from the
    # equivalent NCHW (non-packed) workload.
    if cfg.is_fallback:
        _get_default_config(cfg, tvm.placeholder((n, in_channel, ih, iw), dtype=data.dtype),
                            tvm.placeholder((num_filter, in_channel, kh, kw), dtype=kernel.dtype),
                            strides, padding, out_dtype)
    return nn.conv2d_NCHWc_int8_compute(data,
                                        kernel,
                                        strides,
                                        padding,
                                        dilation,
                                        layout,
                                        out_layout,
                                        out_dtype)
| 69 | + |
| 70 | + |
@autotvm.register_topi_schedule(generic.schedule_conv2d_NCHWc_int8, ['arm_cpu'], ['direct'])
def _schedule_conv2d_NCHWc_int8(cfg, outs):
    """Create schedule for NCHWc int8 conv2d output tensors on ARM CPU.

    Walks the compute graph from ``outs[0]``, inlining broadcast-tagged
    ops that are not outputs, and applies the generic CPU int8 schedule
    (1x1 or common variant, chosen by kernel spatial size) to the op
    tagged ``conv2d_NCHWc_int8``.  Returns the built schedule.
    """
    s = tvm.create_schedule([x.op for x in outs])
    scheduled_ops = []

    def traverse(op):
        """Traverse operators from computation graph."""
        # inline all one-to-one-mapping operators except the last stage (output)
        if tag.is_broadcast(op.tag):
            if op not in s.outputs:
                s[op].compute_inline()
            for tensor in op.input_tensors:
                if isinstance(tensor.op, tvm.tensor.ComputeOp) and tensor.op not in scheduled_ops:
                    traverse(tensor.op)

        if 'conv2d_NCHWc_int8' in op.tag:
            conv_out = op.output(0)
            kernel = conv_out.op.input_tensors[1]
            data_vec = conv_out.op.input_tensors[0]
            # Step past the packed-data stage to the raw input, unless the
            # producer is the padding op itself.
            data = data_vec.op.input_tensors[0] \
                if isinstance(data_vec.op, tvm.tensor.ComputeOp) and "pad" not in data_vec.op.tag \
                else data_vec
            if isinstance(data.op, tvm.tensor.ComputeOp) and "pad" in data.op.tag:
                data_pad = data
                data = data_pad.op.input_tensors[0]

            args = [s, cfg, data_vec, conv_out, outs[0]]
            # int8 conv kernel is 7-dim
            _, _, kh, kw, _, _, _ = get_const_tuple(kernel.shape)
            # Build the dot-product tensor intrinsic once; both schedule
            # variants consume the same intrinsic.
            dtype = "uint" if data.dtype == "uint8" else "int"
            intrin = dot_int8_int8_int32(int32_lanes=4, dtype=dtype)
            if kh == 1 and kw == 1:
                conv2d_generic.schedule_conv_NCHWc_cpu_1x1_int8(
                    *args, int32_lanes=4, intrin=intrin)
            else:
                conv2d_generic.schedule_conv_NCHWc_cpu_common_int8(
                    *args, int32_lanes=4, intrin=intrin)

        scheduled_ops.append(op)

    traverse(outs[0].op)
    return s