Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
21 changes: 21 additions & 0 deletions tests/python/relay/test_op_level2.py
Original file line number Diff line number Diff line change
Expand Up @@ -264,6 +264,25 @@ def _test_pool2d(opfunc, reffunc):
op_res1 = intrp1.evaluate(func)(data)
tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5)

def _test_pool2d_int(opfunc, reffunc, dtype):
    """Check type inference and execution of a 2D pooling op on integer input.

    Parameters
    ----------
    opfunc : callable
        Relay pooling operator (e.g. relay.nn.avg_pool2d).
    reffunc : callable
        NumPy reference reduction (e.g. np.mean) applied over the window axes.
    dtype : str
        Integer dtype under test (e.g. 'int32', 'uint16').
    """
    # Type inference: symbolic batch dim; pool_size=(1, 1) keeps spatial dims.
    n, c, h, w = tvm.var("n"), 10, 224, 224
    x = relay.var("x", relay.TensorType((n, c, h, w), dtype))
    y = opfunc(x, pool_size=(1, 1))
    assert "pool_size=" in y.astext()
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((n, 10, 224, 224), dtype)
    # test execution
    # NOTE: do NOT reassign `dtype` here — the original hard-coded "int32",
    # which silently ignored the dtype parameter (uint16 was never executed).
    dshape = (1, 3, 28, 28)
    x = relay.var("x", shape=dshape, dtype=dtype)
    y = opfunc(x, pool_size=(2, 2), strides=(2, 2), padding=(0, 0))
    func = relay.Function([x], y)
    # np.random.random_integers is deprecated (removed in NumPy 1.25+); use
    # randint and cast so the data matches the declared input dtype. A
    # non-negative range keeps the values valid for unsigned dtypes too.
    data = np.random.randint(low=0, high=128, size=dshape).astype(dtype)
    # 2x2 / stride-2 pooling == reducing over the reshaped window axes.
    ref_res = reffunc(data.reshape(1, 3, 14, 2, 14, 2), axis=(3, 5)).astype(dtype)
    for target, ctx in ctx_list():
        intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
        op_res1 = intrp1.evaluate(func)(data)
        tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5)

def _test_global_pool2d(opfunc, reffunc):
n, c, h, w = tvm.var("n"), tvm.var("c"), 224, 224
Expand Down Expand Up @@ -294,6 +313,8 @@ def _test_global_pool2d(opfunc, reffunc):
def test_pool2d():
    """Exercise max/avg 2D pooling, integer avg pooling, and global pooling."""
    # Float pooling: each operator paired with its NumPy reference reduction.
    for op, ref in [(relay.nn.max_pool2d, np.max),
                    (relay.nn.avg_pool2d, np.mean)]:
        _test_pool2d(op, ref)
    # avg_pool2d must also handle integer tensors (signed and unsigned).
    for int_dtype in ('int32', 'uint16'):
        _test_pool2d_int(relay.nn.avg_pool2d, np.mean, int_dtype)
    _test_global_pool2d(relay.nn.global_max_pool2d, np.max)
    _test_global_pool2d(relay.nn.global_avg_pool2d, np.mean)

Expand Down
19 changes: 13 additions & 6 deletions topi/include/topi/nn/pooling.h
Original file line number Diff line number Diff line change
Expand Up @@ -129,19 +129,26 @@ inline Tensor pool_impl(const Tensor& x,
return tvm::max(temp(indices), { dheight, dwidth });
}, "tensor", "pool_max");
} else if (pool_type == kAvgPool) {
// Pad the inputs
auto temp = do_pad ? pad(x, pad_before, pad_after, 0, "pad_temp") : x;
auto tavg = [&](const Array<Var>& output, Expr divide_factor) {

// TVM compute for summing the pooling window.
auto pool_sum = tvm::compute(out_shape,
[&](const Array<Var>& output) {
Array<Expr> indices;
for (const Var& var : output) indices.push_back(var);
indices.Set(height_axis, output[height_axis] * stride_height + dheight);
indices.Set(width_axis, output[width_axis] * stride_width + dwidth);
return tvm::sum(temp(indices) / divide_factor, { dheight, dwidth });
};
return tvm::sum(temp(indices), { dheight, dwidth });
Copy link
Member

@FrozenGene FrozenGene Jul 24, 2019

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

It would be better to leave a TODO comment here, reminding us of a potential issue we should watch for in the future when parsing TFLite models.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@FrozenGene If you mean int8<->uint8 casting for tflite, I think the TODO can be put into tflite frontend instead. The bugfix here in topi is complete IMO. what do you think?

Copy link
Member

@FrozenGene FrozenGene Jul 24, 2019

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I mean the tvm.sum type should be uint16 for tflite's average_pool2d, not int32.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

For TFLite adaptation, we can cast the input to uint16 and then pass the casted input to avg_pool2d. This is purely the job of framework parser. It does not need any code in the TOPI layer.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

yes my understanding is avg_pool2d(int_8_value.astype("uint16")).astype("int8") will be in frontend, right?

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I understand the approach you want to take. Even when the output dtype is "int32", you still want to do avg_pool2d(int_8_value.astype("int32")).astype("int8") in the frontend.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

If so, the way is ok.

}, "tensor", "pool_sum");

// TVM compute for dividing the reduced window sum by kernel size.
return tvm::compute(out_shape,
[&](const Array<Var>& output) {
Array<Expr> indices;
for (const Var& var : output) indices.push_back(var);
if (count_include_pad) {
return tavg(output, kernel_height * kernel_width);
return pool_sum(indices) / (kernel_height * kernel_width);
} else {
Expr h_start = output[height_axis] * stride_height - pad_top;
Expr w_start = output[width_axis] * stride_width - pad_left;
Expand All @@ -151,9 +158,9 @@ inline Tensor pool_impl(const Tensor& x,
w_start = ir::Max::make(w_start, make_const(Int(32), 0));
Expr divide_factor = ir::Max::make((h_end - h_start) * (w_end - w_start),
make_const(Int(32), 1));
return tavg(output, divide_factor);
return pool_sum(indices) / divide_factor;
}
}, "tensor", "pool_avg");
}, "tensor", kElementWise);
} else {
LOG(ERROR) << "Unrecognized pool_type: " << pool_type;
return x;
Expand Down