Merged
54 changes: 27 additions & 27 deletions topi/include/topi/broadcast.h
@@ -46,7 +46,7 @@ namespace topi {
*/
 inline tvm::Tensor broadcast_to(const tvm::Tensor& t,
                                 const tvm::Array<tvm::Expr>& output_shape,
-                                std::string name = "tensor",
+                                std::string name = "T_broadcast_to",
                                 std::string tag = kBroadcast) {
   CHECK_GE(output_shape.size(), t->shape.size())
       << "Not a broadcast, output dimensionality smaller than input.\noutput: "
@@ -66,35 +66,35 @@ inline tvm::Tensor broadcast_to(const tvm::Tensor& t,
       tag);
 }
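
A minimal sketch of the effect (illustrative only; shapes and variable names are not part of the patch):

    // Broadcast a {1, 8} tensor up to {4, 8}.
    tvm::Tensor t = tvm::placeholder({1, 8}, tvm::Float(32), "t");
    tvm::Tensor out = topi::broadcast_to(t, {4, 8});
    // out->op->name now defaults to "T_broadcast_to" rather than the
    // generic "tensor", so the op is identifiable in lowered IR.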

-#define TOPI_DEFINE_BCAST_OP(Name, ComputeRule) \
-  inline tvm::Expr Name(const tvm::Expr& a, \
-                        const tvm::Expr& b) { \
-    ComputeRule; \
-  } \
-  inline tvm::Tensor Name(const tvm::Tensor& A, \
-                          const tvm::Tensor& B, \
-                          std::string name = "tensor", \
-                          std::string tag = kBroadcast) { \
-    auto l = [](tvm::Expr a, tvm::Expr b) { ComputeRule; }; \
-    return detail::WithBroadcast(l, A, B, name, tag); \
-  } \
-  inline tvm::Tensor Name(const tvm::Tensor& A, \
-                          const tvm::Expr& B, \
-                          std::string name = "tensor", \
-                          std::string tag = kElementWise) { \
+#define TOPI_DEFINE_BCAST_OP(Name, ComputeRule) \
+  inline tvm::Expr Name(const tvm::Expr& a, \
+                        const tvm::Expr& b) { \
+    ComputeRule; \
+  } \
+  inline tvm::Tensor Name(const tvm::Tensor& A, \
+                          const tvm::Tensor& B, \
+                          std::string name = "T_" #Name, \
+                          std::string tag = kBroadcast) { \
+    auto l = [](tvm::Expr a, tvm::Expr b) { ComputeRule; }; \
+    return detail::WithBroadcast(l, A, B, name, tag); \
+  } \
+  inline tvm::Tensor Name(const tvm::Tensor& A, \
+                          const tvm::Expr& B, \
+                          std::string name = "T_" #Name, \
+                          std::string tag = kElementWise) { \
     auto l = [](tvm::Expr a, tvm::Expr b) { ComputeRule; }; \
     return compute(A->shape, [&](const ::tvm::Array<::tvm::Var>& i) { \
-      return l(A(i), B); \
-    }, name, tag); \
-  } \
-  inline tvm::Tensor Name(const tvm::Expr& A, \
-                          const tvm::Tensor& B, \
-                          std::string name = "tensor", \
-                          std::string tag = kElementWise) { \
-    auto l = [&](tvm::Expr a, tvm::Expr b) { ComputeRule; }; \
+      return l(A(i), B); \
+    }, name, tag); \
+  } \
+  inline tvm::Tensor Name(const tvm::Expr& A, \
+                          const tvm::Tensor& B, \
+                          std::string name = "T_" #Name, \
+                          std::string tag = kElementWise) { \
+    auto l = [&](tvm::Expr a, tvm::Expr b) { ComputeRule; }; \
     return compute(B->shape, [&](const ::tvm::Array<::tvm::Var>& i) { \
-      return l(A, B(i)); \
-    }, name, tag); \
+      return l(A, B(i)); \
+    }, name, tag); \
  }
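
For context, broadcast.h instantiates this macro for the standard binary ops, e.g. TOPI_DEFINE_BCAST_OP(add, { return a + b; }); the stringized "T_" #Name therefore flows into every overload. A sketch, assuming that instantiation:

    tvm::Tensor A = tvm::placeholder({4, 1}, tvm::Float(32), "A");
    tvm::Tensor B = tvm::placeholder({1, 8}, tvm::Float(32), "B");
    tvm::Tensor C = topi::add(A, B);
    // #Name stringizes the macro argument, so C->op->name == "T_add".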


20 changes: 10 additions & 10 deletions topi/include/topi/elemwise.h
@@ -38,7 +38,7 @@ using namespace tvm;
 // Unary intrinsic operators
 #define TOPI_DECLARE_UNARY_OP(OpName) \
   inline Tensor OpName(const Tensor& x, \
-                       std::string name = "tensor", \
+                       std::string name = "T_" #OpName, \
                        std::string tag = kElementWise) { \
     return compute(x->shape, [&](const Array<Var>& i) { \
       return ::tvm::OpName(x(i)); \
@@ -66,7 +66,7 @@ TOPI_DECLARE_UNARY_OP(abs);
  * \return A Tensor whose op member is the identity operation
  */
 inline Tensor identity(const Tensor& x,
-                       std::string name = "tensor",
+                       std::string name = "T_identity",
                        std::string tag = kElementWise) {
   return compute(x->shape, [&](const Array<Var>& i) {
     return x(i);
@@ -83,7 +83,7 @@ inline Tensor identity(const Tensor& x,
  * \return A Tensor whose op member is the negation operation
  */
 inline Tensor negative(const Tensor& x,
-                       std::string name = "tensor",
+                       std::string name = "T_negative",
                        std::string tag = kElementWise) {
   return compute(x->shape, [&](const Array<Var>& i) {
     return -x(i);
@@ -100,7 +100,7 @@ inline Tensor negative(const Tensor& x,
  * \return A Tensor whose op member is the logical NOT operation
  */
 inline Tensor logical_not(const Tensor& x,
-                          std::string name = "tensor",
+                          std::string name = "T_logical_not",
                           std::string tag = kElementWise) {
   return compute(x->shape, [&](const Array<Var>& i) {
     return !x(i);
@@ -117,7 +117,7 @@ inline Tensor logical_not(const Tensor& x,
  * \return A Tensor whose op member is the sign
  */
 inline Tensor sign(const Tensor& x,
-                   std::string name = "tensor",
+                   std::string name = "T_sign",
                    std::string tag = kElementWise) {
   return compute(x->shape, [&](const Array<Var>& i) {
     Expr zero = make_zero(x->dtype);
@@ -144,7 +144,7 @@ inline Tensor sign(const Tensor& x,
 inline Tensor clip(const Tensor& x,
                    const Expr& a_min,
                    const Expr& a_max,
-                   std::string name = "tensor",
+                   std::string name = "T_clip",
                    std::string tag = kElementWise) {
   return compute(x->shape, [&](const Array<Var>& i) {
     auto min_val = tvm::cast(x->dtype, a_min);
@@ -167,7 +167,7 @@ inline Tensor clip(const Tensor& x,
  */
 inline Tensor cast(const Tensor& x,
                    Type type,
-                   std::string name = "tensor",
+                   std::string name = "T_cast",
                    std::string tag = kElementWise) {
   return compute(x->shape, [&](const Array<Var>& i) {
     auto expr = x(i);
@@ -193,7 +193,7 @@ inline Tensor cast(const Tensor& x,
  * \return A Tensor whose op member is the sum operation
  */
 inline Tensor elemwise_sum(const Array<Tensor>& xs,
-                           std::string name = "tensor",
+                           std::string name = "T_elemwise_sum",
                            std::string tag = kElementWise) {
   CHECK_GT(xs.size(), 0) << "elemwise sum must have at least one input tensor.";
   return compute(xs[0]->shape, [&](const Array<Var>& i) {
@@ -219,7 +219,7 @@ inline Tensor elemwise_sum(const Array<Tensor>& xs,
 inline Tensor full(const Array<Expr>& shape,
                    Type dtype,
                    const Expr fill_value,
-                   std::string name = "tensor",
+                   std::string name = "T_full",
                    std::string tag = kElementWise) {
   Expr ev = cast(dtype, fill_value);
   if (!ev.defined()) {
@@ -243,7 +243,7 @@ inline Tensor full(const Array<Expr>& shape,
  */
 inline Tensor full_like(const Tensor& x,
                         const Expr fill_value,
-                        std::string name = "tensor",
+                        std::string name = "T_full_like",
                         std::string tag = kElementWise) {
   Expr ev = cast(x->dtype, fill_value);
   return compute(x->shape, [&](const Array<Var>& i) {
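The rename follows the same pattern for every op in elemwise.h. A minimal usage sketch (shape and clip bounds are assumptions, not from the patch):

    tvm::Tensor x = tvm::placeholder({16}, tvm::Float(32), "x");
    tvm::Tensor y = topi::clip(x, tvm::make_const(tvm::Float(32), 0.0f),
                               tvm::make_const(tvm::Float(32), 6.0f));
    // y->op->name defaults to "T_clip"; previously every elemwise op was
    // named "tensor", which made lowered IR ambiguous.
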
18 changes: 9 additions & 9 deletions topi/include/topi/nn.h
@@ -63,7 +63,7 @@ tvm::Expr Map(const tvm::Array<tvm::Expr>& exprs, T op) {
 template <typename T>
 inline tvm::Tensor relu(const tvm::Tensor& t,
                         T threshold = static_cast<T>(0),
-                        std::string name = "tensor",
+                        std::string name = "T_relu",
                         std::string tag = kElementWise) {
   return tvm::compute(
       t->shape,
@@ -87,7 +87,7 @@ inline tvm::Tensor relu(const tvm::Tensor& t,
  */
 inline tvm::Tensor leaky_relu(const tvm::Tensor& t,
                               double alpha = 0.1,
-                              std::string name = "tensor",
+                              std::string name = "T_leaky_relu",
                               std::string tag = kElementWise) {
   return tvm::compute(
       t->shape,
@@ -114,7 +114,7 @@ inline tvm::Tensor leaky_relu(const tvm::Tensor& t,
 inline tvm::Tensor prelu(const tvm::Tensor &x,
                          const tvm::Tensor &slope,
                          const int axis = 1,
-                         std::string name = "tensor",
+                         std::string name = "T_prelu",
                          std::string tag = kBroadcast) {
   CHECK((size_t)axis < x->shape.size()) <<
       "Wrong axis (" << axis << ")value. ";
@@ -171,7 +171,7 @@ inline tvm::Tensor pad(const tvm::Tensor& t,
                        const tvm::Array<tvm::Expr>& pad_before,
                        tvm::Array<tvm::Expr> pad_after = tvm::Array<tvm::Expr>(),
                        Expr pad_value = Expr(),
-                       std::string name = "tensor",
+                       std::string name = "T_pad",
                        std::string tag = kElementWise) {
   if (pad_after.size() < pad_before.size()) {
     for (size_t i = pad_after.size(); i < pad_before.size(); ++i) {
@@ -247,7 +247,7 @@ inline tvm::Tensor conv2d_nchw(const tvm::Tensor& I,
                                int pad_w = 0,
                                int stride_h = 1,
                                int stride_w = 1,
-                               std::string name = "tensor",
+                               std::string name = "T_conv2d_nchw",
                                std::string tag = kConv2dNCHW) {
   CHECK_EQ(4, I->shape.size());
   CHECK_EQ(4, W->shape.size());
@@ -298,7 +298,7 @@ inline tvm::Tensor conv2d_hwcn(const tvm::Tensor& I,
                                int pad_w = 0,
                                int stride_h = 1,
                                int stride_w = 1,
-                               std::string name = "tensor",
+                               std::string name = "T_conv2d_hwcn",
                                std::string tag = kConv2dHWCN) {
   CHECK_EQ(4, I->shape.size());
   CHECK_EQ(4, W->shape.size());
@@ -349,7 +349,7 @@ inline tvm::Tensor depthwise_conv2d_nchw(const tvm::Tensor& I,
                                          int pad_w = 0,
                                          int stride_h = 1,
                                          int stride_w = 1,
-                                         std::string name = "tensor",
+                                         std::string name = "T_depthwise_conv2d_nchw",
                                          std::string tag = kDepthwiseConv2dNCHW) {
   CHECK_EQ(4, I->shape.size());
   CHECK_EQ(4, W->shape.size());
@@ -382,7 +382,7 @@ inline tvm::Tensor depthwise_conv2d_nhwc(const tvm::Tensor& I,
                                          int pad_w = 0,
                                          int stride_h = 1,
                                          int stride_w = 1,
-                                         std::string name = "tensor",
+                                         std::string name = "T_depthwise_conv2d_nhwc",
                                          std::string tag = kDepthwiseConv2dNHWC) {
   CHECK_EQ(4, I->shape.size());
   CHECK_EQ(4, W->shape.size());
@@ -435,7 +435,7 @@ inline tvm::Tensor group_conv2d_ngchw(const tvm::Tensor& I,
                                       int pad_w = 0,
                                       int stride_h = 1,
                                       int stride_w = 1,
-                                      std::string name = "tensor",
+                                      std::string name = "T_group_conv2d_ngchw",
                                       std::string tag = kGroupConv2d) {
   CHECK_EQ(5, I->shape.size());
   CHECK_EQ(5, W->shape.size());
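Likewise for the nn.h ops; a short sketch using the relu and leaky_relu signatures shown above (input shape is illustrative):

    tvm::Tensor x = tvm::placeholder({1, 3, 224, 224}, tvm::Float(32), "x");
    tvm::Tensor y = topi::relu<float>(x);      // output named "T_relu"
    tvm::Tensor z = topi::leaky_relu(y, 0.1);  // output named "T_leaky_relu"
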
4 changes: 2 additions & 2 deletions topi/include/topi/nn/pooling.h
@@ -272,8 +272,8 @@ inline Tensor global_pool(const Tensor& x,
   auto height = x->shape[height_axis];
   auto width = x->shape[width_axis];

-  auto dheight = tvm::reduce_axis(Range(0, height));
-  auto dwidth = tvm::reduce_axis(Range(0, width));
+  auto dheight = tvm::reduce_axis(Range(0, height), "rv1");
+  auto dwidth = tvm::reduce_axis(Range(0, width), "rv2");

   if (pool_type == kMaxPool) {
     return tvm::compute(out_shape,
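Naming the reduce axes helps for the same reason: they appear in printed IR and when inspecting schedules. A hedged sketch (the layout argument is assumed from the surrounding global_pool signature, which this hunk does not show in full):

    tvm::Tensor x = tvm::placeholder({1, 64, 7, 7}, tvm::Float(32), "x");
    tvm::Tensor y = topi::nn::global_pool(x, topi::nn::kMaxPool, "NCHW");
    // The pooling reduction now iterates over axes printed as "rv1" and
    // "rv2" instead of anonymous, compiler-generated names.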