Skip to content

Commit 7e34988

Browse files
merrymercy authored and tqchen committed
[TOPI] Rename output tensors for better readability (#3006)
1 parent c64a33e commit 7e34988

File tree

7 files changed

+76
-74
lines changed

7 files changed

+76
-74
lines changed

topi/include/topi/broadcast.h

Lines changed: 27 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -46,7 +46,7 @@ namespace topi {
4646
*/
4747
inline tvm::Tensor broadcast_to(const tvm::Tensor& t,
4848
const tvm::Array<tvm::Expr>& output_shape,
49-
std::string name = "tensor",
49+
std::string name = "T_broadcast_to",
5050
std::string tag = kBroadcast) {
5151
CHECK_GE(output_shape.size(), t->shape.size())
5252
<< "Not a broadcast, output dimensionality smaller than input.\noutput: "
@@ -66,35 +66,35 @@ inline tvm::Tensor broadcast_to(const tvm::Tensor& t,
6666
tag);
6767
}
6868

69-
#define TOPI_DEFINE_BCAST_OP(Name, ComputeRule) \
70-
inline tvm::Expr Name(const tvm::Expr& a, \
71-
const tvm::Expr& b) { \
72-
ComputeRule; \
73-
} \
74-
inline tvm::Tensor Name(const tvm::Tensor& A, \
75-
const tvm::Tensor& B, \
76-
std::string name = "tensor", \
77-
std::string tag = kBroadcast) { \
78-
auto l = [](tvm::Expr a, tvm::Expr b) { ComputeRule; }; \
79-
return detail::WithBroadcast(l, A, B, name, tag); \
80-
} \
81-
inline tvm::Tensor Name(const tvm::Tensor& A, \
82-
const tvm::Expr& B, \
83-
std::string name = "tensor", \
84-
std::string tag = kElementWise) { \
69+
#define TOPI_DEFINE_BCAST_OP(Name, ComputeRule) \
70+
inline tvm::Expr Name(const tvm::Expr& a, \
71+
const tvm::Expr& b) { \
72+
ComputeRule; \
73+
} \
74+
inline tvm::Tensor Name(const tvm::Tensor& A, \
75+
const tvm::Tensor& B, \
76+
std::string name = "T_" #Name, \
77+
std::string tag = kBroadcast) { \
78+
auto l = [](tvm::Expr a, tvm::Expr b) { ComputeRule; }; \
79+
return detail::WithBroadcast(l, A, B, name, tag); \
80+
} \
81+
inline tvm::Tensor Name(const tvm::Tensor& A, \
82+
const tvm::Expr& B, \
83+
std::string name = "T_" #Name, \
84+
std::string tag = kElementWise) { \
8585
auto l = [](tvm::Expr a, tvm::Expr b) { ComputeRule; }; \
8686
return compute(A->shape, [&](const ::tvm::Array<::tvm::Var>& i) { \
87-
return l(A(i), B); \
88-
}, name, tag); \
89-
} \
90-
inline tvm::Tensor Name(const tvm::Expr& A, \
91-
const tvm::Tensor& B, \
92-
std::string name = "tensor", \
93-
std::string tag = kElementWise) { \
94-
auto l = [&](tvm::Expr a, tvm::Expr b) { ComputeRule; }; \
87+
return l(A(i), B); \
88+
}, name, tag); \
89+
} \
90+
inline tvm::Tensor Name(const tvm::Expr& A, \
91+
const tvm::Tensor& B, \
92+
std::string name = "T_" #Name, \
93+
std::string tag = kElementWise) { \
94+
auto l = [&](tvm::Expr a, tvm::Expr b) { ComputeRule; }; \
9595
return compute(B->shape, [&](const ::tvm::Array<::tvm::Var>& i) { \
96-
return l(A, B(i)); \
97-
}, name, tag); \
96+
return l(A, B(i)); \
97+
}, name, tag); \
9898
}
9999

100100

topi/include/topi/elemwise.h

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -38,7 +38,7 @@ using namespace tvm;
3838
// Unary intrinsic operators
3939
#define TOPI_DECLARE_UNARY_OP(OpName) \
4040
inline Tensor OpName(const Tensor& x, \
41-
std::string name = "tensor", \
41+
std::string name = "T_" #OpName, \
4242
std::string tag = kElementWise) { \
4343
return compute(x->shape, [&](const Array<Var>& i) { \
4444
return ::tvm::OpName(x(i)); \
@@ -66,7 +66,7 @@ TOPI_DECLARE_UNARY_OP(abs);
6666
* \return A Tensor whose op member is the identity operation
6767
*/
6868
inline Tensor identity(const Tensor& x,
69-
std::string name = "tensor",
69+
std::string name = "T_identity",
7070
std::string tag = kElementWise) {
7171
return compute(x->shape, [&](const Array<Var>& i) {
7272
return x(i);
@@ -83,7 +83,7 @@ inline Tensor identity(const Tensor& x,
8383
* \return A Tensor whose op member is the negation operation
8484
*/
8585
inline Tensor negative(const Tensor& x,
86-
std::string name = "tensor",
86+
std::string name = "T_negative",
8787
std::string tag = kElementWise) {
8888
return compute(x->shape, [&](const Array<Var>& i) {
8989
return -x(i);
@@ -100,7 +100,7 @@ inline Tensor negative(const Tensor& x,
100100
* \return A Tensor whose op member is the logical NOT operation
101101
*/
102102
inline Tensor logical_not(const Tensor& x,
103-
std::string name = "tensor",
103+
std::string name = "T_logical_not",
104104
std::string tag = kElementWise) {
105105
return compute(x->shape, [&](const Array<Var>& i) {
106106
return !x(i);
@@ -117,7 +117,7 @@ inline Tensor logical_not(const Tensor& x,
117117
* \return A Tensor whose op member is the sign
118118
*/
119119
inline Tensor sign(const Tensor& x,
120-
std::string name = "tensor",
120+
std::string name = "T_sign",
121121
std::string tag = kElementWise) {
122122
return compute(x->shape, [&](const Array<Var>& i) {
123123
Expr zero = make_zero(x->dtype);
@@ -144,7 +144,7 @@ inline Tensor sign(const Tensor& x,
144144
inline Tensor clip(const Tensor& x,
145145
const Expr& a_min,
146146
const Expr& a_max,
147-
std::string name = "tensor",
147+
std::string name = "T_clip",
148148
std::string tag = kElementWise) {
149149
return compute(x->shape, [&](const Array<Var>& i) {
150150
auto min_val = tvm::cast(x->dtype, a_min);
@@ -167,7 +167,7 @@ inline Tensor clip(const Tensor& x,
167167
*/
168168
inline Tensor cast(const Tensor& x,
169169
Type type,
170-
std::string name = "tensor",
170+
std::string name = "T_cast",
171171
std::string tag = kElementWise) {
172172
return compute(x->shape, [&](const Array<Var>& i) {
173173
auto expr = x(i);
@@ -193,7 +193,7 @@ inline Tensor cast(const Tensor& x,
193193
* \return A Tensor whose op member is the sum operation
194194
*/
195195
inline Tensor elemwise_sum(const Array<Tensor>& xs,
196-
std::string name = "tensor",
196+
std::string name = "T_elemwise_sum",
197197
std::string tag = kElementWise) {
198198
CHECK_GT(xs.size(), 0) << "elemwise sum must have at least one input tensor.";
199199
return compute(xs[0]->shape, [&](const Array<Var>& i) {
@@ -219,7 +219,7 @@ inline Tensor elemwise_sum(const Array<Tensor>& xs,
219219
inline Tensor full(const Array<Expr>& shape,
220220
Type dtype,
221221
const Expr fill_value,
222-
std::string name = "tensor",
222+
std::string name = "T_full",
223223
std::string tag = kElementWise) {
224224
Expr ev = cast(dtype, fill_value);
225225
if (!ev.defined()) {
@@ -243,7 +243,7 @@ inline Tensor full(const Array<Expr>& shape,
243243
*/
244244
inline Tensor full_like(const Tensor& x,
245245
const Expr fill_value,
246-
std::string name = "tensor",
246+
std::string name = "T_full_like",
247247
std::string tag = kElementWise) {
248248
Expr ev = cast(x->dtype, fill_value);
249249
return compute(x->shape, [&](const Array<Var>& i) {

topi/include/topi/nn.h

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -63,7 +63,7 @@ tvm::Expr Map(const tvm::Array<tvm::Expr>& exprs, T op) {
6363
template <typename T>
6464
inline tvm::Tensor relu(const tvm::Tensor& t,
6565
T threshold = static_cast<T>(0),
66-
std::string name = "tensor",
66+
std::string name = "T_relu",
6767
std::string tag = kElementWise) {
6868
return tvm::compute(
6969
t->shape,
@@ -87,7 +87,7 @@ inline tvm::Tensor relu(const tvm::Tensor& t,
8787
*/
8888
inline tvm::Tensor leaky_relu(const tvm::Tensor& t,
8989
double alpha = 0.1,
90-
std::string name = "tensor",
90+
std::string name = "T_leaky_relu",
9191
std::string tag = kElementWise) {
9292
return tvm::compute(
9393
t->shape,
@@ -114,7 +114,7 @@ inline tvm::Tensor leaky_relu(const tvm::Tensor& t,
114114
inline tvm::Tensor prelu(const tvm::Tensor &x,
115115
const tvm::Tensor &slope,
116116
const int axis = 1,
117-
std::string name = "tensor",
117+
std::string name = "T_prelu",
118118
std::string tag = kBroadcast) {
119119
CHECK((size_t)axis < x->shape.size()) <<
120120
"Wrong axis (" << axis << ")value. ";
@@ -171,7 +171,7 @@ inline tvm::Tensor pad(const tvm::Tensor& t,
171171
const tvm::Array<tvm::Expr>& pad_before,
172172
tvm::Array<tvm::Expr> pad_after = tvm::Array<tvm::Expr>(),
173173
Expr pad_value = Expr(),
174-
std::string name = "tensor",
174+
std::string name = "T_pad",
175175
std::string tag = kElementWise) {
176176
if (pad_after.size() < pad_before.size()) {
177177
for (size_t i = pad_after.size(); i < pad_before.size(); ++i) {
@@ -247,7 +247,7 @@ inline tvm::Tensor conv2d_nchw(const tvm::Tensor& I,
247247
int pad_w = 0,
248248
int stride_h = 1,
249249
int stride_w = 1,
250-
std::string name = "tensor",
250+
std::string name = "T_conv2d_nchw",
251251
std::string tag = kConv2dNCHW) {
252252
CHECK_EQ(4, I->shape.size());
253253
CHECK_EQ(4, W->shape.size());
@@ -298,7 +298,7 @@ inline tvm::Tensor conv2d_hwcn(const tvm::Tensor& I,
298298
int pad_w = 0,
299299
int stride_h = 1,
300300
int stride_w = 1,
301-
std::string name = "tensor",
301+
std::string name = "T_conv2d_hwcn",
302302
std::string tag = kConv2dHWCN) {
303303
CHECK_EQ(4, I->shape.size());
304304
CHECK_EQ(4, W->shape.size());
@@ -349,7 +349,7 @@ inline tvm::Tensor depthwise_conv2d_nchw(const tvm::Tensor& I,
349349
int pad_w = 0,
350350
int stride_h = 1,
351351
int stride_w = 1,
352-
std::string name = "tensor",
352+
std::string name = "T_depthwise_conv2d_nchw",
353353
std::string tag = kDepthwiseConv2dNCHW) {
354354
CHECK_EQ(4, I->shape.size());
355355
CHECK_EQ(4, W->shape.size());
@@ -382,7 +382,7 @@ inline tvm::Tensor depthwise_conv2d_nhwc(const tvm::Tensor& I,
382382
int pad_w = 0,
383383
int stride_h = 1,
384384
int stride_w = 1,
385-
std::string name = "tensor",
385+
std::string name = "T_depthwise_conv2d_nhwc",
386386
std::string tag = kDepthwiseConv2dNHWC) {
387387
CHECK_EQ(4, I->shape.size());
388388
CHECK_EQ(4, W->shape.size());
@@ -435,7 +435,7 @@ inline tvm::Tensor group_conv2d_ngchw(const tvm::Tensor& I,
435435
int pad_w = 0,
436436
int stride_h = 1,
437437
int stride_w = 1,
438-
std::string name = "tensor",
438+
std::string name = "T_group_conv2d_ngchw",
439439
std::string tag = kGroupConv2d) {
440440
CHECK_EQ(5, I->shape.size());
441441
CHECK_EQ(5, W->shape.size());

topi/include/topi/nn/pooling.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -272,8 +272,8 @@ inline Tensor global_pool(const Tensor& x,
272272
auto height = x->shape[height_axis];
273273
auto width = x->shape[width_axis];
274274

275-
auto dheight = tvm::reduce_axis(Range(0, height));
276-
auto dwidth = tvm::reduce_axis(Range(0, width));
275+
auto dheight = tvm::reduce_axis(Range(0, height), "rv1");
276+
auto dwidth = tvm::reduce_axis(Range(0, width), "rv2");
277277

278278
if (pool_type == kMaxPool) {
279279
return tvm::compute(out_shape,

0 commit comments

Comments (0)