11 changes: 11 additions & 0 deletions nnvm/include/nnvm/compiler/util.h
@@ -28,6 +28,17 @@ inline tvm::Array<tvm::Expr> ShapeToArray(TShape shape) {
   return result;
 }
 
+/*!
+ * \brief Helper function to convert a TShape to a TVM array. Useful for
+ * passing data from NNVM param structures to TOPI ops.
+ *
+ * \param shape The shape to convert
+ *
+ * \return An Array of Integer, where each element is a constant int32
+ */
+inline tvm::Array<tvm::Integer> ShapeToIntArray(TShape shape) {
+  return tvm::Array<tvm::Integer>(ShapeToArray(shape).node_);
+}
 }  // namespace compiler
 }  // namespace nnvm
 #endif  // NNVM_COMPILER_UTIL_H_
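
For reference, a minimal usage sketch of the new helper (not part of the diff; the header set and the axes {0, 2} are illustrative):

#include <nnvm/compiler/util.h>
#include <nnvm/tuple.h>

// Turn reduction axes held in an NNVM TShape into the Array<Integer>
// that the TOPI calls below now expect; each element is a constant int32.
tvm::Array<tvm::Integer> ExampleAxes() {
  nnvm::TShape r_axes{0, 2};  // illustrative axes
  return nnvm::compiler::ShapeToIntArray(r_axes);
}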
34 changes: 15 additions & 19 deletions nnvm/src/top/tensor/reduce.cc
@@ -3,9 +3,6 @@
  * \file reduce.cc
  * \brief reduce operator.
  */
-// Enforce TOPI to use old behavior that reduces to at least 1d
-#define TOPI_REDUCE_ATLEAST1D 1
-
 #include <nnvm/op.h>
 #include <nnvm/node.h>
 #include <nnvm/op_attr_types.h>
@@ -20,13 +17,12 @@
 #include "topi/reduction.h"
 #include "topi/transform.h"
 
-static_assert(TOPI_REDUCE_ATLEAST1D, "need to use legacy reduce behavior");
-
 namespace nnvm {
 namespace top {
 using namespace tvm;
 using namespace nnvm::compiler;
 
+
 // reduce
 DMLC_REGISTER_PARAMETER(ReduceParam);
 
@@ -168,9 +164,9 @@ Example::
     TShape r_axes = GetReduceAxes(inputs[0]->shape.size(),
                                   param.axis, param.exclude);
     if (!r_axes.ndim()) return Array<Tensor> { topi::identity(inputs[0]) };
-    auto axis = ShapeToArray(r_axes);
+    auto axis = ShapeToIntArray(r_axes);
     return Array<Tensor>{
-      topi::sum(inputs[0], axis, param.keepdims) };
+      topi::sum(inputs[0], axis, param.keepdims, true) };
 })
 .set_attr<FGradient>(
   "FGradient", [](const NodePtr& n,
@@ -202,9 +198,9 @@ NNVM_REGISTER_REDUCE_OP(max)
     const ReduceParam& param = nnvm::get<ReduceParam>(attrs.parsed);
     TShape r_axes = GetReduceAxes(inputs[0]->shape.size(),
                                   param.axis, param.exclude);
-    auto axis = ShapeToArray(r_axes);
+    auto axis = ShapeToIntArray(r_axes);
     return Array<Tensor>{
-      topi::max(inputs[0], axis, param.keepdims) };
+      topi::max(inputs[0], axis, param.keepdims, true) };
 })
 .set_attr<FGradient>(
   "FGradient", [](const NodePtr& n,
@@ -235,9 +231,9 @@ NNVM_REGISTER_REDUCE_OP(min)
     const ReduceParam& param = nnvm::get<ReduceParam>(attrs.parsed);
     TShape r_axes = GetReduceAxes(inputs[0]->shape.size(),
                                   param.axis, param.exclude);
-    auto axis = ShapeToArray(r_axes);
+    auto axis = ShapeToIntArray(r_axes);
     return Array<Tensor>{
-      topi::min(inputs[0], axis, param.keepdims) };
+      topi::min(inputs[0], axis, param.keepdims, true) };
 })
 .set_attr<FGradient>(
   "FGradient", [](const NodePtr& n,
@@ -299,8 +295,8 @@ values over a given axis.
     const ReduceParam& param = nnvm::get<ReduceParam>(attrs.parsed);
     TShape r_axes = GetReduceAxes(inputs[0]->shape.size(),
                                   param.axis, param.exclude);
-    auto axis = ShapeToArray(r_axes);
-    Tensor out = topi::argmax(inputs[0], axis, param.keepdims);
+    auto axis = ShapeToIntArray(r_axes);
+    Tensor out = topi::argmax(inputs[0], axis, param.keepdims, true);
     if (param.dtype == kFloat32) out = topi::cast(out, out_info[0]->dtype);
     return Array<Tensor>{out};
 });
@@ -322,8 +318,8 @@ values over a given axis.
     const ReduceParam& param = nnvm::get<ReduceParam>(attrs.parsed);
     TShape r_axes = GetReduceAxes(inputs[0]->shape.size(),
                                   param.axis, param.exclude);
-    auto axis = ShapeToArray(r_axes);
-    Tensor out = topi::argmin(inputs[0], axis, param.keepdims);
+    auto axis = ShapeToIntArray(r_axes);
+    Tensor out = topi::argmin(inputs[0], axis, param.keepdims, true);
     if (param.dtype == kFloat32) out = topi::cast(out, out_info[0]->dtype);
     return Array<Tensor>{out};
 });
@@ -352,15 +348,15 @@ Example::
     TShape r_axes = GetReduceAxes(inputs[0]->shape.size(),
                                   param.axis, param.exclude);
     if (!r_axes.ndim()) return Array<Tensor> { topi::identity(inputs[0]) };
-    auto axis = ShapeToArray(r_axes);
+    auto axis = ShapeToIntArray(r_axes);
 
     Expr count = make_const(inputs[0]->dtype, 1);
     for (auto& i : r_axes) {
       count *= inputs[0]->shape[i];
     }
 
     return Array<Tensor>{
-      topi::divide(topi::sum(inputs[0], axis, param.keepdims), count) };
+      topi::divide(topi::sum(inputs[0], axis, param.keepdims, true), count) };
 });
 
 NNVM_REGISTER_REDUCE_OP(prod)
@@ -387,9 +383,9 @@ Example::
     TShape r_axes = GetReduceAxes(inputs[0]->shape.size(),
                                   param.axis, param.exclude);
     if (!r_axes.ndim()) return Array<Tensor> { topi::identity(inputs[0]) };
-    auto axis = ShapeToArray(r_axes);
+    auto axis = ShapeToIntArray(r_axes);
     return Array<Tensor>{
-      topi::prod(inputs[0], axis, param.keepdims) };
+      topi::prod(inputs[0], axis, param.keepdims, true) };
 });
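
The trailing true added to every reducer call above is TOPI's new per-call atleast1d flag, which replaces the deleted global TOPI_REDUCE_ATLEAST1D macro. A hedged sketch of the pattern, assuming the post-change signature topi::sum(data, axis, keepdims, atleast1d):

#include <nnvm/compiler/util.h>
#include <topi/reduction.h>

// With atleast1d=true a full reduction still yields a 1-D tensor rather
// than a rank-0 scalar, preserving NNVM's legacy reduce semantics per
// call site instead of via a compile-time macro.
tvm::Tensor SumWithLegacyShape(const tvm::Tensor& data,
                               const nnvm::TShape& r_axes, bool keepdims) {
  auto axis = nnvm::compiler::ShapeToIntArray(r_axes);
  return topi::sum(data, axis, keepdims, /*atleast1d=*/true);
}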
4 changes: 2 additions & 2 deletions nnvm/src/top/tensor/transform.cc
@@ -756,8 +756,8 @@ Examples::
     const Array<Tensor>& inputs,
     const Array<Tensor>& out_info) {
     const SqueezeParam& param = nnvm::get<SqueezeParam>(attrs.parsed);
-    auto axis = ShapeToArray(param.axis);
-    return Array<Tensor>{ topi::squeeze(inputs[0], axis) };
+    auto axis = ShapeToIntArray(param.axis);
+    return Array<Tensor>{ topi::squeeze(inputs[0], axis, true) };
 })
 .set_attr<FGradient>(
   "FGradient", [](const NodePtr& n,
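
topi::squeeze gains the same kind of flag here; a sketch assuming the post-change signature squeeze(x, axis, atleast1d, ...), with an illustrative axis of {0}:

#include <topi/transform.h>

// With atleast1d=true, a squeeze that would drop every dimension
// returns a 1-D tensor instead of a rank-0 scalar, matching the
// squeeze semantics NNVM already exposes.
tvm::Tensor SqueezeLeadingDim(const tvm::Tensor& x) {
  return topi::squeeze(x, /*axis=*/{0}, /*atleast1d=*/true);
}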
14 changes: 4 additions & 10 deletions topi/include/topi/detail/fuse.h
@@ -14,22 +14,16 @@ using namespace tvm;
 
 /*!
  * \brief Fuse all of the given args
- * 
+ *
  * \param stage The stage in which to apply the fuse
  * \param args The iteration variables to be fused
 *
 * \return The fused iteration variable
 */
 inline IterVar Fuse(Stage stage, const Array<IterVar>& args) {
-  CHECK_GE(args.size(), 1) << "Fuse requires at least 1 arg";
-
-  auto fused = args[0];
-  for (size_t i = 1; i < args.size(); ++i) {
-    IterVar out;
-    stage.fuse(fused, args[i], &out);
-    fused = out;
-  }
-  return fused;
+  IterVar res;
+  stage.fuse(args, &res);
+  return res;
 }
 
 }  // namespace detail
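
The rewritten helper delegates the pairwise fusion loop to Stage::fuse's Array overload, which makes the manual loop and the CHECK_GE guard unnecessary. A usage sketch (sch and op are assumed to be an existing schedule and ComputeOp; the function name is illustrative):

#include <topi/detail/fuse.h>
#include <tvm/operation.h>
#include <tvm/schedule.h>

// Fuse every root axis of a compute stage into one iteration variable,
// e.g. ahead of binding to a single thread axis.
tvm::IterVar FuseAllAxes(tvm::Schedule sch, const tvm::Operation& op) {
  const auto* compute = op.as<tvm::ComputeOpNode>();
  return topi::detail::Fuse(sch[op], compute->axis);
}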
2 changes: 1 addition & 1 deletion topi/include/topi/nn/l2_normalize.h
@@ -27,7 +27,7 @@ using namespace tvm;
  */
 inline Tensor l2_normalize(const Tensor& data,
                            float eps,
-                           const Array<Expr>& axis,
+                           const Array<Integer>& axis,
                            std::string name = "tensor",
                            std::string tag = "l2_normalize") {
   CHECK_EQ(data->shape.size(), 4) << "L2 normalization requires 4-D input";
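
A call-site sketch for the tightened signature (the eps value and the channel axis {1} are illustrative):

#include <topi/nn/l2_normalize.h>

// The axis argument is now a list of concrete integers rather than
// arbitrary exprs. Normalizing over the channel axis of a 4-D NCHW
// tensor:
tvm::Tensor NormalizeChannels(const tvm::Tensor& data) {
  return topi::nn::l2_normalize(data, /*eps=*/1e-6f, /*axis=*/{1});
}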
2 changes: 1 addition & 1 deletion topi/include/topi/nn/softmax.h
@@ -40,7 +40,7 @@ inline Tensor softmax(const Tensor &x,
 
   auto k1 = tvm::reduce_axis(Range(0, input_shape[axis]), "k1");
   auto k2 = tvm::reduce_axis(Range(0, input_shape[axis]), "k2");
-  auto reduced_shape = MakeReduceTargetShape({axis}, x, false);
+  auto reduced_shape = MakeReduceTargetShape({axis}, x, false, false);
 
   auto insert_reduce_index = [axis, ndim](const Array<Var> &indices,
                                           const IterVar &reduce_index) {
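The extra false is the new atleast1d argument threaded through this helper; with it disabled, softmax's intermediate max/sum tensors may legally become rank-0 when the input is 1-D. A sketch assuming the post-change helper signature MakeReduceTargetShape(real_axis, data, keepdims, atleast1d):

#include <topi/reduction.h>

// Reducing the only axis of a 1-D tensor with atleast1d=false yields a
// rank-0 (scalar) target shape, which TOPI reductions can now produce.
tvm::Array<tvm::Expr> ScalarTargetShape(const tvm::Tensor& x1d) {
  return topi::MakeReduceTargetShape(/*real_axis=*/{0}, x1d,
                                     /*keepdims=*/false,
                                     /*atleast1d=*/false);
}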