@@ -742,36 +742,36 @@ inline Tensor tile(const Tensor& x,
   Array<Expr> reps_shape;
   Array<Expr> new_shape;
   if (ndim == rdim) {
-    for (size_t i = 0; i < ndim; ++i) {
+    for (size_t i = 0; i < static_cast<size_t>(ndim); ++i) {
       data_shape.push_back(x->shape[i]);
       reps_shape.push_back(reps[i]);
     }
   } else if (ndim > rdim) {
-    for (size_t i = 0; i < ndim; ++i)
+    for (size_t i = 0; i < static_cast<size_t>(ndim); ++i)
       data_shape.push_back(x->shape[i]);
-    for (size_t i = 0; i < ndim - rdim; ++i)
+    for (size_t i = 0; i < static_cast<size_t>(ndim - rdim); ++i)
       reps_shape.push_back(1);
-    for (size_t i = 0; i < rdim; ++i)
+    for (size_t i = 0; i < static_cast<size_t>(rdim); ++i)
       reps_shape.push_back(reps[i]);
   } else {
-    for (size_t i = 0; i < rdim - ndim; ++i)
+    for (size_t i = 0; i < static_cast<size_t>(rdim - ndim); ++i)
       data_shape.push_back(1);
-    for (size_t i = 0; i < ndim; ++i)
+    for (size_t i = 0; i < static_cast<size_t>(ndim); ++i)
       data_shape.push_back(x->shape[i]);
-    for (size_t i = 0; i < rdim; ++i)
+    for (size_t i = 0; i < static_cast<size_t>(rdim); ++i)
       reps_shape.push_back(reps[i]);
   }
-  for (size_t i = 0; i < tdim; ++i)
+  for (size_t i = 0; i < static_cast<size_t>(tdim); ++i)
     new_shape.push_back(data_shape[i] * reps_shape[i]);

   return compute(
     new_shape, [&](const Array<Var>& indices) {
       Array<Expr> idx;
       if (ndim >= rdim) {
-        for (size_t i = 0; i < ndim; ++i)
+        for (size_t i = 0; i < static_cast<size_t>(ndim); ++i)
           idx.push_back(indices[i] % x->shape[i]);
       } else {
-        for (size_t i = 0; i < ndim; ++i)
+        for (size_t i = 0; i < static_cast<size_t>(ndim); ++i)
           idx.push_back(indices[rdim - ndim + i] % x->shape[i]);
       }
       return x(idx);
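For context, the hunk only touches the loop bounds: each dimension count (`ndim`, `rdim`, `tdim`) is cast to `size_t` before being compared with the unsigned loop index `i`, which silences `-Wsign-compare` without changing behaviour for non-negative ranks. Below is a minimal standalone sketch of the pattern, not TVM code; it assumes the dimension counts are signed integers in this version of `tile()`, which is what the added casts suggest.

```cpp
// Sketch: why the loop bounds are wrapped in static_cast<size_t>.
// Hypothetical example, not part of the TVM sources.
#include <cstddef>
#include <vector>

int main() {
  std::vector<int> shape = {2, 3, 4};
  int ndim = static_cast<int>(shape.size());  // signed dimension count, as assumed above

  // for (size_t i = 0; i < ndim; ++i)        // -Wsign-compare: comparison of
  //                                           // integers of different signedness
  for (size_t i = 0; i < static_cast<size_t>(ndim); ++i) {
    // The cast is safe here because a tensor rank is never negative.
    (void)shape[i];
  }
  return 0;
}
```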