@@ -305,6 +305,8 @@ std::tuple<at::Tensor, at::Tensor> DistanceBackwardCuda(
   at::CheckedFrom c = "DistanceBackwardCuda";
   at::checkAllSameGPU(c, {objects_t, targets_t, idx_objects_t, grad_dists_t});
   at::checkAllSameType(c, {objects_t, targets_t, grad_dists_t});
+  // This is nondeterministic because atomicAdd
+  at::globalContext().alertNotDeterministic("DistanceBackwardCuda");
 
   // Set the device for the kernel launch based on the device of the input
   at::cuda::CUDAGuard device_guard(objects.device());
@@ -624,6 +626,9 @@ std::tuple<at::Tensor, at::Tensor> PointFaceArrayDistanceBackwardCuda(
   at::CheckedFrom c = "PointFaceArrayDistanceBackwardCuda";
   at::checkAllSameGPU(c, {points_t, tris_t, grad_dists_t});
   at::checkAllSameType(c, {points_t, tris_t, grad_dists_t});
+  // This is nondeterministic because atomicAdd
+  at::globalContext().alertNotDeterministic(
+      "PointFaceArrayDistanceBackwardCuda");
 
   // Set the device for the kernel launch based on the device of the input
   at::cuda::CUDAGuard device_guard(points.device());
@@ -787,6 +792,9 @@ std::tuple<at::Tensor, at::Tensor> PointEdgeArrayDistanceBackwardCuda(
   at::CheckedFrom c = "PointEdgeArrayDistanceBackwardCuda";
   at::checkAllSameGPU(c, {points_t, segms_t, grad_dists_t});
   at::checkAllSameType(c, {points_t, segms_t, grad_dists_t});
+  // This is nondeterministic because atomicAdd
+  at::globalContext().alertNotDeterministic(
+      "PointEdgeArrayDistanceBackwardCuda");
 
   // Set the device for the kernel launch based on the device of the input
   at::cuda::CUDAGuard device_guard(points.device());
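// ---------------------------------------------------------------------------
// Note (not part of this diff): a minimal sketch of the pattern the hunks
// above add. An ATen CUDA backward that scatters gradients with atomicAdd
// calls alertNotDeterministic() up front, so a user who has requested
// torch.use_deterministic_algorithms(True) gets a RuntimeError (or a warning
// in warn-only mode) naming the op instead of silently nondeterministic
// gradients. The op name ExampleBackwardCuda below is hypothetical.

#include <ATen/ATen.h>

at::Tensor ExampleBackwardCuda(const at::Tensor& grad_dists) {
  // Raises or warns if deterministic algorithms were requested, because the
  // kernel launched below would accumulate gradients with atomicAdd.
  at::globalContext().alertNotDeterministic("ExampleBackwardCuda");

  at::Tensor grad_points = at::zeros_like(grad_dists);
  // ... launch a kernel that does atomicAdd into grad_points ...
  return grad_points;
}
// ---------------------------------------------------------------------------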