diff --git a/test/xpu/skip_list_common.py b/test/xpu/skip_list_common.py
index 42b3957cc..9ce6e3c9f 100644
--- a/test/xpu/skip_list_common.py
+++ b/test/xpu/skip_list_common.py
@@ -726,9 +726,6 @@
     "nn/test_dropout_xpu.py": None,
     "test_dataloader_xpu.py": None,
     "test_tensor_creation_ops_xpu.py": (
-        # CPU only (vs Numpy). CUDA skips these cases since non-deterministic results are outputed for inf and nan.
-        "test_float_to_int_conversion_finite_xpu_int8",
-        "test_float_to_int_conversion_finite_xpu_int16",
         # Dispatch issue. It is a composite operator. But it is implemented by
         # DispatchStub. XPU doesn't support DispatchStub.
         "test_kaiser_window_xpu",
diff --git a/test/xpu/test_tensor_creation_ops_xpu.py b/test/xpu/test_tensor_creation_ops_xpu.py
index 077fbb6ad..21b12d784 100644
--- a/test/xpu/test_tensor_creation_ops_xpu.py
+++ b/test/xpu/test_tensor_creation_ops_xpu.py
@@ -1226,8 +1226,10 @@ def test_float_to_int_conversion_finite(self, device, dtype):
         vals = (min, -2, -1.5, -0.5, 0, 0.5, 1.5, 2, max)
         refs = None
         if self.device_type == "cuda" or self.device_type == "xpu":
-            if torch.version.hip:
+            if torch.version.hip or torch.version.xpu:
                 # HIP min float -> int64 conversion is divergent
+                # XPU min float -> int8 conversion is divergent
+                # XPU min float -> int16 conversion is divergent
                 vals = (-2, -1.5, -0.5, 0, 0.5, 1.5, 2)
             else:
                 vals = (min, -2, -1.5, -0.5, 0, 0.5, 1.5, 2)
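
For context on the divergence the new comments refer to, the sketch below (not part of the patch; assumes only a working PyTorch install) reproduces the cast in question. Casting a float far outside the target integer range is undefined behavior, so the value produced for `min` can differ between the CPU reference and the CUDA/HIP/XPU backends, which is why `min` is dropped from `vals` on HIP/XPU instead of skipping the whole test.

# Illustration only (not part of this patch): a minimal sketch of the
# float -> int cast whose result is backend-dependent.
import torch

min_float = torch.finfo(torch.float).min  # ~-3.4e38, far below the int8/int16 range

for dtype in (torch.int8, torch.int16, torch.int64):
    cpu_val = torch.tensor([min_float]).to(dtype)
    # The printed value is implementation-defined; on CUDA, HIP, and XPU the
    # same cast may produce a different result, which is the divergence the
    # adjusted test comments describe.
    print(dtype, cpu_val.item())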